Diffstat (limited to 'arch/loongarch/include')
-rw-r--r--  arch/loongarch/include/asm/Kbuild | 30
-rw-r--r--  arch/loongarch/include/asm/acenv.h | 18
-rw-r--r--  arch/loongarch/include/asm/acpi.h | 38
-rw-r--r--  arch/loongarch/include/asm/addrspace.h | 112
-rw-r--r--  arch/loongarch/include/asm/asm-offsets.h | 5
-rw-r--r--  arch/loongarch/include/asm/asm-prototypes.h | 7
-rw-r--r--  arch/loongarch/include/asm/asm.h | 191
-rw-r--r--  arch/loongarch/include/asm/asmmacro.h | 289
-rw-r--r--  arch/loongarch/include/asm/atomic.h | 362
-rw-r--r--  arch/loongarch/include/asm/barrier.h | 159
-rw-r--r--  arch/loongarch/include/asm/bitops.h | 33
-rw-r--r--  arch/loongarch/include/asm/bitrev.h | 34
-rw-r--r--  arch/loongarch/include/asm/bootinfo.h | 43
-rw-r--r--  arch/loongarch/include/asm/branch.h | 21
-rw-r--r--  arch/loongarch/include/asm/bug.h | 23
-rw-r--r--  arch/loongarch/include/asm/cache.h | 13
-rw-r--r--  arch/loongarch/include/asm/cacheflush.h | 80
-rw-r--r--  arch/loongarch/include/asm/cacheops.h | 37
-rw-r--r--  arch/loongarch/include/asm/clocksource.h | 12
-rw-r--r--  arch/loongarch/include/asm/cmpxchg.h | 123
-rw-r--r--  arch/loongarch/include/asm/compiler.h | 15
-rw-r--r--  arch/loongarch/include/asm/cpu-features.h | 73
-rw-r--r--  arch/loongarch/include/asm/cpu-info.h | 116
-rw-r--r--  arch/loongarch/include/asm/cpu.h | 127
-rw-r--r--  arch/loongarch/include/asm/cpufeature.h | 24
-rw-r--r--  arch/loongarch/include/asm/delay.h | 26
-rw-r--r--  arch/loongarch/include/asm/dma-direct.h | 11
-rw-r--r--  arch/loongarch/include/asm/dmi.h | 24
-rw-r--r--  arch/loongarch/include/asm/efi.h | 41
-rw-r--r--  arch/loongarch/include/asm/elf.h | 301
-rw-r--r--  arch/loongarch/include/asm/entry-common.h | 13
-rw-r--r--  arch/loongarch/include/asm/exec.h | 10
-rw-r--r--  arch/loongarch/include/asm/fb.h | 23
-rw-r--r--  arch/loongarch/include/asm/fixmap.h | 13
-rw-r--r--  arch/loongarch/include/asm/fpregdef.h | 53
-rw-r--r--  arch/loongarch/include/asm/fpu.h | 129
-rw-r--r--  arch/loongarch/include/asm/futex.h | 108
-rw-r--r--  arch/loongarch/include/asm/hardirq.h | 26
-rw-r--r--  arch/loongarch/include/asm/hugetlb.h | 83
-rw-r--r--  arch/loongarch/include/asm/hw_irq.h | 17
-rw-r--r--  arch/loongarch/include/asm/idle.h | 9
-rw-r--r--  arch/loongarch/include/asm/inst.h | 117
-rw-r--r--  arch/loongarch/include/asm/io.h | 129
-rw-r--r--  arch/loongarch/include/asm/irq.h | 132
-rw-r--r--  arch/loongarch/include/asm/irq_regs.h | 27
-rw-r--r--  arch/loongarch/include/asm/irqflags.h | 78
-rw-r--r--  arch/loongarch/include/asm/kdebug.h | 23
-rw-r--r--  arch/loongarch/include/asm/linkage.h | 36
-rw-r--r--  arch/loongarch/include/asm/local.h | 138
-rw-r--r--  arch/loongarch/include/asm/loongarch.h | 1516
-rw-r--r--  arch/loongarch/include/asm/loongson.h | 153
-rw-r--r--  arch/loongarch/include/asm/mmu.h | 16
-rw-r--r--  arch/loongarch/include/asm/mmu_context.h | 152
-rw-r--r--  arch/loongarch/include/asm/mmzone.h | 18
-rw-r--r--  arch/loongarch/include/asm/module.h | 80
-rw-r--r--  arch/loongarch/include/asm/module.lds.h | 7
-rw-r--r--  arch/loongarch/include/asm/numa.h | 67
-rw-r--r--  arch/loongarch/include/asm/page.h | 115
-rw-r--r--  arch/loongarch/include/asm/percpu.h | 215
-rw-r--r--  arch/loongarch/include/asm/perf_event.h | 10
-rw-r--r--  arch/loongarch/include/asm/pgalloc.h | 103
-rw-r--r--  arch/loongarch/include/asm/pgtable-bits.h | 131
-rw-r--r--  arch/loongarch/include/asm/pgtable.h | 565
-rw-r--r--  arch/loongarch/include/asm/prefetch.h | 29
-rw-r--r--  arch/loongarch/include/asm/processor.h | 209
-rw-r--r--  arch/loongarch/include/asm/ptrace.h | 152
-rw-r--r--  arch/loongarch/include/asm/reboot.h | 10
-rw-r--r--  arch/loongarch/include/asm/regdef.h | 41
-rw-r--r--  arch/loongarch/include/asm/seccomp.h | 20
-rw-r--r--  arch/loongarch/include/asm/serial.h | 11
-rw-r--r--  arch/loongarch/include/asm/setup.h | 21
-rw-r--r--  arch/loongarch/include/asm/shmparam.h | 12
-rw-r--r--  arch/loongarch/include/asm/smp.h | 115
-rw-r--r--  arch/loongarch/include/asm/sparsemem.h | 23
-rw-r--r--  arch/loongarch/include/asm/stackframe.h | 219
-rw-r--r--  arch/loongarch/include/asm/stacktrace.h | 74
-rw-r--r--  arch/loongarch/include/asm/string.h | 12
-rw-r--r--  arch/loongarch/include/asm/switch_to.h | 37
-rw-r--r--  arch/loongarch/include/asm/syscall.h | 74
-rw-r--r--  arch/loongarch/include/asm/thread_info.h | 106
-rw-r--r--  arch/loongarch/include/asm/time.h | 50
-rw-r--r--  arch/loongarch/include/asm/timex.h | 26
-rw-r--r--  arch/loongarch/include/asm/tlb.h | 180
-rw-r--r--  arch/loongarch/include/asm/tlbflush.h | 48
-rw-r--r--  arch/loongarch/include/asm/topology.h | 41
-rw-r--r--  arch/loongarch/include/asm/types.h | 19
-rw-r--r--  arch/loongarch/include/asm/uaccess.h | 269
-rw-r--r--  arch/loongarch/include/asm/unistd.h | 11
-rw-r--r--  arch/loongarch/include/asm/vdso.h | 38
-rw-r--r--  arch/loongarch/include/asm/vdso/clocksource.h | 8
-rw-r--r--  arch/loongarch/include/asm/vdso/gettimeofday.h | 99
-rw-r--r--  arch/loongarch/include/asm/vdso/processor.h | 14
-rw-r--r--  arch/loongarch/include/asm/vdso/vdso.h | 30
-rw-r--r--  arch/loongarch/include/asm/vdso/vsyscall.h | 27
-rw-r--r--  arch/loongarch/include/asm/vermagic.h | 19
-rw-r--r--  arch/loongarch/include/asm/vmalloc.h | 4
-rw-r--r--  arch/loongarch/include/uapi/asm/Kbuild | 2
-rw-r--r--  arch/loongarch/include/uapi/asm/auxvec.h | 17
-rw-r--r--  arch/loongarch/include/uapi/asm/bitsperlong.h | 9
-rw-r--r--  arch/loongarch/include/uapi/asm/break.h | 23
-rw-r--r--  arch/loongarch/include/uapi/asm/byteorder.h | 13
-rw-r--r--  arch/loongarch/include/uapi/asm/hwcap.h | 20
-rw-r--r--  arch/loongarch/include/uapi/asm/ptrace.h | 52
-rw-r--r--  arch/loongarch/include/uapi/asm/reg.h | 59
-rw-r--r--  arch/loongarch/include/uapi/asm/sigcontext.h | 44
-rw-r--r--  arch/loongarch/include/uapi/asm/signal.h | 13
-rw-r--r--  arch/loongarch/include/uapi/asm/ucontext.h | 35
-rw-r--r--  arch/loongarch/include/uapi/asm/unistd.h | 5
108 files changed, 9040 insertions, 0 deletions
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
new file mode 100644
index 000000000000..83bc0681e72b
--- /dev/null
+++ b/arch/loongarch/include/asm/Kbuild
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += dma-contiguous.h
+generic-y += export.h
+generic-y += parport.h
+generic-y += early_ioremap.h
+generic-y += qrwlock.h
+generic-y += qrwlock_types.h
+generic-y += spinlock.h
+generic-y += spinlock_types.h
+generic-y += rwsem.h
+generic-y += segment.h
+generic-y += user.h
+generic-y += stat.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += statfs.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += termios.h
+generic-y += termbits.h
+generic-y += poll.h
+generic-y += param.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += kvm_para.h
diff --git a/arch/loongarch/include/asm/acenv.h b/arch/loongarch/include/asm/acenv.h
new file mode 100644
index 000000000000..52f298f7293b
--- /dev/null
+++ b/arch/loongarch/include/asm/acenv.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * LoongArch specific ACPICA environments and implementation
+ *
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_ACENV_H
+#define _ASM_LOONGARCH_ACENV_H
+
+/*
+ * This header is required by ACPI core, but we have nothing to fill in
+ * right now. Will be updated later when needed.
+ */
+
+#endif /* _ASM_LOONGARCH_ACENV_H */
diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
new file mode 100644
index 000000000000..62044cd5b7bc
--- /dev/null
+++ b/arch/loongarch/include/asm/acpi.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_ACPI_H
+#define _ASM_LOONGARCH_ACPI_H
+
+#ifdef CONFIG_ACPI
+extern int acpi_strict;
+extern int acpi_disabled;
+extern int acpi_pci_disabled;
+extern int acpi_noirq;
+
+#define acpi_os_ioremap acpi_os_ioremap
+void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+
+static inline void disable_acpi(void)
+{
+ acpi_disabled = 1;
+ acpi_pci_disabled = 1;
+ acpi_noirq = 1;
+}
+
+static inline bool acpi_has_cpu_in_madt(void)
+{
+ return true;
+}
+
+extern struct list_head acpi_wakeup_device_list;
+
+#endif /* CONFIG_ACPI */
+
+#define ACPI_TABLE_UPGRADE_MAX_PHYS ARCH_LOW_ADDRESS_LIMIT
+
+#endif /* _ASM_LOONGARCH_ACPI_H */
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
new file mode 100644
index 000000000000..b91e0733b2e5
--- /dev/null
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1996, 99 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999 by Silicon Graphics, Inc.
+ */
+#ifndef _ASM_ADDRSPACE_H
+#define _ASM_ADDRSPACE_H
+
+#include <linux/const.h>
+
+#include <asm/loongarch.h>
+
+/*
+ * This gives the physical RAM offset.
+ */
+#ifndef __ASSEMBLY__
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET _AC(0, UL)
+#endif
+extern unsigned long vm_map_base;
+#endif /* __ASSEMBLY__ */
+
+#ifndef IO_BASE
+#define IO_BASE CSR_DMW0_BASE
+#endif
+
+#ifndef CACHE_BASE
+#define CACHE_BASE CSR_DMW1_BASE
+#endif
+
+#ifndef UNCACHE_BASE
+#define UNCACHE_BASE CSR_DMW0_BASE
+#endif
+
+#define DMW_PABITS 48
+#define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1)
+
+/*
+ * Memory above this physical address will be considered highmem.
+ */
+#ifndef HIGHMEM_START
+#define HIGHMEM_START (_AC(1, UL) << _AC(DMW_PABITS, UL))
+#endif
+
+#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
+#define TO_CACHE(x) (CACHE_BASE | ((x) & TO_PHYS_MASK))
+#define TO_UNCACHE(x) (UNCACHE_BASE | ((x) & TO_PHYS_MASK))
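+
+/*
+ * For example, assuming the usual DMW configuration where CACHE_BASE is
+ * 0x9000000000000000 and UNCACHE_BASE is 0x8000000000000000,
+ * TO_CACHE(0x200000) yields 0x9000000000200000 and TO_UNCACHE(0x200000)
+ * yields 0x8000000000200000; TO_PHYS() recovers 0x200000 from either.
+ */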
+
+/*
+ * This handles the memory map.
+ */
+#ifndef PAGE_OFFSET
+#define PAGE_OFFSET (CACHE_BASE + PHYS_OFFSET)
+#endif
+
+#ifndef FIXADDR_TOP
+#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
+#endif
+
+#ifdef __ASSEMBLY__
+#define _ATYPE_
+#define _ATYPE32_
+#define _ATYPE64_
+#define _CONST64_(x) x
+#else
+#define _ATYPE_ __PTRDIFF_TYPE__
+#define _ATYPE32_ int
+#define _ATYPE64_ __s64
+#ifdef CONFIG_64BIT
+#define _CONST64_(x) x ## L
+#else
+#define _CONST64_(x) x ## LL
+#endif
+#endif
+
+/*
+ * 32/64-bit LoongArch address spaces
+ */
+#ifdef __ASSEMBLY__
+#define _ACAST32_
+#define _ACAST64_
+#else
+#define _ACAST32_ (_ATYPE_)(_ATYPE32_) /* widen if necessary */
+#define _ACAST64_ (_ATYPE64_) /* do _not_ narrow */
+#endif
+
+#ifdef CONFIG_32BIT
+
+#define UVRANGE 0x00000000
+#define KPRANGE0 0x80000000
+#define KPRANGE1 0xa0000000
+#define KVRANGE 0xc0000000
+
+#else
+
+#define XUVRANGE _CONST64_(0x0000000000000000)
+#define XSPRANGE _CONST64_(0x4000000000000000)
+#define XKPRANGE _CONST64_(0x8000000000000000)
+#define XKVRANGE _CONST64_(0xc000000000000000)
+
+#endif
+
+/*
+ * Returns the physical address of a KPRANGEx / XKPRANGE address
+ */
+#define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK)
+
+#endif /* _ASM_ADDRSPACE_H */
diff --git a/arch/loongarch/include/asm/asm-offsets.h b/arch/loongarch/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d9ad88d293e7
--- /dev/null
+++ b/arch/loongarch/include/asm/asm-offsets.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <generated/asm-offsets.h>
diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..ed06d3997420
--- /dev/null
+++ b/arch/loongarch/include/asm/asm-prototypes.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/uaccess.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/asm.h
new file mode 100644
index 000000000000..40eea6aa469e
--- /dev/null
+++ b/arch/loongarch/include/asm/asm.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Some useful macros for LoongArch assembler code
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1995, 1996, 1997, 1999, 2001 by Ralf Baechle
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#ifndef __ASM_ASM_H
+#define __ASM_ASM_H
+
+/* LoongArch pref instruction. */
+#ifdef CONFIG_CPU_HAS_PREFETCH
+
+#define PREF(hint, addr, offs) \
+ preld hint, addr, offs; \
+
+#define PREFX(hint, addr, index) \
+ preldx hint, addr, index; \
+
+#else /* !CONFIG_CPU_HAS_PREFETCH */
+
+#define PREF(hint, addr, offs)
+#define PREFX(hint, addr, index)
+
+#endif /* !CONFIG_CPU_HAS_PREFETCH */
+
+/*
+ * Stack alignment
+ */
+#define STACK_ALIGN ~(0xf)
+
+/*
+ * Macros to handle different pointer/register sizes for 32/64-bit code
+ */
+
+/*
+ * Size of a register
+ */
+#ifndef __loongarch64
+#define SZREG 4
+#else
+#define SZREG 8
+#endif
+
+/*
+ * Use the following macros in assembler code to load/store registers,
+ * pointers etc.
+ */
+#if (SZREG == 4)
+#define REG_L ld.w
+#define REG_S st.w
+#define REG_ADD add.w
+#define REG_SUB sub.w
+#else /* SZREG == 8 */
+#define REG_L ld.d
+#define REG_S st.d
+#define REG_ADD add.d
+#define REG_SUB sub.d
+#endif
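+
+/*
+ * For example, "REG_L t0, sp, 0" assembles to ld.d on 64-bit kernels
+ * and to ld.w on 32-bit ones, so the same assembly source serves both.
+ */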
+
+/*
+ * How to add/sub/load/store/shift C int variables.
+ */
+#if (__SIZEOF_INT__ == 4)
+#define INT_ADD add.w
+#define INT_ADDI addi.w
+#define INT_SUB sub.w
+#define INT_L ld.w
+#define INT_S st.w
+#define INT_SLL slli.w
+#define INT_SLLV sll.w
+#define INT_SRL srli.w
+#define INT_SRLV srl.w
+#define INT_SRA srai.w
+#define INT_SRAV sra.w
+#endif
+
+#if (__SIZEOF_INT__ == 8)
+#define INT_ADD add.d
+#define INT_ADDI addi.d
+#define INT_SUB sub.d
+#define INT_L ld.d
+#define INT_S st.d
+#define INT_SLL slli.d
+#define INT_SLLV sll.d
+#define INT_SRL srli.d
+#define INT_SRLV srl.d
+#define INT_SRA srai.d
+#define INT_SRAV sra.d
+#endif
+
+/*
+ * How to add/sub/load/store/shift C long variables.
+ */
+#if (__SIZEOF_LONG__ == 4)
+#define LONG_ADD add.w
+#define LONG_ADDI addi.w
+#define LONG_SUB sub.w
+#define LONG_L ld.w
+#define LONG_S st.w
+#define LONG_SLL slli.w
+#define LONG_SLLV sll.w
+#define LONG_SRL srli.w
+#define LONG_SRLV srl.w
+#define LONG_SRA srai.w
+#define LONG_SRAV sra.w
+
+#ifdef __ASSEMBLY__
+#define LONG .word
+#endif
+#define LONGSIZE 4
+#define LONGMASK 3
+#define LONGLOG 2
+#endif
+
+#if (__SIZEOF_LONG__ == 8)
+#define LONG_ADD add.d
+#define LONG_ADDI addi.d
+#define LONG_SUB sub.d
+#define LONG_L ld.d
+#define LONG_S st.d
+#define LONG_SLL slli.d
+#define LONG_SLLV sll.d
+#define LONG_SRL srli.d
+#define LONG_SRLV srl.d
+#define LONG_SRA srai.d
+#define LONG_SRAV sra.d
+
+#ifdef __ASSEMBLY__
+#define LONG .dword
+#endif
+#define LONGSIZE 8
+#define LONGMASK 7
+#define LONGLOG 3
+#endif
+
+/*
+ * How to add/sub/load/store/shift pointers.
+ */
+#if (__SIZEOF_POINTER__ == 4)
+#define PTR_ADD add.w
+#define PTR_ADDI addi.w
+#define PTR_SUB sub.w
+#define PTR_L ld.w
+#define PTR_S st.w
+#define PTR_LI li.w
+#define PTR_SLL slli.w
+#define PTR_SLLV sll.w
+#define PTR_SRL srli.w
+#define PTR_SRLV srl.w
+#define PTR_SRA srai.w
+#define PTR_SRAV sra.w
+
+#define PTR_SCALESHIFT 2
+
+#ifdef __ASSEMBLY__
+#define PTR .word
+#endif
+#define PTRSIZE 4
+#define PTRLOG 2
+#endif
+
+#if (__SIZEOF_POINTER__ == 8)
+#define PTR_ADD add.d
+#define PTR_ADDI addi.d
+#define PTR_SUB sub.d
+#define PTR_L ld.d
+#define PTR_S st.d
+#define PTR_LI li.d
+#define PTR_SLL slli.d
+#define PTR_SLLV sll.d
+#define PTR_SRL srli.d
+#define PTR_SRLV srl.d
+#define PTR_SRA srai.d
+#define PTR_SRAV sra.d
+
+#define PTR_SCALESHIFT 3
+
+#ifdef __ASSEMBLY__
+#define PTR .dword
+#endif
+#define PTRSIZE 8
+#define PTRLOG 3
+#endif
+
+#endif /* __ASM_ASM_H */
diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h
new file mode 100644
index 000000000000..a1a04083bd67
--- /dev/null
+++ b/arch/loongarch/include/asm/asmmacro.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_ASMMACRO_H
+#define _ASM_ASMMACRO_H
+
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+#include <asm/loongarch.h>
+
+ .macro parse_v var val
+ \var = \val
+ .endm
+
+ .macro parse_r var r
+ \var = -1
+ .ifc \r, $r0
+ \var = 0
+ .endif
+ .ifc \r, $r1
+ \var = 1
+ .endif
+ .ifc \r, $r2
+ \var = 2
+ .endif
+ .ifc \r, $r3
+ \var = 3
+ .endif
+ .ifc \r, $r4
+ \var = 4
+ .endif
+ .ifc \r, $r5
+ \var = 5
+ .endif
+ .ifc \r, $r6
+ \var = 6
+ .endif
+ .ifc \r, $r7
+ \var = 7
+ .endif
+ .ifc \r, $r8
+ \var = 8
+ .endif
+ .ifc \r, $r9
+ \var = 9
+ .endif
+ .ifc \r, $r10
+ \var = 10
+ .endif
+ .ifc \r, $r11
+ \var = 11
+ .endif
+ .ifc \r, $r12
+ \var = 12
+ .endif
+ .ifc \r, $r13
+ \var = 13
+ .endif
+ .ifc \r, $r14
+ \var = 14
+ .endif
+ .ifc \r, $r15
+ \var = 15
+ .endif
+ .ifc \r, $r16
+ \var = 16
+ .endif
+ .ifc \r, $r17
+ \var = 17
+ .endif
+ .ifc \r, $r18
+ \var = 18
+ .endif
+ .ifc \r, $r19
+ \var = 19
+ .endif
+ .ifc \r, $r20
+ \var = 20
+ .endif
+ .ifc \r, $r21
+ \var = 21
+ .endif
+ .ifc \r, $r22
+ \var = 22
+ .endif
+ .ifc \r, $r23
+ \var = 23
+ .endif
+ .ifc \r, $r24
+ \var = 24
+ .endif
+ .ifc \r, $r25
+ \var = 25
+ .endif
+ .ifc \r, $r26
+ \var = 26
+ .endif
+ .ifc \r, $r27
+ \var = 27
+ .endif
+ .ifc \r, $r28
+ \var = 28
+ .endif
+ .ifc \r, $r29
+ \var = 29
+ .endif
+ .ifc \r, $r30
+ \var = 30
+ .endif
+ .ifc \r, $r31
+ \var = 31
+ .endif
+ .iflt \var
+ .error "Unable to parse register name \r"
+ .endif
+ .endm
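+
+/*
+ * parse_r resolves a register name to its 5-bit encoding at assembly
+ * time: "parse_r rd, $r12" sets rd = 12. This is typically used to
+ * hand-encode instructions that the assembler does not (yet) accept.
+ */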
+
+ .macro cpu_save_nonscratch thread
+ stptr.d s0, \thread, THREAD_REG23
+ stptr.d s1, \thread, THREAD_REG24
+ stptr.d s2, \thread, THREAD_REG25
+ stptr.d s3, \thread, THREAD_REG26
+ stptr.d s4, \thread, THREAD_REG27
+ stptr.d s5, \thread, THREAD_REG28
+ stptr.d s6, \thread, THREAD_REG29
+ stptr.d s7, \thread, THREAD_REG30
+ stptr.d s8, \thread, THREAD_REG31
+ stptr.d sp, \thread, THREAD_REG03
+ stptr.d fp, \thread, THREAD_REG22
+ .endm
+
+ .macro cpu_restore_nonscratch thread
+ ldptr.d s0, \thread, THREAD_REG23
+ ldptr.d s1, \thread, THREAD_REG24
+ ldptr.d s2, \thread, THREAD_REG25
+ ldptr.d s3, \thread, THREAD_REG26
+ ldptr.d s4, \thread, THREAD_REG27
+ ldptr.d s5, \thread, THREAD_REG28
+ ldptr.d s6, \thread, THREAD_REG29
+ ldptr.d s7, \thread, THREAD_REG30
+ ldptr.d s8, \thread, THREAD_REG31
+ ldptr.d ra, \thread, THREAD_REG01
+ ldptr.d sp, \thread, THREAD_REG03
+ ldptr.d fp, \thread, THREAD_REG22
+ .endm
+
+ .macro fpu_save_csr thread tmp
+ movfcsr2gr \tmp, fcsr0
+ stptr.w \tmp, \thread, THREAD_FCSR
+ .endm
+
+ .macro fpu_restore_csr thread tmp
+ ldptr.w \tmp, \thread, THREAD_FCSR
+ movgr2fcsr fcsr0, \tmp
+ .endm
+
+ .macro fpu_save_cc thread tmp0 tmp1
+ movcf2gr \tmp0, $fcc0
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc1
+ bstrins.d \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc2
+ bstrins.d \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc3
+ bstrins.d \tmp1, \tmp0, 31, 24
+ movcf2gr \tmp0, $fcc4
+ bstrins.d \tmp1, \tmp0, 39, 32
+ movcf2gr \tmp0, $fcc5
+ bstrins.d \tmp1, \tmp0, 47, 40
+ movcf2gr \tmp0, $fcc6
+ bstrins.d \tmp1, \tmp0, 55, 48
+ movcf2gr \tmp0, $fcc7
+ bstrins.d \tmp1, \tmp0, 63, 56
+ stptr.d \tmp1, \thread, THREAD_FCC
+ .endm
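+
+/*
+ * fpu_save_cc packs $fcc0..$fcc7 into one 64-bit word, one byte per
+ * flag ($fccN lands in bits 8N+7..8N); fpu_restore_cc below undoes the
+ * packing with matching bstrpick.d/movgr2cf pairs.
+ */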
+
+ .macro fpu_restore_cc thread tmp0 tmp1
+ ldptr.d \tmp0, \thread, THREAD_FCC
+ bstrpick.d \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc0, \tmp1
+ bstrpick.d \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc1, \tmp1
+ bstrpick.d \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc2, \tmp1
+ bstrpick.d \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc3, \tmp1
+ bstrpick.d \tmp1, \tmp0, 39, 32
+ movgr2cf $fcc4, \tmp1
+ bstrpick.d \tmp1, \tmp0, 47, 40
+ movgr2cf $fcc5, \tmp1
+ bstrpick.d \tmp1, \tmp0, 55, 48
+ movgr2cf $fcc6, \tmp1
+ bstrpick.d \tmp1, \tmp0, 63, 56
+ movgr2cf $fcc7, \tmp1
+ .endm
+
+ .macro fpu_save_double thread tmp
+ li.w \tmp, THREAD_FPR0
+ PTR_ADD \tmp, \tmp, \thread
+ fst.d $f0, \tmp, THREAD_FPR0 - THREAD_FPR0
+ fst.d $f1, \tmp, THREAD_FPR1 - THREAD_FPR0
+ fst.d $f2, \tmp, THREAD_FPR2 - THREAD_FPR0
+ fst.d $f3, \tmp, THREAD_FPR3 - THREAD_FPR0
+ fst.d $f4, \tmp, THREAD_FPR4 - THREAD_FPR0
+ fst.d $f5, \tmp, THREAD_FPR5 - THREAD_FPR0
+ fst.d $f6, \tmp, THREAD_FPR6 - THREAD_FPR0
+ fst.d $f7, \tmp, THREAD_FPR7 - THREAD_FPR0
+ fst.d $f8, \tmp, THREAD_FPR8 - THREAD_FPR0
+ fst.d $f9, \tmp, THREAD_FPR9 - THREAD_FPR0
+ fst.d $f10, \tmp, THREAD_FPR10 - THREAD_FPR0
+ fst.d $f11, \tmp, THREAD_FPR11 - THREAD_FPR0
+ fst.d $f12, \tmp, THREAD_FPR12 - THREAD_FPR0
+ fst.d $f13, \tmp, THREAD_FPR13 - THREAD_FPR0
+ fst.d $f14, \tmp, THREAD_FPR14 - THREAD_FPR0
+ fst.d $f15, \tmp, THREAD_FPR15 - THREAD_FPR0
+ fst.d $f16, \tmp, THREAD_FPR16 - THREAD_FPR0
+ fst.d $f17, \tmp, THREAD_FPR17 - THREAD_FPR0
+ fst.d $f18, \tmp, THREAD_FPR18 - THREAD_FPR0
+ fst.d $f19, \tmp, THREAD_FPR19 - THREAD_FPR0
+ fst.d $f20, \tmp, THREAD_FPR20 - THREAD_FPR0
+ fst.d $f21, \tmp, THREAD_FPR21 - THREAD_FPR0
+ fst.d $f22, \tmp, THREAD_FPR22 - THREAD_FPR0
+ fst.d $f23, \tmp, THREAD_FPR23 - THREAD_FPR0
+ fst.d $f24, \tmp, THREAD_FPR24 - THREAD_FPR0
+ fst.d $f25, \tmp, THREAD_FPR25 - THREAD_FPR0
+ fst.d $f26, \tmp, THREAD_FPR26 - THREAD_FPR0
+ fst.d $f27, \tmp, THREAD_FPR27 - THREAD_FPR0
+ fst.d $f28, \tmp, THREAD_FPR28 - THREAD_FPR0
+ fst.d $f29, \tmp, THREAD_FPR29 - THREAD_FPR0
+ fst.d $f30, \tmp, THREAD_FPR30 - THREAD_FPR0
+ fst.d $f31, \tmp, THREAD_FPR31 - THREAD_FPR0
+ .endm
+
+ .macro fpu_restore_double thread tmp
+ li.w \tmp, THREAD_FPR0
+ PTR_ADD \tmp, \tmp, \thread
+ fld.d $f0, \tmp, THREAD_FPR0 - THREAD_FPR0
+ fld.d $f1, \tmp, THREAD_FPR1 - THREAD_FPR0
+ fld.d $f2, \tmp, THREAD_FPR2 - THREAD_FPR0
+ fld.d $f3, \tmp, THREAD_FPR3 - THREAD_FPR0
+ fld.d $f4, \tmp, THREAD_FPR4 - THREAD_FPR0
+ fld.d $f5, \tmp, THREAD_FPR5 - THREAD_FPR0
+ fld.d $f6, \tmp, THREAD_FPR6 - THREAD_FPR0
+ fld.d $f7, \tmp, THREAD_FPR7 - THREAD_FPR0
+ fld.d $f8, \tmp, THREAD_FPR8 - THREAD_FPR0
+ fld.d $f9, \tmp, THREAD_FPR9 - THREAD_FPR0
+ fld.d $f10, \tmp, THREAD_FPR10 - THREAD_FPR0
+ fld.d $f11, \tmp, THREAD_FPR11 - THREAD_FPR0
+ fld.d $f12, \tmp, THREAD_FPR12 - THREAD_FPR0
+ fld.d $f13, \tmp, THREAD_FPR13 - THREAD_FPR0
+ fld.d $f14, \tmp, THREAD_FPR14 - THREAD_FPR0
+ fld.d $f15, \tmp, THREAD_FPR15 - THREAD_FPR0
+ fld.d $f16, \tmp, THREAD_FPR16 - THREAD_FPR0
+ fld.d $f17, \tmp, THREAD_FPR17 - THREAD_FPR0
+ fld.d $f18, \tmp, THREAD_FPR18 - THREAD_FPR0
+ fld.d $f19, \tmp, THREAD_FPR19 - THREAD_FPR0
+ fld.d $f20, \tmp, THREAD_FPR20 - THREAD_FPR0
+ fld.d $f21, \tmp, THREAD_FPR21 - THREAD_FPR0
+ fld.d $f22, \tmp, THREAD_FPR22 - THREAD_FPR0
+ fld.d $f23, \tmp, THREAD_FPR23 - THREAD_FPR0
+ fld.d $f24, \tmp, THREAD_FPR24 - THREAD_FPR0
+ fld.d $f25, \tmp, THREAD_FPR25 - THREAD_FPR0
+ fld.d $f26, \tmp, THREAD_FPR26 - THREAD_FPR0
+ fld.d $f27, \tmp, THREAD_FPR27 - THREAD_FPR0
+ fld.d $f28, \tmp, THREAD_FPR28 - THREAD_FPR0
+ fld.d $f29, \tmp, THREAD_FPR29 - THREAD_FPR0
+ fld.d $f30, \tmp, THREAD_FPR30 - THREAD_FPR0
+ fld.d $f31, \tmp, THREAD_FPR31 - THREAD_FPR0
+ .endm
+
+.macro not dst src
+ nor \dst, \src, zero
+.endm
+
+.macro bgt r0 r1 label
+ blt \r1, \r0, \label
+.endm
+
+.macro bltz r0 label
+ blt \r0, zero, \label
+.endm
+
+.macro bgez r0 label
+ bge \r0, zero, \label
+.endm
+
+#endif /* _ASM_ASMMACRO_H */
diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
new file mode 100644
index 000000000000..979367ad4e2c
--- /dev/null
+++ b/arch/loongarch/include/asm/atomic.h
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Atomic operations.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+#include <asm/compiler.h>
+
+#if __SIZEOF_LONG__ == 4
+#define __LL "ll.w "
+#define __SC "sc.w "
+#define __AMADD "amadd.w "
+#define __AMAND_DB "amand_db.w "
+#define __AMOR_DB "amor_db.w "
+#define __AMXOR_DB "amxor_db.w "
+#elif __SIZEOF_LONG__ == 8
+#define __LL "ll.d "
+#define __SC "sc.d "
+#define __AMADD "amadd.d "
+#define __AMAND_DB "amand_db.d "
+#define __AMOR_DB "amor_db.d "
+#define __AMXOR_DB "amxor_db.d "
+#endif
+
+#define ATOMIC_INIT(i) { (i) }
+
+/*
+ * arch_atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+
+/*
+ * arch_atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
+
+#define ATOMIC_OP(op, I, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ __asm__ __volatile__( \
+ "am"#asm_op"_db.w" " $zero, %1, %0 \n" \
+ : "+ZB" (v->counter) \
+ : "r" (I) \
+ : "memory"); \
+}
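+
+/*
+ * The AM* instructions above perform the whole read-modify-write as a
+ * single atomic memory operation. The plain ops discard the old value
+ * by targeting $zero; the _return/fetch variants below capture it in a
+ * register instead.
+ */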
+
+#define ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+{ \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "am"#asm_op"_db.w" " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result c_op I; \
+}
+
+#define ATOMIC_FETCH_OP(op, I, asm_op) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "am"#asm_op"_db.w" " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result; \
+}
+
+#define ATOMIC_OPS(op, I, asm_op, c_op) \
+ ATOMIC_OP(op, I, asm_op) \
+ ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
+ ATOMIC_FETCH_OP(op, I, asm_op)
+
+ATOMIC_OPS(add, i, add, +)
+ATOMIC_OPS(sub, -i, add, +)
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+
+#define ATOMIC_OPS(op, I, asm_op) \
+ ATOMIC_OP(op, I, asm_op) \
+ ATOMIC_FETCH_OP(op, I, asm_op)
+
+ATOMIC_OPS(and, i, and)
+ATOMIC_OPS(or, i, or)
+ATOMIC_OPS(xor, i, xor)
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: ll.w %[p], %[c]\n"
+ " beq %[p], %[u], 1f\n"
+ " add.w %[rc], %[p], %[a]\n"
+ " sc.w %[rc], %[c]\n"
+ " beqz %[rc], 0b\n"
+ " b 2f\n"
+ "1:\n"
+ __WEAK_LLSC_MB
+ "2:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc),
+ [c]"=ZB" (v->counter)
+ : [a]"r" (a), [u]"r" (u)
+ : "memory");
+
+ return prev;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
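+
+/*
+ * The LL/SC loop above retries until sc.w succeeds, and branches out
+ * early when the observed value equals @u; e.g.
+ * arch_atomic_fetch_add_unless(&v, 1, 0) only increments a refcount
+ * that is still nonzero.
+ */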
+
+/*
+ * arch_atomic_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically test @v and subtract @i if @v is greater than or equal to @i.
+ * The function returns the old value of @v minus @i.
+ */
+static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
+{
+ int result;
+ int temp;
+
+ if (__builtin_constant_p(i)) {
+ __asm__ __volatile__(
+ "1: ll.w %1, %2 # atomic_sub_if_positive\n"
+ " addi.w %0, %1, %3 \n"
+ " or %1, %0, $zero \n"
+ " blt %0, $zero, 2f \n"
+ " sc.w %1, %2 \n"
+ " beq $zero, %1, 1b \n"
+ "2: \n"
+ __WEAK_LLSC_MB
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF_SMALL_ASM() (v->counter)
+ : "I" (-i));
+ } else {
+ __asm__ __volatile__(
+ "1: ll.w %1, %2 # atomic_sub_if_positive\n"
+ " sub.w %0, %1, %3 \n"
+ " or %1, %0, $zero \n"
+ " blt %0, $zero, 2f \n"
+ " sc.w %1, %2 \n"
+ " beq $zero, %1, 1b \n"
+ "2: \n"
+ __WEAK_LLSC_MB
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF_SMALL_ASM() (v->counter)
+ : "r" (i));
+ }
+
+ return result;
+}
+
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
+
+/*
+ * arch_atomic_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ */
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v)
+
+#ifdef CONFIG_64BIT
+
+#define ATOMIC64_INIT(i) { (i) }
+
+/*
+ * arch_atomic64_read - read atomic variable
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
+
+/*
+ * arch_atomic64_set - set atomic variable
+ * @v: pointer of type atomic64_t
+ * @i: required value
+ */
+#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
+
+#define ATOMIC64_OP(op, I, asm_op) \
+static inline void arch_atomic64_##op(long i, atomic64_t *v) \
+{ \
+ __asm__ __volatile__( \
+ "am"#asm_op"_db.d " " $zero, %1, %0 \n" \
+ : "+ZB" (v->counter) \
+ : "r" (I) \
+ : "memory"); \
+}
+
+#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
+static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \
+{ \
+ long result; \
+ __asm__ __volatile__( \
+ "am"#asm_op"_db.d " " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result c_op I; \
+}
+
+#define ATOMIC64_FETCH_OP(op, I, asm_op) \
+static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
+{ \
+ long result; \
+ \
+ __asm__ __volatile__( \
+ "am"#asm_op"_db.d " " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result; \
+}
+
+#define ATOMIC64_OPS(op, I, asm_op, c_op) \
+ ATOMIC64_OP(op, I, asm_op) \
+ ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
+ ATOMIC64_FETCH_OP(op, I, asm_op)
+
+ATOMIC64_OPS(add, i, add, +)
+ATOMIC64_OPS(sub, -i, add, +)
+
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+
+#define ATOMIC64_OPS(op, I, asm_op) \
+ ATOMIC64_OP(op, I, asm_op) \
+ ATOMIC64_FETCH_OP(op, I, asm_op)
+
+ATOMIC64_OPS(and, i, and)
+ATOMIC64_OPS(or, i, or)
+ATOMIC64_OPS(xor, i, xor)
+
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+{
+ long prev, rc;
+
+ __asm__ __volatile__ (
+ "0: ll.d %[p], %[c]\n"
+ " beq %[p], %[u], 1f\n"
+ " add.d %[rc], %[p], %[a]\n"
+ " sc.d %[rc], %[c]\n"
+ " beqz %[rc], 0b\n"
+ " b 2f\n"
+ "1:\n"
+ __WEAK_LLSC_MB
+ "2:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc),
+ [c] "=ZB" (v->counter)
+ : [a]"r" (a), [u]"r" (u)
+ : "memory");
+
+ return prev;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+
+/*
+ * arch_atomic64_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically test @v and subtract @i if @v is greater than or equal to @i.
+ * The function returns the old value of @v minus @i.
+ */
+static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
+{
+ long result;
+ long temp;
+
+ if (__builtin_constant_p(i)) {
+ __asm__ __volatile__(
+ "1: ll.d %1, %2 # atomic64_sub_if_positive \n"
+ " addi.d %0, %1, %3 \n"
+ " or %1, %0, $zero \n"
+ " blt %0, $zero, 2f \n"
+ " sc.d %1, %2 \n"
+ " beq %1, $zero, 1b \n"
+ "2: \n"
+ __WEAK_LLSC_MB
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF_SMALL_ASM() (v->counter)
+ : "I" (-i));
+ } else {
+ __asm__ __volatile__(
+ "1: ll.d %1, %2 # atomic64_sub_if_positive \n"
+ " sub.d %0, %1, %3 \n"
+ " or %1, %0, $zero \n"
+ " blt %0, $zero, 2f \n"
+ " sc.d %1, %2 \n"
+ " beq %1, $zero, 1b \n"
+ "2: \n"
+ __WEAK_LLSC_MB
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF_SMALL_ASM() (v->counter)
+ : "r" (i));
+ }
+
+ return result;
+}
+
+#define arch_atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
+
+/*
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ */
+#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v)
+
+#endif /* CONFIG_64BIT */
+
+#endif /* _ASM_ATOMIC_H */
diff --git a/arch/loongarch/include/asm/barrier.h b/arch/loongarch/include/asm/barrier.h
new file mode 100644
index 000000000000..b6517eeeb141
--- /dev/null
+++ b/arch/loongarch/include/asm/barrier.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#define __sync() __asm__ __volatile__("dbar 0" : : : "memory")
+
+#define fast_wmb() __sync()
+#define fast_rmb() __sync()
+#define fast_mb() __sync()
+#define fast_iob() __sync()
+#define wbflush() __sync()
+
+#define wmb() fast_wmb()
+#define rmb() fast_rmb()
+#define mb() fast_mb()
+#define iob() fast_iob()
+
+#define __smp_mb() __asm__ __volatile__("dbar 0" : : : "memory")
+#define __smp_rmb() __asm__ __volatile__("dbar 0" : : : "memory")
+#define __smp_wmb() __asm__ __volatile__("dbar 0" : : : "memory")
+
+#ifdef CONFIG_SMP
+#define __WEAK_LLSC_MB " dbar 0 \n"
+#else
+#define __WEAK_LLSC_MB " \n"
+#endif
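+
+/*
+ * __WEAK_LLSC_MB orders the exit paths of LL/SC sequences: on SMP it is
+ * a full "dbar 0" barrier, on UP it assembles to nothing.
+ */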
+
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
+
+/**
+ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ *
+ * Returns:
+ * 0 - (@index < @size)
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+{
+ unsigned long mask;
+
+ __asm__ __volatile__(
+ "sltu %0, %1, %2\n\t"
+#if (__SIZEOF_LONG__ == 4)
+ "sub.w %0, $r0, %0\n\t"
+#elif (__SIZEOF_LONG__ == 8)
+ "sub.d %0, $r0, %0\n\t"
+#endif
+ : "=r" (mask)
+ : "r" (index), "r" (size)
+ :);
+
+ return mask;
+}
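+
+/*
+ * Callers use the mask in the usual way, mirroring the generic
+ * array_index_nospec() pattern:
+ *
+ *	index &= array_index_mask_nospec(index, size);
+ *
+ * which forces index to 0 when index >= size, so a mispredicted bounds
+ * check cannot be used to leak memory.
+ */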
+
+#define __smp_load_acquire(p) \
+({ \
+ union { typeof(*p) __val; char __c[1]; } __u; \
+ unsigned long __tmp = 0; \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 1: \
+ *(__u8 *)__u.__c = *(volatile __u8 *)p; \
+ __smp_mb(); \
+ break; \
+ case 2: \
+ *(__u16 *)__u.__c = *(volatile __u16 *)p; \
+ __smp_mb(); \
+ break; \
+ case 4: \
+ __asm__ __volatile__( \
+ "amor_db.w %[val], %[tmp], %[mem] \n" \
+ : [val] "=&r" (*(__u32 *)__u.__c) \
+ : [mem] "ZB" (*(u32 *) p), [tmp] "r" (__tmp) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__( \
+ "amor_db.d %[val], %[tmp], %[mem] \n" \
+ : [val] "=&r" (*(__u64 *)__u.__c) \
+ : [mem] "ZB" (*(u64 *) p), [tmp] "r" (__tmp) \
+ : "memory"); \
+ break; \
+ } \
+ (typeof(*p))__u.__val; \
+})
+
+#define __smp_store_release(p, v) \
+do { \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(*p)) (v) }; \
+ unsigned long __tmp; \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 1: \
+ __smp_mb(); \
+ *(volatile __u8 *)p = *(__u8 *)__u.__c; \
+ break; \
+ case 2: \
+ __smp_mb(); \
+ *(volatile __u16 *)p = *(__u16 *)__u.__c; \
+ break; \
+ case 4: \
+ __asm__ __volatile__( \
+ "amswap_db.w %[tmp], %[val], %[mem] \n" \
+ : [mem] "+ZB" (*(u32 *)p), [tmp] "=&r" (__tmp) \
+ : [val] "r" (*(__u32 *)__u.__c) \
+ : ); \
+ break; \
+ case 8: \
+ __asm__ __volatile__( \
+ "amswap_db.d %[tmp], %[val], %[mem] \n" \
+ : [mem] "+ZB" (*(u64 *)p), [tmp] "=&r" (__tmp) \
+ : [val] "r" (*(__u64 *)__u.__c) \
+ : ); \
+ break; \
+ } \
+} while (0)
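+
+/*
+ * The _db ("data barrier") AM forms used above carry full-barrier
+ * semantics, so the 4- and 8-byte acquire/release cases need no
+ * explicit dbar; the 1- and 2-byte cases fall back to plain accesses
+ * fenced with __smp_mb().
+ */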
+
+#define __smp_store_mb(p, v) \
+do { \
+ union { typeof(p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(p)) (v) }; \
+ unsigned long __tmp; \
+ switch (sizeof(p)) { \
+ case 1: \
+ *(volatile __u8 *)&p = *(__u8 *)__u.__c; \
+ __smp_mb(); \
+ break; \
+ case 2: \
+ *(volatile __u16 *)&p = *(__u16 *)__u.__c; \
+ __smp_mb(); \
+ break; \
+ case 4: \
+ __asm__ __volatile__( \
+ "amswap_db.w %[tmp], %[val], %[mem] \n" \
+ : [mem] "+ZB" (*(u32 *)&p), [tmp] "=&r" (__tmp) \
+ : [val] "r" (*(__u32 *)__u.__c) \
+ : ); \
+ break; \
+ case 8: \
+ __asm__ __volatile__( \
+ "amswap_db.d %[tmp], %[val], %[mem] \n" \
+ : [mem] "+ZB" (*(u64 *)&p), [tmp] "=&r" (__tmp) \
+ : [val] "r" (*(__u64 *)__u.__c) \
+ : ); \
+ break; \
+ } \
+} while (0)
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASM_BARRIER_H */
diff --git a/arch/loongarch/include/asm/bitops.h b/arch/loongarch/include/asm/bitops.h
new file mode 100644
index 000000000000..69e00f8d8034
--- /dev/null
+++ b/arch/loongarch/include/asm/bitops.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#include <linux/compiler.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm/barrier.h>
+
+#include <asm-generic/bitops/builtin-ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-__fls.h>
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls64.h>
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* _ASM_BITOPS_H */
diff --git a/arch/loongarch/include/asm/bitrev.h b/arch/loongarch/include/asm/bitrev.h
new file mode 100644
index 000000000000..46f275b9cdf7
--- /dev/null
+++ b/arch/loongarch/include/asm/bitrev.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __LOONGARCH_ASM_BITREV_H__
+#define __LOONGARCH_ASM_BITREV_H__
+
+#include <linux/swab.h>
+
+static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
+{
+ u32 ret;
+
+ asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(__swab32(x)));
+ return ret;
+}
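+
+/*
+ * bitrev.4b reverses the bits within each byte, so combining it with a
+ * byte swap reverses the whole word: __arch_bitrev32(0x1) == 0x80000000.
+ */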
+
+static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
+{
+ u16 ret;
+
+ asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(__swab16(x)));
+ return ret;
+}
+
+static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
+{
+ u8 ret;
+
+ asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(x));
+ return ret;
+}
+
+#endif /* __LOONGARCH_ASM_BITREV_H__ */
diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h
new file mode 100644
index 000000000000..9b8d49d9e61b
--- /dev/null
+++ b/arch/loongarch/include/asm/bootinfo.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_BOOTINFO_H
+#define _ASM_BOOTINFO_H
+
+#include <linux/types.h>
+#include <asm/setup.h>
+
+const char *get_system_type(void);
+
+extern void init_environ(void);
+extern void memblock_init(void);
+extern void platform_init(void);
+extern void plat_swiotlb_setup(void);
+extern int __init init_numa_memory(void);
+
+struct loongson_board_info {
+ int bios_size;
+ const char *bios_vendor;
+ const char *bios_version;
+ const char *bios_release_date;
+ const char *board_name;
+ const char *board_vendor;
+};
+
+struct loongson_system_configuration {
+ int nr_cpus;
+ int nr_nodes;
+ int nr_io_pics;
+ int boot_cpu_id;
+ int cores_per_node;
+ int cores_per_package;
+ const char *cpuname;
+};
+
+extern u64 efi_system_table;
+extern unsigned long fw_arg0, fw_arg1;
+extern struct loongson_board_info b_info;
+extern struct loongson_system_configuration loongson_sysconf;
+
+#endif /* _ASM_BOOTINFO_H */
diff --git a/arch/loongarch/include/asm/branch.h b/arch/loongarch/include/asm/branch.h
new file mode 100644
index 000000000000..3f33c89f35b4
--- /dev/null
+++ b/arch/loongarch/include/asm/branch.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_BRANCH_H
+#define _ASM_BRANCH_H
+
+#include <asm/ptrace.h>
+
+static inline unsigned long exception_era(struct pt_regs *regs)
+{
+ return regs->csr_era;
+}
+
+static inline int compute_return_era(struct pt_regs *regs)
+{
+ regs->csr_era += 4;
+ return 0;
+}
+
+#endif /* _ASM_BRANCH_H */
diff --git a/arch/loongarch/include/asm/bug.h b/arch/loongarch/include/asm/bug.h
new file mode 100644
index 000000000000..bda49108a76d
--- /dev/null
+++ b/arch/loongarch/include/asm/bug.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BUG_H
+#define __ASM_BUG_H
+
+#include <linux/compiler.h>
+
+#ifdef CONFIG_BUG
+
+#include <asm/break.h>
+
+static inline void __noreturn BUG(void)
+{
+ __asm__ __volatile__("break %0" : : "i" (BRK_BUG));
+ unreachable();
+}
+
+#define HAVE_ARCH_BUG
+
+#endif
+
+#include <asm-generic/bug.h>
+
+#endif /* __ASM_BUG_H */
diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h
new file mode 100644
index 000000000000..1b6d09617199
--- /dev/null
+++ b/arch/loongarch/include/asm/cache.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_CACHE_H
+#define _ASM_CACHE_H
+
+#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define __read_mostly __section(".data..read_mostly")
+
+#endif /* _ASM_CACHE_H */
diff --git a/arch/loongarch/include/asm/cacheflush.h b/arch/loongarch/include/asm/cacheflush.h
new file mode 100644
index 000000000000..670900141b7c
--- /dev/null
+++ b/arch/loongarch/include/asm/cacheflush.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/cpu-features.h>
+#include <asm/cacheops.h>
+
+extern void local_flush_icache_range(unsigned long start, unsigned long end);
+
+#define flush_icache_range local_flush_icache_range
+#define flush_icache_user_range local_flush_icache_range
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+#define flush_icache_page(vma, page) do { } while (0)
+#define flush_icache_user_page(vma, page, addr, len) do { } while (0)
+#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#define cache_op(op, addr) \
+ __asm__ __volatile__( \
+ " cacop %0, %1 \n" \
+ : \
+ : "i" (op), "ZC" (*(unsigned char *)(addr)))
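+
+/*
+ * cache_op() emits a single "cacop" instruction with the operation
+ * encoded as an immediate, e.g. cache_op(Hit_Invalidate_I, addr)
+ * invalidates the I-cache line containing addr.
+ */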
+
+static inline void flush_icache_line_indexed(unsigned long addr)
+{
+ cache_op(Index_Invalidate_I, addr);
+}
+
+static inline void flush_dcache_line_indexed(unsigned long addr)
+{
+ cache_op(Index_Writeback_Inv_D, addr);
+}
+
+static inline void flush_vcache_line_indexed(unsigned long addr)
+{
+ cache_op(Index_Writeback_Inv_V, addr);
+}
+
+static inline void flush_scache_line_indexed(unsigned long addr)
+{
+ cache_op(Index_Writeback_Inv_S, addr);
+}
+
+static inline void flush_icache_line(unsigned long addr)
+{
+ cache_op(Hit_Invalidate_I, addr);
+}
+
+static inline void flush_dcache_line(unsigned long addr)
+{
+ cache_op(Hit_Writeback_Inv_D, addr);
+}
+
+static inline void flush_vcache_line(unsigned long addr)
+{
+ cache_op(Hit_Writeback_Inv_V, addr);
+}
+
+static inline void flush_scache_line(unsigned long addr)
+{
+ cache_op(Hit_Writeback_Inv_S, addr);
+}
+
+#include <asm-generic/cacheflush.h>
+
+#endif /* _ASM_CACHEFLUSH_H */
diff --git a/arch/loongarch/include/asm/cacheops.h b/arch/loongarch/include/asm/cacheops.h
new file mode 100644
index 000000000000..dc280efecebd
--- /dev/null
+++ b/arch/loongarch/include/asm/cacheops.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cache operations for the cache instruction.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_CACHEOPS_H
+#define __ASM_CACHEOPS_H
+
+/*
+ * Most cache ops are split into a 2-bit field identifying the cache and a
+ * 3-bit field identifying the cache operation.
+ */
+#define CacheOp_Cache 0x03
+#define CacheOp_Op 0x1c
+
+#define Cache_I 0x00
+#define Cache_D 0x01
+#define Cache_V 0x02
+#define Cache_S 0x03
+
+#define Index_Invalidate 0x08
+#define Index_Writeback_Inv 0x08
+#define Hit_Invalidate 0x10
+#define Hit_Writeback_Inv 0x10
+#define CacheOp_User_Defined 0x18
+
+#define Index_Invalidate_I (Cache_I | Index_Invalidate)
+#define Index_Writeback_Inv_D (Cache_D | Index_Writeback_Inv)
+#define Index_Writeback_Inv_V (Cache_V | Index_Writeback_Inv)
+#define Index_Writeback_Inv_S (Cache_S | Index_Writeback_Inv)
+#define Hit_Invalidate_I (Cache_I | Hit_Invalidate)
+#define Hit_Writeback_Inv_D (Cache_D | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_V (Cache_V | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_S (Cache_S | Hit_Writeback_Inv)
+
+#endif /* __ASM_CACHEOPS_H */
diff --git a/arch/loongarch/include/asm/clocksource.h b/arch/loongarch/include/asm/clocksource.h
new file mode 100644
index 000000000000..58e64aa05d26
--- /dev/null
+++ b/arch/loongarch/include/asm/clocksource.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_CLOCKSOURCE_H
+#define __ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif /* __ASM_CLOCKSOURCE_H */
diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..75b3a4478652
--- /dev/null
+++ b/arch/loongarch/include/asm/cmpxchg.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <asm/barrier.h>
+#include <linux/build_bug.h>
+
+#define __xchg_asm(amswap_db, m, val) \
+({ \
+ __typeof(val) __ret; \
+ \
+ __asm__ __volatile__ ( \
+ " "amswap_db" %1, %z2, %0 \n" \
+ : "+ZB" (*m), "=&r" (__ret) \
+ : "Jr" (val) \
+ : "memory"); \
+ \
+ __ret; \
+})
+
+static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
+ int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
+
+ case 8:
+ return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);
+
+ default:
+ BUILD_BUG();
+ }
+
+ return 0;
+}
+
+#define arch_xchg(ptr, x) \
+({ \
+ __typeof__(*(ptr)) __res; \
+ \
+ __res = (__typeof__(*(ptr))) \
+ __xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
+ \
+ __res; \
+})
+
+#define __cmpxchg_asm(ld, st, m, old, new) \
+({ \
+ __typeof(old) __ret; \
+ \
+ __asm__ __volatile__( \
+ "1: " ld " %0, %2 # __cmpxchg_asm \n" \
+ " bne %0, %z3, 2f \n" \
+ " or $t0, %z4, $zero \n" \
+ " " st " $t0, %1 \n" \
+ " beq $zero, $t0, 1b \n" \
+ "2: \n" \
+ __WEAK_LLSC_MB \
+ : "=&r" (__ret), "=ZB"(*m) \
+ : "ZB"(*m), "Jr" (old), "Jr" (new) \
+ : "t0", "memory"); \
+ \
+ __ret; \
+})
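+
+/*
+ * This is a classic LL/SC compare-and-exchange: it retries when the
+ * store-conditional fails and bails out early when the loaded value
+ * differs from @old, so the return value always reflects what was
+ * actually observed in memory.
+ */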
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
+ (u32)old, new);
+
+ case 8:
+ return __cmpxchg_asm("ll.d", "sc.d", (volatile u64 *)ptr,
+ (u64)old, new);
+
+ default:
+ BUILD_BUG();
+ }
+
+ return 0;
+}
+
+#define arch_cmpxchg_local(ptr, old, new) \
+ ((__typeof__(*(ptr))) \
+ __cmpxchg((ptr), \
+ (unsigned long)(__typeof__(*(ptr)))(old), \
+ (unsigned long)(__typeof__(*(ptr)))(new), \
+ sizeof(*(ptr))))
+
+#define arch_cmpxchg(ptr, old, new) \
+({ \
+ __typeof__(*(ptr)) __res; \
+ \
+ __res = arch_cmpxchg_local((ptr), (old), (new)); \
+ \
+ __res; \
+})
+
+#ifdef CONFIG_64BIT
+#define arch_cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
+ })
+
+#define arch_cmpxchg64(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg((ptr), (o), (n)); \
+ })
+#else
+#include <asm-generic/cmpxchg-local.h>
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
+#endif
+
+#endif /* __ASM_CMPXCHG_H */
diff --git a/arch/loongarch/include/asm/compiler.h b/arch/loongarch/include/asm/compiler.h
new file mode 100644
index 000000000000..657cebe70ace
--- /dev/null
+++ b/arch/loongarch/include/asm/compiler.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_COMPILER_H
+#define _ASM_COMPILER_H
+
+#define GCC_OFF_SMALL_ASM() "ZC"
+
+#define LOONGARCH_ISA_LEVEL "loongarch"
+#define LOONGARCH_ISA_ARCH_LEVEL "arch=loongarch"
+#define LOONGARCH_ISA_LEVEL_RAW loongarch
+#define LOONGARCH_ISA_ARCH_LEVEL_RAW LOONGARCH_ISA_LEVEL_RAW
+
+#endif /* _ASM_COMPILER_H */
diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
new file mode 100644
index 000000000000..a8d87c40a0eb
--- /dev/null
+++ b/arch/loongarch/include/asm/cpu-features.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef __ASM_CPU_FEATURES_H
+#define __ASM_CPU_FEATURES_H
+
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+
+#define cpu_opt(opt) (cpu_data[0].options & (opt))
+#define cpu_has(feat) (cpu_data[0].options & BIT_ULL(feat))
+
+#define cpu_has_loongarch (cpu_has_loongarch32 | cpu_has_loongarch64)
+#define cpu_has_loongarch32 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_32BIT)
+#define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
+
+#define cpu_icache_line_size() cpu_data[0].icache.linesz
+#define cpu_dcache_line_size() cpu_data[0].dcache.linesz
+#define cpu_vcache_line_size() cpu_data[0].vcache.linesz
+#define cpu_scache_line_size() cpu_data[0].scache.linesz
+
+#ifdef CONFIG_32BIT
+# define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
+# define cpu_vabits 31
+# define cpu_pabits 31
+#endif
+
+#ifdef CONFIG_64BIT
+# define cpu_has_64bits 1
+# define cpu_vabits cpu_data[0].vabits
+# define cpu_pabits cpu_data[0].pabits
+# define __NEED_ADDRBITS_PROBE
+#endif
+
+/*
+ * SMP assumption: Options of CPU 0 are a superset of all processors.
+ * This is true for all known LoongArch systems.
+ */
+#define cpu_has_cpucfg cpu_opt(LOONGARCH_CPU_CPUCFG)
+#define cpu_has_lam cpu_opt(LOONGARCH_CPU_LAM)
+#define cpu_has_ual cpu_opt(LOONGARCH_CPU_UAL)
+#define cpu_has_fpu cpu_opt(LOONGARCH_CPU_FPU)
+#define cpu_has_lsx cpu_opt(LOONGARCH_CPU_LSX)
+#define cpu_has_lasx cpu_opt(LOONGARCH_CPU_LASX)
+#define cpu_has_complex cpu_opt(LOONGARCH_CPU_COMPLEX)
+#define cpu_has_crypto cpu_opt(LOONGARCH_CPU_CRYPTO)
+#define cpu_has_lvz cpu_opt(LOONGARCH_CPU_LVZ)
+#define cpu_has_lbt_x86 cpu_opt(LOONGARCH_CPU_LBT_X86)
+#define cpu_has_lbt_arm cpu_opt(LOONGARCH_CPU_LBT_ARM)
+#define cpu_has_lbt_mips cpu_opt(LOONGARCH_CPU_LBT_MIPS)
+#define cpu_has_lbt (cpu_has_lbt_x86|cpu_has_lbt_arm|cpu_has_lbt_mips)
+#define cpu_has_csr cpu_opt(LOONGARCH_CPU_CSR)
+#define cpu_has_tlb cpu_opt(LOONGARCH_CPU_TLB)
+#define cpu_has_watch cpu_opt(LOONGARCH_CPU_WATCH)
+#define cpu_has_vint cpu_opt(LOONGARCH_CPU_VINT)
+#define cpu_has_csripi cpu_opt(LOONGARCH_CPU_CSRIPI)
+#define cpu_has_extioi cpu_opt(LOONGARCH_CPU_EXTIOI)
+#define cpu_has_prefetch cpu_opt(LOONGARCH_CPU_PREFETCH)
+#define cpu_has_pmp cpu_opt(LOONGARCH_CPU_PMP)
+#define cpu_has_perf cpu_opt(LOONGARCH_CPU_PMP)
+#define cpu_has_scalefreq cpu_opt(LOONGARCH_CPU_SCALEFREQ)
+#define cpu_has_flatmode cpu_opt(LOONGARCH_CPU_FLATMODE)
+#define cpu_has_eiodecode cpu_opt(LOONGARCH_CPU_EIODECODE)
+#define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID)
+#define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR)
+
+#endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/loongarch/include/asm/cpu-info.h b/arch/loongarch/include/asm/cpu-info.h
new file mode 100644
index 000000000000..b6c4f96079df
--- /dev/null
+++ b/arch/loongarch/include/asm/cpu-info.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_CPU_INFO_H
+#define __ASM_CPU_INFO_H
+
+#include <linux/cache.h>
+#include <linux/types.h>
+
+#include <asm/loongarch.h>
+
+/*
+ * Descriptor for a cache
+ */
+struct cache_desc {
+ unsigned int waysize; /* Bytes per way */
+ unsigned short sets; /* Number of lines per set */
+ unsigned char ways; /* Number of ways */
+ unsigned char linesz; /* Size of line in bytes */
+ unsigned char waybit; /* Bits to select in a cache set */
+ unsigned char flags; /* Flags describing cache properties */
+};
+
+struct cpuinfo_loongarch {
+ u64 asid_cache;
+ unsigned long asid_mask;
+
+ /*
+ * Capability and feature descriptor structure for LoongArch CPU
+ */
+ unsigned long long options;
+ unsigned int processor_id;
+ unsigned int fpu_vers;
+ unsigned int fpu_csr0;
+ unsigned int fpu_mask;
+ unsigned int cputype;
+ int isa_level;
+ int tlbsize;
+ int tlbsizemtlb;
+ int tlbsizestlbsets;
+ int tlbsizestlbways;
+ struct cache_desc icache; /* Primary I-cache */
+ struct cache_desc dcache; /* Primary D or combined I/D cache */
+ struct cache_desc vcache; /* Victim cache, between pcache and scache */
+ struct cache_desc scache; /* Secondary cache */
+ struct cache_desc tcache; /* Tertiary/split secondary cache */
+ int core; /* physical core number in package */
+	int package;	/* physical package number */
+ int vabits; /* Virtual Address size in bits */
+ int pabits; /* Physical Address size in bits */
+ unsigned int ksave_mask; /* Usable KSave mask. */
+	unsigned int watch_dreg_count;   /* Number of data breakpoints */
+	unsigned int watch_ireg_count;   /* Number of instruction breakpoints */
+ unsigned int watch_reg_use_cnt; /* min(NUM_WATCH_REGS, watch_dreg_count + watch_ireg_count), Usable by ptrace */
+} __aligned(SMP_CACHE_BYTES);
+
+extern struct cpuinfo_loongarch cpu_data[];
+#define boot_cpu_data cpu_data[0]
+#define current_cpu_data cpu_data[smp_processor_id()]
+#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
+
+extern void cpu_probe(void);
+
+extern const char *__cpu_family[];
+extern const char *__cpu_full_name[];
+#define cpu_family_string() __cpu_family[raw_smp_processor_id()]
+#define cpu_full_name_string() __cpu_full_name[raw_smp_processor_id()]
+
+struct seq_file;
+struct notifier_block;
+
+extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
+extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
+
+#define proc_cpuinfo_notifier(fn, pri) \
+({ \
+ static struct notifier_block fn##_nb = { \
+ .notifier_call = fn, \
+ .priority = pri \
+ }; \
+ \
+ register_proc_cpuinfo_notifier(&fn##_nb); \
+})
+
+struct proc_cpuinfo_notifier_args {
+ struct seq_file *m;
+ unsigned long n;
+};
+
+static inline bool cpus_are_siblings(int cpua, int cpub)
+{
+ struct cpuinfo_loongarch *infoa = &cpu_data[cpua];
+ struct cpuinfo_loongarch *infob = &cpu_data[cpub];
+
+ if (infoa->package != infob->package)
+ return false;
+
+ if (infoa->core != infob->core)
+ return false;
+
+ return true;
+}
+
+static inline unsigned long cpu_asid_mask(struct cpuinfo_loongarch *cpuinfo)
+{
+ return cpuinfo->asid_mask;
+}
+
+static inline void set_cpu_asid_mask(struct cpuinfo_loongarch *cpuinfo,
+ unsigned long asid_mask)
+{
+ cpuinfo->asid_mask = asid_mask;
+}
+
+#endif /* __ASM_CPU_INFO_H */
diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
new file mode 100644
index 000000000000..754f28506791
--- /dev/null
+++ b/arch/loongarch/include/asm/cpu.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cpu.h: Values of the PRID register used to match up
+ * various LoongArch CPU types.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_CPU_H
+#define _ASM_CPU_H
+
+/*
+ * As described in LoongArch specs from Loongson Technology, the PRID register
+ * (CPUCFG.00) has the following layout:
+ *
+ * +---------------+----------------+------------+--------------------+
+ * |   Reserved    |   Company ID   | Series ID  |     Product ID     |
+ * +---------------+----------------+------------+--------------------+
+ *  31           24 23            16 15        12 11                 0
+ */
+
+/*
+ * Assigned Company values for bits 23:16 of the PRID register.
+ */
+
+#define PRID_COMP_MASK 0xff0000
+
+#define PRID_COMP_LOONGSON 0x140000
+
+/*
+ * Assigned Series ID values for bits 15:12 of the PRID register. In order
+ * to detect a certain CPU type exactly, additional registers may eventually
+ * need to be examined.
+ */
+
+#define PRID_SERIES_MASK 0xf000
+
+#define PRID_SERIES_LA132 0x8000 /* Loongson 32bit */
+#define PRID_SERIES_LA264 0xa000 /* Loongson 64bit, 2-issue */
+#define PRID_SERIES_LA364 0xb000 /* Loongson 64bit, 3-issue */
+#define PRID_SERIES_LA464 0xc000 /* Loongson 64bit, 4-issue */
+#define PRID_SERIES_LA664 0xd000 /* Loongson 64bit, 6-issue */
+
+/*
+ * Particular Product ID values for bits 11:0 of the PRID register.
+ */
+
+#define PRID_PRODUCT_MASK 0x0fff
+
+#if !defined(__ASSEMBLY__)
+
+enum cpu_type_enum {
+ CPU_UNKNOWN,
+ CPU_LOONGSON32,
+ CPU_LOONGSON64,
+ CPU_LAST
+};
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * ISA Level encodings
+ */
+
+#define LOONGARCH_CPU_ISA_LA32R 0x00000001
+#define LOONGARCH_CPU_ISA_LA32S 0x00000002
+#define LOONGARCH_CPU_ISA_LA64 0x00000004
+
+#define LOONGARCH_CPU_ISA_32BIT (LOONGARCH_CPU_ISA_LA32R | LOONGARCH_CPU_ISA_LA32S)
+#define LOONGARCH_CPU_ISA_64BIT LOONGARCH_CPU_ISA_LA64
+
+/*
+ * CPU Option encodings
+ */
+#define CPU_FEATURE_CPUCFG 0 /* CPU has CPUCFG */
+#define CPU_FEATURE_LAM 1 /* CPU has Atomic instructions */
+#define CPU_FEATURE_UAL 2 /* CPU supports unaligned access */
+#define CPU_FEATURE_FPU 3 /* CPU has FPU */
+#define CPU_FEATURE_LSX 4 /* CPU has LSX (128-bit SIMD) */
+#define CPU_FEATURE_LASX 5 /* CPU has LASX (256-bit SIMD) */
+#define CPU_FEATURE_COMPLEX 6 /* CPU has Complex instructions */
+#define CPU_FEATURE_CRYPTO 7 /* CPU has Crypto instructions */
+#define CPU_FEATURE_LVZ 8 /* CPU has Virtualization extension */
+#define CPU_FEATURE_LBT_X86 9 /* CPU has X86 Binary Translation */
+#define CPU_FEATURE_LBT_ARM 10 /* CPU has ARM Binary Translation */
+#define CPU_FEATURE_LBT_MIPS 11 /* CPU has MIPS Binary Translation */
+#define CPU_FEATURE_TLB 12 /* CPU has TLB */
+#define CPU_FEATURE_CSR 13 /* CPU has CSR */
+#define CPU_FEATURE_WATCH 14 /* CPU has watchpoint registers */
+#define CPU_FEATURE_VINT 15 /* CPU has vectored interrupts */
+#define CPU_FEATURE_CSRIPI 16 /* CPU has CSR-IPI */
+#define CPU_FEATURE_EXTIOI 17 /* CPU has EXT-IOI */
+#define CPU_FEATURE_PREFETCH 18 /* CPU has prefetch instructions */
+#define CPU_FEATURE_PMP 19 /* CPU has performance counter */
+#define CPU_FEATURE_SCALEFREQ 20 /* CPU supports cpufreq scaling */
+#define CPU_FEATURE_FLATMODE 21 /* CPU has flat mode */
+#define CPU_FEATURE_EIODECODE 22 /* CPU has EXTIOI interrupt pin decode mode */
+#define CPU_FEATURE_GUESTID 23 /* CPU has GuestID feature */
+#define CPU_FEATURE_HYPERVISOR 24 /* CPU has hypervisor (running in VM) */
+
+#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG)
+#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM)
+#define LOONGARCH_CPU_UAL BIT_ULL(CPU_FEATURE_UAL)
+#define LOONGARCH_CPU_FPU BIT_ULL(CPU_FEATURE_FPU)
+#define LOONGARCH_CPU_LSX BIT_ULL(CPU_FEATURE_LSX)
+#define LOONGARCH_CPU_LASX BIT_ULL(CPU_FEATURE_LASX)
+#define LOONGARCH_CPU_COMPLEX BIT_ULL(CPU_FEATURE_COMPLEX)
+#define LOONGARCH_CPU_CRYPTO BIT_ULL(CPU_FEATURE_CRYPTO)
+#define LOONGARCH_CPU_LVZ BIT_ULL(CPU_FEATURE_LVZ)
+#define LOONGARCH_CPU_LBT_X86 BIT_ULL(CPU_FEATURE_LBT_X86)
+#define LOONGARCH_CPU_LBT_ARM BIT_ULL(CPU_FEATURE_LBT_ARM)
+#define LOONGARCH_CPU_LBT_MIPS BIT_ULL(CPU_FEATURE_LBT_MIPS)
+#define LOONGARCH_CPU_TLB BIT_ULL(CPU_FEATURE_TLB)
+#define LOONGARCH_CPU_CSR BIT_ULL(CPU_FEATURE_CSR)
+#define LOONGARCH_CPU_WATCH BIT_ULL(CPU_FEATURE_WATCH)
+#define LOONGARCH_CPU_VINT BIT_ULL(CPU_FEATURE_VINT)
+#define LOONGARCH_CPU_CSRIPI BIT_ULL(CPU_FEATURE_CSRIPI)
+#define LOONGARCH_CPU_EXTIOI BIT_ULL(CPU_FEATURE_EXTIOI)
+#define LOONGARCH_CPU_PREFETCH BIT_ULL(CPU_FEATURE_PREFETCH)
+#define LOONGARCH_CPU_PMP BIT_ULL(CPU_FEATURE_PMP)
+#define LOONGARCH_CPU_SCALEFREQ BIT_ULL(CPU_FEATURE_SCALEFREQ)
+#define LOONGARCH_CPU_FLATMODE BIT_ULL(CPU_FEATURE_FLATMODE)
+#define LOONGARCH_CPU_EIODECODE BIT_ULL(CPU_FEATURE_EIODECODE)
+#define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID)
+#define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR)
+
+#endif /* _ASM_CPU_H */
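
To make the PRID layout concrete, here is a standalone sketch (not part of the patch) that decodes a hypothetical CPUCFG.00 value with the masks defined above:

    /* Standalone sketch: decoding a PRID value with the cpu.h masks.
     * The sample value is hypothetical, chosen to decode as a Loongson
     * LA464-series part. */
    #include <stdio.h>

    #define PRID_COMP_MASK      0xff0000
    #define PRID_COMP_LOONGSON  0x140000
    #define PRID_SERIES_MASK    0xf000
    #define PRID_SERIES_LA464   0xc000
    #define PRID_PRODUCT_MASK   0x0fff

    int main(void)
    {
        unsigned int prid = 0x14c010;   /* hypothetical CPUCFG.00 value */

        if ((prid & PRID_COMP_MASK) == PRID_COMP_LOONGSON)
            printf("company: Loongson\n");
        if ((prid & PRID_SERIES_MASK) == PRID_SERIES_LA464)
            printf("series:  LA464 (64bit, 4-issue)\n");
        printf("product: 0x%03x\n", prid & PRID_PRODUCT_MASK);
        return 0;
    }
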
diff --git a/arch/loongarch/include/asm/cpufeature.h b/arch/loongarch/include/asm/cpufeature.h
new file mode 100644
index 000000000000..4da22a8e63de
--- /dev/null
+++ b/arch/loongarch/include/asm/cpufeature.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CPU feature definitions for module loading, used by
+ * module_cpu_feature_match(); see uapi/asm/hwcap.h for LoongArch CPU features.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <uapi/asm/hwcap.h>
+#include <asm/elf.h>
+
+#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
+
+#define cpu_feature(x) ilog2(HWCAP_ ## x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+ return elf_hwcap & (1UL << num);
+}
+
+#endif /* __ASM_CPUFEATURE_H */
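
A standalone sketch (not part of the patch) of the cpu_feature()/cpu_have_feature() pairing above: a HWCAP bit mask is turned into a bit index with ilog2(), then tested against elf_hwcap. The HWCAP_FPU value here is an assumption for illustration; the real constants live in uapi/asm/hwcap.h.

    #include <stdbool.h>
    #include <stdio.h>

    #define HWCAP_FPU  (1UL << 3)              /* assumed bit position */
    #define ilog2(x)   (63 - __builtin_clzl(x))

    static unsigned long elf_hwcap = HWCAP_FPU;

    static bool cpu_have_feature(unsigned int num)
    {
        return elf_hwcap & (1UL << num);
    }

    int main(void)
    {
        printf("FPU feature bit: %d\n", ilog2(HWCAP_FPU));          /* 3 */
        printf("have FPU: %d\n", cpu_have_feature(ilog2(HWCAP_FPU)));
        return 0;
    }
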
diff --git a/arch/loongarch/include/asm/delay.h b/arch/loongarch/include/asm/delay.h
new file mode 100644
index 000000000000..36d775191310
--- /dev/null
+++ b/arch/loongarch/include/asm/delay.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_DELAY_H
+#define _ASM_DELAY_H
+
+#include <linux/param.h>
+
+extern void __delay(unsigned long cycles);
+extern void __ndelay(unsigned long ns);
+extern void __udelay(unsigned long us);
+
+#define ndelay(ns) __ndelay(ns)
+#define udelay(us) __udelay(us)
+
+/* Make sure "usecs *= ..." in udelay does not overflow. */
+#if HZ >= 1000
+#define MAX_UDELAY_MS 1
+#elif HZ <= 200
+#define MAX_UDELAY_MS 5
+#else
+#define MAX_UDELAY_MS (1000 / HZ)
+#endif
+
+#endif /* _ASM_DELAY_H */
diff --git a/arch/loongarch/include/asm/dma-direct.h b/arch/loongarch/include/asm/dma-direct.h
new file mode 100644
index 000000000000..75ccd808a2af
--- /dev/null
+++ b/arch/loongarch/include/asm/dma-direct.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _LOONGARCH_DMA_DIRECT_H
+#define _LOONGARCH_DMA_DIRECT_H
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+#endif /* _LOONGARCH_DMA_DIRECT_H */
diff --git a/arch/loongarch/include/asm/dmi.h b/arch/loongarch/include/asm/dmi.h
new file mode 100644
index 000000000000..605493417753
--- /dev/null
+++ b/arch/loongarch/include/asm/dmi.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_DMI_H
+#define _ASM_DMI_H
+
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#define dmi_early_remap(x, l) dmi_remap(x, l)
+#define dmi_early_unmap(x, l) dmi_unmap(x)
+#define dmi_alloc(l) memblock_alloc(l, PAGE_SIZE)
+
+static inline void *dmi_remap(u64 phys_addr, unsigned long size)
+{
+ return ((void *)TO_CACHE(phys_addr));
+}
+
+static inline void dmi_unmap(void *addr)
+{
+}
+
+#endif /* _ASM_DMI_H */
diff --git a/arch/loongarch/include/asm/efi.h b/arch/loongarch/include/asm/efi.h
new file mode 100644
index 000000000000..0127d84d5e1d
--- /dev/null
+++ b/arch/loongarch/include/asm/efi.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_LOONGARCH_EFI_H
+#define _ASM_LOONGARCH_EFI_H
+
+#include <linux/efi.h>
+
+void __init efi_init(void);
+void __init efi_runtime_init(void);
+void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+
+#define ARCH_EFI_IRQ_FLAGS_MASK 0x00000004 /* Bit 2: CSR.CRMD.IE */
+
+#define arch_efi_call_virt_setup() \
+({ \
+})
+
+#define arch_efi_call_virt(p, f, args...) \
+({ \
+ efi_##f##_t * __f; \
+ __f = p->f; \
+ __f(args); \
+})
+
+#define arch_efi_call_virt_teardown() \
+({ \
+})
+
+#define EFI_ALLOC_ALIGN SZ_64K
+
+struct screen_info *alloc_screen_info(void);
+void free_screen_info(struct screen_info *si);
+
+static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
+{
+ return ULONG_MAX;
+}
+
+#endif /* _ASM_LOONGARCH_EFI_H */
diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
new file mode 100644
index 000000000000..f3960b18a90e
--- /dev/null
+++ b/arch/loongarch/include/asm/elf.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_ELF_H
+#define _ASM_ELF_H
+
+#include <linux/auxvec.h>
+#include <linux/fs.h>
+#include <uapi/linux/elf.h>
+
+#include <asm/current.h>
+#include <asm/vdso.h>
+
+/* The ABI of a file. */
+#define EF_LOONGARCH_ABI_LP64_SOFT_FLOAT 0x1
+#define EF_LOONGARCH_ABI_LP64_SINGLE_FLOAT 0x2
+#define EF_LOONGARCH_ABI_LP64_DOUBLE_FLOAT 0x3
+
+#define EF_LOONGARCH_ABI_ILP32_SOFT_FLOAT 0x5
+#define EF_LOONGARCH_ABI_ILP32_SINGLE_FLOAT 0x6
+#define EF_LOONGARCH_ABI_ILP32_DOUBLE_FLOAT 0x7
+
+/* LoongArch relocation types used by the dynamic linker */
+#define R_LARCH_NONE 0
+#define R_LARCH_32 1
+#define R_LARCH_64 2
+#define R_LARCH_RELATIVE 3
+#define R_LARCH_COPY 4
+#define R_LARCH_JUMP_SLOT 5
+#define R_LARCH_TLS_DTPMOD32 6
+#define R_LARCH_TLS_DTPMOD64 7
+#define R_LARCH_TLS_DTPREL32 8
+#define R_LARCH_TLS_DTPREL64 9
+#define R_LARCH_TLS_TPREL32 10
+#define R_LARCH_TLS_TPREL64 11
+#define R_LARCH_IRELATIVE 12
+#define R_LARCH_MARK_LA 20
+#define R_LARCH_MARK_PCREL 21
+#define R_LARCH_SOP_PUSH_PCREL 22
+#define R_LARCH_SOP_PUSH_ABSOLUTE 23
+#define R_LARCH_SOP_PUSH_DUP 24
+#define R_LARCH_SOP_PUSH_GPREL 25
+#define R_LARCH_SOP_PUSH_TLS_TPREL 26
+#define R_LARCH_SOP_PUSH_TLS_GOT 27
+#define R_LARCH_SOP_PUSH_TLS_GD 28
+#define R_LARCH_SOP_PUSH_PLT_PCREL 29
+#define R_LARCH_SOP_ASSERT 30
+#define R_LARCH_SOP_NOT 31
+#define R_LARCH_SOP_SUB 32
+#define R_LARCH_SOP_SL 33
+#define R_LARCH_SOP_SR 34
+#define R_LARCH_SOP_ADD 35
+#define R_LARCH_SOP_AND 36
+#define R_LARCH_SOP_IF_ELSE 37
+#define R_LARCH_SOP_POP_32_S_10_5 38
+#define R_LARCH_SOP_POP_32_U_10_12 39
+#define R_LARCH_SOP_POP_32_S_10_12 40
+#define R_LARCH_SOP_POP_32_S_10_16 41
+#define R_LARCH_SOP_POP_32_S_10_16_S2 42
+#define R_LARCH_SOP_POP_32_S_5_20 43
+#define R_LARCH_SOP_POP_32_S_0_5_10_16_S2 44
+#define R_LARCH_SOP_POP_32_S_0_10_10_16_S2 45
+#define R_LARCH_SOP_POP_32_U 46
+#define R_LARCH_ADD8 47
+#define R_LARCH_ADD16 48
+#define R_LARCH_ADD24 49
+#define R_LARCH_ADD32 50
+#define R_LARCH_ADD64 51
+#define R_LARCH_SUB8 52
+#define R_LARCH_SUB16 53
+#define R_LARCH_SUB24 54
+#define R_LARCH_SUB32 55
+#define R_LARCH_SUB64 56
+#define R_LARCH_GNU_VTINHERIT 57
+#define R_LARCH_GNU_VTENTRY 58
+
+#ifndef ELF_ARCH
+
+/* ELF register definitions */
+
+/*
+ * The ELF general purpose register set contains the following registers:
+ *
+ *    Register    Number
+ *    GPRs        32
+ *    ORIG_A0     1
+ *    ERA         1
+ *    BADVADDR    1
+ *    CRMD        1
+ *    PRMD        1
+ *    EUEN        1
+ *    ECFG        1
+ *    ESTAT       1
+ *    Reserved    5
+ */
+#define ELF_NGREG 45
+
+/*
+ * The ELF floating point register set contains the following registers:
+ *
+ *    Register    Number
+ *    FPR         32
+ *    FCC         1
+ *    FCSR        1
+ */
+#define ELF_NFPREG 34
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);
+
+#ifdef CONFIG_32BIT
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch elf32_check_arch
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+
+#define ELF_CORE_COPY_REGS(dest, regs) \
+ loongarch_dump_regs32((u32 *)&(dest), (regs));
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch elf64_check_arch
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS64
+
+#define ELF_CORE_COPY_REGS(dest, regs) \
+ loongarch_dump_regs64((u64 *)&(dest), (regs));
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_LOONGARCH
+
+#endif /* !defined(ELF_ARCH) */
+
+#define loongarch_elf_check_machine(x) ((x)->e_machine == EM_LOONGARCH)
+
+#define vmcore_elf32_check_arch loongarch_elf_check_machine
+#define vmcore_elf64_check_arch loongarch_elf_check_machine
+
+/*
+ * Return non-zero if HDR identifies a 32-bit ELF binary.
+ */
+#define elf32_check_arch(hdr) \
+({ \
+ int __res = 1; \
+ struct elfhdr *__h = (hdr); \
+ \
+ if (!loongarch_elf_check_machine(__h)) \
+ __res = 0; \
+ if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
+ __res = 0; \
+ \
+ __res; \
+})
+
+/*
+ * Return non-zero if HDR identifies a 64-bit ELF binary.
+ */
+#define elf64_check_arch(hdr) \
+({ \
+ int __res = 1; \
+ struct elfhdr *__h = (hdr); \
+ \
+ if (!loongarch_elf_check_machine(__h)) \
+ __res = 0; \
+ if (__h->e_ident[EI_CLASS] != ELFCLASS64) \
+ __res = 0; \
+ \
+ __res; \
+})
+
+#ifdef CONFIG_32BIT
+
+#define SET_PERSONALITY2(ex, state) \
+do { \
+ current->thread.vdso = &vdso_info; \
+ \
+ loongarch_set_personality_fcsr(state); \
+ \
+ if (personality(current->personality) != PER_LINUX) \
+ set_personality(PER_LINUX); \
+} while (0)
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+
+#define SET_PERSONALITY2(ex, state) \
+do { \
+ unsigned int p; \
+ \
+ clear_thread_flag(TIF_32BIT_REGS); \
+ clear_thread_flag(TIF_32BIT_ADDR); \
+ \
+ current->thread.vdso = &vdso_info; \
+ loongarch_set_personality_fcsr(state); \
+ \
+ p = personality(current->personality); \
+ if (p != PER_LINUX32 && p != PER_LINUX) \
+ set_personality(PER_LINUX); \
+} while (0)
+
+#endif /* CONFIG_64BIT */
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports. This could be done in userspace,
+ * but it's not easy, and we've already done it here.
+ */
+
+#define ELF_HWCAP (elf_hwcap)
+extern unsigned int elf_hwcap;
+#include <asm/hwcap.h>
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+
+#define ELF_PLATFORM __elf_platform
+extern const char *__elf_platform;
+
+#define ELF_PLAT_INIT(_r, load_addr) do { \
+ _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \
+ _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \
+ _r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \
+ _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \
+ _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \
+ _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \
+ _r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \
+ _r->regs[29] = _r->regs[30] = _r->regs[31] = 0; \
+} while (0)
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+#define ARCH_DLINFO \
+do { \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (unsigned long)current->mm->context.vdso); \
+} while (0)
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+
+struct arch_elf_state {
+ int fp_abi;
+ int interp_fp_abi;
+};
+
+#define LOONGARCH_ABI_FP_ANY (0)
+
+#define INIT_ARCH_ELF_STATE { \
+ .fp_abi = LOONGARCH_ABI_FP_ANY, \
+ .interp_fp_abi = LOONGARCH_ABI_FP_ANY, \
+}
+
+#define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
+
+extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
+ bool is_interp, struct arch_elf_state *state);
+
+extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
+ struct arch_elf_state *state);
+
+extern void loongarch_set_personality_fcsr(struct arch_elf_state *state);
+
+#endif /* _ASM_ELF_H */
diff --git a/arch/loongarch/include/asm/entry-common.h b/arch/loongarch/include/asm/entry-common.h
new file mode 100644
index 000000000000..0fe2a098ded9
--- /dev/null
+++ b/arch/loongarch/include/asm/entry-common.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_LOONGARCH_ENTRY_COMMON_H
+#define ARCH_LOONGARCH_ENTRY_COMMON_H
+
+#include <linux/sched.h>
+#include <linux/processor.h>
+
+static inline bool on_thread_stack(void)
+{
+ return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
+}
+
+#endif
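
The on_thread_stack() test relies on THREAD_SIZE-aligned stacks: two addresses XOR to something below THREAD_SIZE exactly when they sit in the same aligned block, so no range comparison is needed. A standalone model (not part of the patch), with an assumed THREAD_SIZE:

    #include <stdbool.h>
    #include <stdio.h>

    #define THREAD_SIZE (16UL * 1024)  /* assumed; arch-configured in reality */

    /* True iff sp falls inside the THREAD_SIZE-aligned block holding base. */
    static bool on_same_stack(unsigned long base, unsigned long sp)
    {
        return !((base ^ sp) & ~(THREAD_SIZE - 1));
    }

    int main(void)
    {
        unsigned long stack = 0x900000000UL;   /* THREAD_SIZE-aligned base */

        printf("%d\n", on_same_stack(stack, stack + 0x100));       /* 1 */
        printf("%d\n", on_same_stack(stack, stack + THREAD_SIZE)); /* 0 */
        return 0;
    }
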
diff --git a/arch/loongarch/include/asm/exec.h b/arch/loongarch/include/asm/exec.h
new file mode 100644
index 000000000000..ba0220812ebb
--- /dev/null
+++ b/arch/loongarch/include/asm/exec.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_EXEC_H
+#define _ASM_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* _ASM_EXEC_H */
diff --git a/arch/loongarch/include/asm/fb.h b/arch/loongarch/include/asm/fb.h
new file mode 100644
index 000000000000..3116bde8772d
--- /dev/null
+++ b/arch/loongarch/include/asm/fb.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/loongarch/include/asm/fixmap.h b/arch/loongarch/include/asm/fixmap.h
new file mode 100644
index 000000000000..b3541dfa2013
--- /dev/null
+++ b/arch/loongarch/include/asm/fixmap.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#define NR_FIX_BTMAPS 64
+
+#endif
diff --git a/arch/loongarch/include/asm/fpregdef.h b/arch/loongarch/include/asm/fpregdef.h
new file mode 100644
index 000000000000..adb16e4b43b0
--- /dev/null
+++ b/arch/loongarch/include/asm/fpregdef.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for the FPU register names
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_FPREGDEF_H
+#define _ASM_FPREGDEF_H
+
+#define fa0 $f0 /* argument registers, fa0/fa1 reused as fv0/fv1 for return value */
+#define fa1 $f1
+#define fa2 $f2
+#define fa3 $f3
+#define fa4 $f4
+#define fa5 $f5
+#define fa6 $f6
+#define fa7 $f7
+#define ft0 $f8 /* caller saved */
+#define ft1 $f9
+#define ft2 $f10
+#define ft3 $f11
+#define ft4 $f12
+#define ft5 $f13
+#define ft6 $f14
+#define ft7 $f15
+#define ft8 $f16
+#define ft9 $f17
+#define ft10 $f18
+#define ft11 $f19
+#define ft12 $f20
+#define ft13 $f21
+#define ft14 $f22
+#define ft15 $f23
+#define fs0 $f24 /* callee saved */
+#define fs1 $f25
+#define fs2 $f26
+#define fs3 $f27
+#define fs4 $f28
+#define fs5 $f29
+#define fs6 $f30
+#define fs7 $f31
+
+/*
+ * Current binutils expects *GPRs* at FCSR position for the FCSR
+ * operation instructions, so define aliases for those used.
+ */
+#define fcsr0 $r0
+#define fcsr1 $r1
+#define fcsr2 $r2
+#define fcsr3 $r3
+#define vcsr16 $r16
+
+#endif /* _ASM_FPREGDEF_H */
diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h
new file mode 100644
index 000000000000..358b254d9c1d
--- /dev/null
+++ b/arch/loongarch/include/asm/fpu.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_FPU_H
+#define _ASM_FPU_H
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/ptrace.h>
+#include <linux/thread_info.h>
+#include <linux/bitops.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/current.h>
+#include <asm/loongarch.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+struct sigcontext;
+
+extern void _init_fpu(unsigned int);
+extern void _save_fp(struct loongarch_fpu *);
+extern void _restore_fp(struct loongarch_fpu *);
+
+/*
+ * Mask the FCSR Cause bits according to the Enable bits, observing
+ * that Unimplemented is always enabled.
+ */
+static inline unsigned long mask_fcsr_x(unsigned long fcsr)
+{
+ return fcsr & ((fcsr & FPU_CSR_ALL_E) <<
+ (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)));
+}
+
+static inline int is_fp_enabled(void)
+{
+ return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_FPEN) ?
+ 1 : 0;
+}
+
+#define enable_fpu() set_csr_euen(CSR_EUEN_FPEN)
+
+#define disable_fpu() clear_csr_euen(CSR_EUEN_FPEN)
+
+#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU)
+
+static inline int is_fpu_owner(void)
+{
+ return test_thread_flag(TIF_USEDFPU);
+}
+
+static inline void __own_fpu(void)
+{
+ enable_fpu();
+ set_thread_flag(TIF_USEDFPU);
+ KSTK_EUEN(current) |= CSR_EUEN_FPEN;
+}
+
+static inline void own_fpu_inatomic(int restore)
+{
+ if (cpu_has_fpu && !is_fpu_owner()) {
+ __own_fpu();
+ if (restore)
+ _restore_fp(&current->thread.fpu);
+ }
+}
+
+static inline void own_fpu(int restore)
+{
+ preempt_disable();
+ own_fpu_inatomic(restore);
+ preempt_enable();
+}
+
+static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
+{
+ if (is_fpu_owner()) {
+ if (save)
+ _save_fp(&tsk->thread.fpu);
+ disable_fpu();
+ clear_tsk_thread_flag(tsk, TIF_USEDFPU);
+ }
+ KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
+}
+
+static inline void lose_fpu(int save)
+{
+ preempt_disable();
+ lose_fpu_inatomic(save, current);
+ preempt_enable();
+}
+
+static inline void init_fpu(void)
+{
+ unsigned int fcsr = current->thread.fpu.fcsr;
+
+ __own_fpu();
+ _init_fpu(fcsr);
+ set_used_math();
+}
+
+static inline void save_fp(struct task_struct *tsk)
+{
+ if (cpu_has_fpu)
+ _save_fp(&tsk->thread.fpu);
+}
+
+static inline void restore_fp(struct task_struct *tsk)
+{
+ if (cpu_has_fpu)
+ _restore_fp(&tsk->thread.fpu);
+}
+
+static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
+{
+ if (tsk == current) {
+ preempt_disable();
+ if (is_fpu_owner())
+ _save_fp(&current->thread.fpu);
+ preempt_enable();
+ }
+
+ return tsk->thread.fpu.fpr;
+}
+
+#endif /* _ASM_FPU_H */
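
The ownership handshake above (own_fpu()/lose_fpu()) can be modelled standalone; preemption control and the real CSR/TIF plumbing are deliberately elided, with hardware state reduced to plain variables:

    /* Standalone model (not part of the patch): own_fpu() enables the
     * unit and marks the thread as owner; lose_fpu(save) optionally
     * saves state, then disables the unit and clears ownership. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool fpu_enabled;   /* models CSR.EUEN.FPEN */
    static bool tif_usedfpu;   /* models TIF_USEDFPU */
    static int saved_state;    /* models struct loongarch_fpu */
    static int live_state;     /* models the physical FP registers */

    static void own_fpu(bool restore)
    {
        if (!tif_usedfpu) {
            fpu_enabled = true;
            tif_usedfpu = true;
            if (restore)
                live_state = saved_state;
        }
    }

    static void lose_fpu(bool save)
    {
        if (tif_usedfpu) {
            if (save)
                saved_state = live_state;
            fpu_enabled = false;
            tif_usedfpu = false;
        }
    }

    int main(void)
    {
        own_fpu(false);
        live_state = 42;    /* user computes in FP regs */
        lose_fpu(true);     /* context switch away: save + disable */
        own_fpu(true);      /* switch back: enable + restore */
        printf("restored: %d, enabled: %d\n", live_state, fpu_enabled);
        return 0;
    }
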
diff --git a/arch/loongarch/include/asm/futex.h b/arch/loongarch/include/asm/futex.h
new file mode 100644
index 000000000000..9de8231694ec
--- /dev/null
+++ b/arch/loongarch/include/asm/futex.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/barrier.h>
+#include <asm/compiler.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+{ \
+ __asm__ __volatile__( \
+ "1: ll.w %1, %4 # __futex_atomic_op\n" \
+ " " insn " \n" \
+ "2: sc.w $t0, %2 \n" \
+ " beq $t0, $zero, 1b \n" \
+ "3: \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li.w %0, %6 \n" \
+ " b 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 4b \n" \
+ " "__UA_ADDR "\t2b, 4b \n" \
+ " .previous \n" \
+ : "=r" (ret), "=&r" (oldval), \
+ "=ZC" (*uaddr) \
+ : "0" (0), "ZC" (*uaddr), "Jr" (oparg), \
+ "i" (-EFAULT) \
+ : "memory", "t0"); \
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret = 0;
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("move $t0, %z5", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add.w $t0, %1, %z5", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or $t0, %1, %z5", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and $t0, %1, %z5", ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor $t0, %1, %z5", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val = 0;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __asm__ __volatile__(
+ "# futex_atomic_cmpxchg_inatomic \n"
+ "1: ll.w %1, %3 \n"
+ " bne %1, %z4, 3f \n"
+ " or $t0, %z5, $zero \n"
+ "2: sc.w $t0, %2 \n"
+ " beq $zero, $t0, 1b \n"
+ "3: \n"
+ __WEAK_LLSC_MB
+ " .section .fixup,\"ax\" \n"
+ "4: li.d %0, %6 \n"
+ " b 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "__UA_ADDR "\t1b, 4b \n"
+ " "__UA_ADDR "\t2b, 4b \n"
+ " .previous \n"
+ : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+ : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+ "i" (-EFAULT)
+ : "memory", "t0");
+
+ *uval = val;
+
+ return ret;
+}
+
+#endif /* _ASM_FUTEX_H */
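
The five ops dispatched by arch_futex_atomic_op_inuser() reduce to simple read-modify-write updates; the ll.w/sc.w loop only makes them atomic. A non-atomic standalone model (not part of the patch; note FUTEX_OP_ANDN complements oparg before the AND, as in the switch above):

    #include <stdio.h>

    enum { OP_SET, OP_ADD, OP_OR, OP_ANDN, OP_XOR };

    static int futex_op(int *uaddr, int op, int oparg, int *oldval)
    {
        *oldval = *uaddr;
        switch (op) {
        case OP_SET:  *uaddr = oparg;            break;
        case OP_ADD:  *uaddr = *oldval + oparg;  break;
        case OP_OR:   *uaddr = *oldval | oparg;  break;
        case OP_ANDN: *uaddr = *oldval & ~oparg; break; /* note the ~ */
        case OP_XOR:  *uaddr = *oldval ^ oparg;  break;
        default: return -1;
        }
        return 0;
    }

    int main(void)
    {
        int word = 0xf0, old;

        futex_op(&word, OP_ANDN, 0x30, &old);
        printf("old=0x%x new=0x%x\n", old, word); /* old=0xf0 new=0xc0 */
        return 0;
    }
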
diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h
new file mode 100644
index 000000000000..0ef3b18f8980
--- /dev/null
+++ b/arch/loongarch/include/asm/hardirq.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_HARDIRQ_H
+#define _ASM_HARDIRQ_H
+
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+extern void ack_bad_irq(unsigned int irq);
+#define ack_bad_irq ack_bad_irq
+
+#define NR_IPI 2
+
+typedef struct {
+ unsigned int ipi_irqs[NR_IPI];
+ unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+
+#define __ARCH_IRQ_STAT
+
+#endif /* _ASM_HARDIRQ_H */
diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h
new file mode 100644
index 000000000000..aa44b3fe43dd
--- /dev/null
+++ b/arch/loongarch/include/asm/hugetlb.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_HUGETLB_H
+#define __ASM_HUGETLB_H
+
+#include <asm/page.h>
+
+uint64_t pmd_to_entrylo(unsigned long pmd_val);
+
+#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr,
+ unsigned long len)
+{
+ unsigned long task_size = STACK_TOP;
+ struct hstate *h = hstate_file(file);
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+ if (len > task_size)
+ return -ENOMEM;
+ if (task_size - len < addr)
+ return -EINVAL;
+ return 0;
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t clear;
+ pte_t pte = *ptep;
+
+ pte_val(clear) = (unsigned long)invalid_pte_table;
+ set_pte_at(mm, addr, ptep, clear);
+ return pte;
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t pte;
+
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ flush_tlb_page(vma, addr);
+ return pte;
+}
+
+#define __HAVE_ARCH_HUGE_PTE_NONE
+static inline int huge_pte_none(pte_t pte)
+{
+ unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;
+ return !val || (val == (unsigned long)invalid_pte_table);
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t pte,
+ int dirty)
+{
+ int changed = !pte_same(*ptep, pte);
+
+ if (changed) {
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
+ /*
+ * There could be some standard sized pages in there,
+ * get them all.
+ */
+ flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
+ }
+ return changed;
+}
+
+#include <asm-generic/hugetlb.h>
+
+#endif /* __ASM_HUGETLB_H */
diff --git a/arch/loongarch/include/asm/hw_irq.h b/arch/loongarch/include/asm/hw_irq.h
new file mode 100644
index 000000000000..af4f4e8fbd85
--- /dev/null
+++ b/arch/loongarch/include/asm/hw_irq.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_HW_IRQ_H
+#define __ASM_HW_IRQ_H
+
+#include <linux/atomic.h>
+
+extern atomic_t irq_err_count;
+
+/*
+ * interrupt-retrigger: NOP for now. This may not be appropriate for all
+ * machines; we'll see ...
+ */
+
+#endif /* __ASM_HW_IRQ_H */
diff --git a/arch/loongarch/include/asm/idle.h b/arch/loongarch/include/asm/idle.h
new file mode 100644
index 000000000000..f7f2b7dbf958
--- /dev/null
+++ b/arch/loongarch/include/asm/idle.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_IDLE_H
+#define __ASM_IDLE_H
+
+#include <linux/linkage.h>
+
+extern asmlinkage void __arch_cpu_idle(void);
+
+#endif /* __ASM_IDLE_H */
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
new file mode 100644
index 000000000000..575d1bb66ffb
--- /dev/null
+++ b/arch/loongarch/include/asm/inst.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_INST_H
+#define _ASM_INST_H
+
+#include <linux/types.h>
+#include <asm/asm.h>
+
+#define ADDR_IMMMASK_LU52ID 0xFFF0000000000000
+#define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000
+#define ADDR_IMMMASK_ADDU16ID 0x00000000FFFF0000
+
+#define ADDR_IMMSHIFT_LU52ID 52
+#define ADDR_IMMSHIFT_LU32ID 32
+#define ADDR_IMMSHIFT_ADDU16ID 16
+
+#define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN)
+
+enum reg1i20_op {
+ lu12iw_op = 0x0a,
+ lu32id_op = 0x0b,
+};
+
+enum reg2i12_op {
+ lu52id_op = 0x0c,
+};
+
+enum reg2i16_op {
+ jirl_op = 0x13,
+};
+
+struct reg0i26_format {
+ unsigned int immediate_h : 10;
+ unsigned int immediate_l : 16;
+ unsigned int opcode : 6;
+};
+
+struct reg1i20_format {
+ unsigned int rd : 5;
+ unsigned int immediate : 20;
+ unsigned int opcode : 7;
+};
+
+struct reg1i21_format {
+ unsigned int immediate_h : 5;
+ unsigned int rj : 5;
+ unsigned int immediate_l : 16;
+ unsigned int opcode : 6;
+};
+
+struct reg2i12_format {
+ unsigned int rd : 5;
+ unsigned int rj : 5;
+ unsigned int immediate : 12;
+ unsigned int opcode : 10;
+};
+
+struct reg2i16_format {
+ unsigned int rd : 5;
+ unsigned int rj : 5;
+ unsigned int immediate : 16;
+ unsigned int opcode : 6;
+};
+
+union loongarch_instruction {
+ unsigned int word;
+ struct reg0i26_format reg0i26_format;
+ struct reg1i20_format reg1i20_format;
+ struct reg1i21_format reg1i21_format;
+ struct reg2i12_format reg2i12_format;
+ struct reg2i16_format reg2i16_format;
+};
+
+#define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction)
+
+enum loongarch_gpr {
+ LOONGARCH_GPR_ZERO = 0,
+ LOONGARCH_GPR_RA = 1,
+ LOONGARCH_GPR_TP = 2,
+ LOONGARCH_GPR_SP = 3,
+ LOONGARCH_GPR_A0 = 4, /* Reused as V0 for return value */
+ LOONGARCH_GPR_A1, /* Reused as V1 for return value */
+ LOONGARCH_GPR_A2,
+ LOONGARCH_GPR_A3,
+ LOONGARCH_GPR_A4,
+ LOONGARCH_GPR_A5,
+ LOONGARCH_GPR_A6,
+ LOONGARCH_GPR_A7,
+ LOONGARCH_GPR_T0 = 12,
+ LOONGARCH_GPR_T1,
+ LOONGARCH_GPR_T2,
+ LOONGARCH_GPR_T3,
+ LOONGARCH_GPR_T4,
+ LOONGARCH_GPR_T5,
+ LOONGARCH_GPR_T6,
+ LOONGARCH_GPR_T7,
+ LOONGARCH_GPR_T8,
+ LOONGARCH_GPR_FP = 22,
+ LOONGARCH_GPR_S0 = 23,
+ LOONGARCH_GPR_S1,
+ LOONGARCH_GPR_S2,
+ LOONGARCH_GPR_S3,
+ LOONGARCH_GPR_S4,
+ LOONGARCH_GPR_S5,
+ LOONGARCH_GPR_S6,
+ LOONGARCH_GPR_S7,
+ LOONGARCH_GPR_S8,
+ LOONGARCH_GPR_MAX
+};
+
+u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
+u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
+u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest);
+
+#endif /* _ASM_INST_H */
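
A standalone sketch (not part of the patch) of encoding an instruction through the union above. The bitfield order assumes the usual little-endian GCC layout, which is what the (little-endian) LoongArch kernel relies on:

    #include <stdio.h>

    struct reg2i16_format {
        unsigned int rd        : 5;
        unsigned int rj        : 5;
        unsigned int immediate : 16;
        unsigned int opcode    : 6;
    };

    union loongarch_instruction {
        unsigned int word;
        struct reg2i16_format reg2i16_format;
    };

    int main(void)
    {
        union loongarch_instruction insn;

        insn.reg2i16_format.opcode = 0x13;  /* jirl_op */
        insn.reg2i16_format.immediate = 0;
        insn.reg2i16_format.rj = 1;         /* $ra */
        insn.reg2i16_format.rd = 0;         /* $zero */

        /* jirl $zero, $ra, 0 -- i.e. a plain function return */
        printf("0x%08x\n", insn.word);      /* 0x4c000020 */
        return 0;
    }
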
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
new file mode 100644
index 000000000000..884599739b36
--- /dev/null
+++ b/arch/loongarch/include/asm/io.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#define ARCH_HAS_IOREMAP_WC
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+#include <asm/bug.h>
+#include <asm/byteorder.h>
+#include <asm/cpu.h>
+#include <asm/page.h>
+#include <asm/pgtable-bits.h>
+#include <asm/string.h>
+
+/*
+ * On LoongArch, the I/O port mapping is as follows:
+ *
+ * | .... |
+ * |-----------------------|
+ * | pci io ports(64K~32M) |
+ * |-----------------------|
+ * | isa io ports(0 ~16K) |
+ * PCI_IOBASE ->|-----------------------|
+ * | .... |
+ */
+#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
+#define PCI_IOSIZE SZ_32M
+#define ISA_IOSIZE SZ_16K
+#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
+extern void __init early_iounmap(void __iomem *addr, unsigned long size);
+
+#define early_memremap early_ioremap
+#define early_memunmap early_iounmap
+
+static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ unsigned long prot_val)
+{
+ if (prot_val == _CACHE_CC)
+ return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
+ else
+ return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
+}
+
+/*
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ */
+#define ioremap(offset, size) \
+ ioremap_prot((offset), (size), _CACHE_SUC)
+
+/*
+ * ioremap_wc - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_wc performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncacheable
+ * but accelerated by means of the write-combining feature. It is
+ * specifically useful for PCIe prefetchable windows, which may vastly
+ * improve communication performance. If it is determined at boot stage
+ * that the CPU does not support WUC, this method falls back to the
+ * _CACHE_SUC option (see the cpu_probe() method).
+ */
+#define ioremap_wc(offset, size) \
+ ioremap_prot((offset), (size), _CACHE_WUC)
+
+/*
+ * ioremap_cache - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_cache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked cacheable by
+ * the CPU. Also enables full write-combining. Useful for some
+ * memory-like regions on I/O busses.
+ */
+#define ioremap_cache(offset, size) \
+ ioremap_prot((offset), (size), _CACHE_CC)
+
+static inline void iounmap(const volatile void __iomem *addr)
+{
+}
+
+#define mmiowb() asm volatile ("dbar 0" ::: "memory")
+
+/*
+ * String version of I/O memory access operations.
+ */
+extern void __memset_io(volatile void __iomem *dst, int c, size_t count);
+extern void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count);
+extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count);
+#define memset_io(c, v, l) __memset_io((c), (v), (l))
+#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
+#define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l))
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_IO_H */
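
Since ioremap_prot() above is plain offset arithmetic into a direct-map window, it can be modelled standalone. The window base values below are assumptions for illustration (they come from asm/addrspace.h and the boot-time CSR.DMWn configuration):

    #include <stdio.h>

    #define CACHE_BASE    0x9000000000000000UL  /* assumed CC window */
    #define UNCACHE_BASE  0x8000000000000000UL  /* assumed SUC window */

    static unsigned long ioremap_prot(unsigned long offset, int cached)
    {
        /* "Mapping" is just adding the right direct-map window base. */
        return (cached ? CACHE_BASE : UNCACHE_BASE) + offset;
    }

    int main(void)
    {
        unsigned long mmio = 0x1fe00000;  /* hypothetical device base */

        printf("uncached: 0x%lx\n", ioremap_prot(mmio, 0));
        printf("cached:   0x%lx\n", ioremap_prot(mmio, 1));
        return 0;
    }
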
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
new file mode 100644
index 000000000000..ace3ea6da72e
--- /dev/null
+++ b/arch/loongarch/include/asm/irq.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+#include <linux/irqdomain.h>
+#include <linux/irqreturn.h>
+
+#define IRQ_STACK_SIZE THREAD_SIZE
+#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
+
+DECLARE_PER_CPU(unsigned long, irq_stack);
+
+/*
+ * The highest address on the IRQ stack contains a dummy frame which is
+ * structured as follows:
+ *
+ * top ------------
+ *     | task sp  | <- irq_stack[cpu] + IRQ_STACK_START
+ *     ------------
+ *     |          | <- First frame of IRQ context
+ *     ------------
+ *
+ * task sp holds a copy of the task stack pointer where the struct pt_regs
+ * from exception entry can be found.
+ */
+
+static inline bool on_irq_stack(int cpu, unsigned long sp)
+{
+ unsigned long low = per_cpu(irq_stack, cpu);
+ unsigned long high = low + IRQ_STACK_SIZE;
+
+ return (low <= sp && sp <= high);
+}
+
+int get_ipi_irq(void);
+int get_pmc_irq(void);
+int get_timer_irq(void);
+void spurious_interrupt(void);
+
+#define NR_IRQS_LEGACY 16
+
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_self);
+
+#define MAX_IO_PICS 2
+#define NR_IRQS (64 + (256 * MAX_IO_PICS))
+
+#define CORES_PER_EIO_NODE 4
+
+#define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */
+#define LOONGSON_CPU_THSENS_VEC 14 /* CPU Thsens */
+#define LOONGSON_CPU_HT0_VEC 16 /* CPU HT0 irq vector base number */
+#define LOONGSON_CPU_HT1_VEC 24 /* CPU HT1 irq vector base number */
+
+/* IRQ number definitions */
+#define LOONGSON_LPC_IRQ_BASE 0
+#define LOONGSON_LPC_LAST_IRQ (LOONGSON_LPC_IRQ_BASE + 15)
+
+#define LOONGSON_CPU_IRQ_BASE 16
+#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 14)
+
+#define LOONGSON_PCH_IRQ_BASE 64
+#define LOONGSON_PCH_ACPI_IRQ (LOONGSON_PCH_IRQ_BASE + 47)
+#define LOONGSON_PCH_LAST_IRQ (LOONGSON_PCH_IRQ_BASE + 64 - 1)
+
+#define LOONGSON_MSI_IRQ_BASE (LOONGSON_PCH_IRQ_BASE + 64)
+#define LOONGSON_MSI_LAST_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1)
+
+#define GSI_MIN_LPC_IRQ LOONGSON_LPC_IRQ_BASE
+#define GSI_MAX_LPC_IRQ (LOONGSON_LPC_IRQ_BASE + 16 - 1)
+#define GSI_MIN_CPU_IRQ LOONGSON_CPU_IRQ_BASE
+#define GSI_MAX_CPU_IRQ (LOONGSON_CPU_IRQ_BASE + 48 - 1)
+#define GSI_MIN_PCH_IRQ LOONGSON_PCH_IRQ_BASE
+#define GSI_MAX_PCH_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1)
+
+extern int find_pch_pic(u32 gsi);
+extern int eiointc_get_node(int id);
+
+static inline void eiointc_enable(void)
+{
+ uint64_t misc;
+
+ misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
+ misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
+ iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
+}
+
+struct acpi_madt_lio_pic;
+struct acpi_madt_eio_pic;
+struct acpi_madt_ht_pic;
+struct acpi_madt_bio_pic;
+struct acpi_madt_msi_pic;
+struct acpi_madt_lpc_pic;
+
+struct irq_domain *loongarch_cpu_irq_init(void);
+
+struct irq_domain *liointc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_lio_pic *acpi_liointc);
+struct irq_domain *eiointc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_eio_pic *acpi_eiointc);
+
+struct irq_domain *htvec_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_ht_pic *acpi_htvec);
+struct irq_domain *pch_lpc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_lpc_pic *acpi_pchlpc);
+struct irq_domain *pch_msi_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_msi_pic *acpi_pchmsi);
+struct irq_domain *pch_pic_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_bio_pic *acpi_pchpic);
+
+extern struct acpi_madt_lio_pic *acpi_liointc;
+extern struct acpi_madt_eio_pic *acpi_eiointc[MAX_IO_PICS];
+
+extern struct acpi_madt_ht_pic *acpi_htintc;
+extern struct acpi_madt_lpc_pic *acpi_pchlpc;
+extern struct acpi_madt_msi_pic *acpi_pchmsi[MAX_IO_PICS];
+extern struct acpi_madt_bio_pic *acpi_pchpic[MAX_IO_PICS];
+
+extern struct irq_domain *cpu_domain;
+extern struct irq_domain *liointc_domain;
+extern struct irq_domain *pch_lpc_domain;
+extern struct irq_domain *pch_msi_domain[MAX_IO_PICS];
+extern struct irq_domain *pch_pic_domain[MAX_IO_PICS];
+
+extern irqreturn_t loongson3_ipi_interrupt(int irq, void *dev);
+
+#include <asm-generic/irq.h>
+
+#endif /* _ASM_IRQ_H */
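
The GSI number layout defined above can be summarized in a standalone sketch (not part of the patch) that classifies a GSI by range, much as the ACPI code must: 0-15 LPC, 16-63 CPU, 64-319 PCH (the last 256 of which carry MSI).

    #include <stdio.h>

    #define LPC_BASE 0
    #define CPU_BASE 16
    #define PCH_BASE 64
    #define PCH_LAST (PCH_BASE + 256 - 1)

    static const char *gsi_domain(unsigned int gsi)
    {
        if (gsi < CPU_BASE)
            return "LPC";
        if (gsi < PCH_BASE)
            return "CPU";
        if (gsi <= PCH_LAST)
            return "PCH";
        return "invalid";
    }

    int main(void)
    {
        unsigned int samples[] = { 4, 27, 111, 400 };

        for (unsigned int i = 0; i < 4; i++)
            printf("GSI %3u -> %s\n", samples[i], gsi_domain(samples[i]));
        return 0;
    }
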
diff --git a/arch/loongarch/include/asm/irq_regs.h b/arch/loongarch/include/asm/irq_regs.h
new file mode 100644
index 000000000000..3d62d815bf6b
--- /dev/null
+++ b/arch/loongarch/include/asm/irq_regs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_IRQ_REGS_H
+#define __ASM_IRQ_REGS_H
+
+#define ARCH_HAS_OWN_IRQ_REGS
+
+#include <linux/thread_info.h>
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+ return current_thread_info()->regs;
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+ struct pt_regs *old_regs;
+
+ old_regs = get_irq_regs();
+ current_thread_info()->regs = new_regs;
+
+ return old_regs;
+}
+
+#endif /* __ASM_IRQ_REGS_H */
diff --git a/arch/loongarch/include/asm/irqflags.h b/arch/loongarch/include/asm/irqflags.h
new file mode 100644
index 000000000000..52121cd791fe
--- /dev/null
+++ b/arch/loongarch/include/asm/irqflags.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/stringify.h>
+#include <asm/compiler.h>
+#include <asm/loongarch.h>
+
+static inline void arch_local_irq_enable(void)
+{
+ u32 flags = CSR_CRMD_IE;
+ __asm__ __volatile__(
+ "csrxchg %[val], %[mask], %[reg]\n\t"
+ : [val] "+r" (flags)
+ : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ u32 flags = 0;
+ __asm__ __volatile__(
+ "csrxchg %[val], %[mask], %[reg]\n\t"
+ : [val] "+r" (flags)
+ : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ u32 flags = 0;
+ __asm__ __volatile__(
+ "csrxchg %[val], %[mask], %[reg]\n\t"
+ : [val] "+r" (flags)
+ : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+ return flags;
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ __asm__ __volatile__(
+ "csrxchg %[val], %[mask], %[reg]\n\t"
+ : [val] "+r" (flags)
+ : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+}
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ u32 flags;
+ __asm__ __volatile__(
+ "csrrd %[val], %[reg]\n\t"
+ : [val] "=r" (flags)
+ : [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+ return flags;
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & CSR_CRMD_IE);
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* #ifndef __ASSEMBLY__ */
+
+#endif /* _ASM_IRQFLAGS_H */
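
All four helpers above lean on one property of csrxchg: it writes (old & ~mask) | (val & mask) to the CSR and returns the old value, so arch_local_irq_save() disables IE and fetches the previous flags in a single instruction. A standalone model (not part of the patch):

    #include <stdio.h>

    #define CSR_CRMD_IE (1U << 2)

    static unsigned int crmd = CSR_CRMD_IE;  /* models LOONGARCH_CSR_CRMD */

    static unsigned int csrxchg(unsigned int val, unsigned int mask)
    {
        unsigned int old = crmd;

        crmd = (old & ~mask) | (val & mask);
        return old;
    }

    int main(void)
    {
        unsigned int flags;

        flags = csrxchg(0, CSR_CRMD_IE);      /* irq_save */
        printf("in critical section, IE=%u\n", crmd & CSR_CRMD_IE);
        csrxchg(flags, CSR_CRMD_IE);          /* irq_restore */
        printf("restored, IE=%u\n", !!(crmd & CSR_CRMD_IE));
        return 0;
    }
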
diff --git a/arch/loongarch/include/asm/kdebug.h b/arch/loongarch/include/asm/kdebug.h
new file mode 100644
index 000000000000..d721b4b82fae
--- /dev/null
+++ b/arch/loongarch/include/asm/kdebug.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_LOONGARCH_KDEBUG_H
+#define _ASM_LOONGARCH_KDEBUG_H
+
+#include <linux/notifier.h>
+
+enum die_val {
+ DIE_OOPS = 1,
+ DIE_RI,
+ DIE_FP,
+ DIE_SIMD,
+ DIE_TRAP,
+ DIE_PAGE_FAULT,
+ DIE_BREAK,
+ DIE_SSTEPBP,
+ DIE_UPROBE,
+ DIE_UPROBE_XOL,
+};
+
+#endif /* _ASM_LOONGARCH_KDEBUG_H */
diff --git a/arch/loongarch/include/asm/linkage.h b/arch/loongarch/include/asm/linkage.h
new file mode 100644
index 000000000000..81b0c4cfbf4f
--- /dev/null
+++ b/arch/loongarch/include/asm/linkage.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 2
+#define __ALIGN_STR __stringify(__ALIGN)
+
+#define SYM_FUNC_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \
+ .cfi_startproc;
+
+#define SYM_FUNC_START_NOALIGN(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) \
+ .cfi_startproc;
+
+#define SYM_FUNC_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) \
+ .cfi_startproc;
+
+#define SYM_FUNC_START_LOCAL_NOALIGN(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) \
+ .cfi_startproc;
+
+#define SYM_FUNC_START_WEAK(name) \
+ SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN) \
+ .cfi_startproc;
+
+#define SYM_FUNC_START_WEAK_NOALIGN(name) \
+ SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \
+ .cfi_startproc;
+
+#define SYM_FUNC_END(name) \
+ .cfi_endproc; \
+ SYM_END(name, SYM_T_FUNC)
+
+#endif
diff --git a/arch/loongarch/include/asm/local.h b/arch/loongarch/include/asm/local.h
new file mode 100644
index 000000000000..2052a2267337
--- /dev/null
+++ b/arch/loongarch/include/asm/local.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ARCH_LOONGARCH_LOCAL_H
+#define _ARCH_LOONGARCH_LOCAL_H
+
+#include <linux/percpu.h>
+#include <linux/bitops.h>
+#include <linux/atomic.h>
+#include <asm/cmpxchg.h>
+#include <asm/compiler.h>
+
+typedef struct {
+ atomic_long_t a;
+} local_t;
+
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l, i) atomic_long_set(&(l)->a, (i))
+
+#define local_add(i, l) atomic_long_add((i), (&(l)->a))
+#define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
+#define local_inc(l) atomic_long_inc(&(l)->a)
+#define local_dec(l) atomic_long_dec(&(l)->a)
+
+/*
+ * Same as above, but return the result value
+ */
+static inline long local_add_return(long i, local_t *l)
+{
+ unsigned long result;
+
+ __asm__ __volatile__(
+ " " __AMADD " %1, %2, %0 \n"
+ : "+ZB" (l->a.counter), "=&r" (result)
+ : "r" (i)
+ : "memory");
+ result = result + i;
+
+ return result;
+}
+
+static inline long local_sub_return(long i, local_t *l)
+{
+ unsigned long result;
+
+ __asm__ __volatile__(
+ " " __AMADD "%1, %2, %0 \n"
+ : "+ZB" (l->a.counter), "=&r" (result)
+ : "r" (-i)
+ : "memory");
+
+ result = result - i;
+
+ return result;
+}
+
+#define local_cmpxchg(l, o, n) \
+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+#define local_add_unless(l, a, u) \
+({ \
+ long c, old; \
+ c = local_read(l); \
+ while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+#define local_dec_return(l) local_sub_return(1, (l))
+#define local_inc_return(l) local_add_return(1, (l))
+
+/*
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_sub_and_test(i, l) (local_sub_return((i), (l)) == 0)
+
+/*
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_inc_and_test(l) (local_inc_return(l) == 0)
+
+/*
+ * local_dec_and_test - decrement by 1 and test
+ * @l: pointer of type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0)
+
+/*
+ * local_add_negative - add and test if negative
+ * @l: pointer of type local_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#define local_add_negative(i, l) (local_add_return(i, (l)) < 0)
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable, not an address.
+ */
+
+#define __local_inc(l) ((l)->a.counter++)
+#define __local_dec(l) ((l)->a.counter--)
+#define __local_add(i, l) ((l)->a.counter += (i))
+#define __local_sub(i, l) ((l)->a.counter -= (i))
+
+#endif /* _ARCH_LOONGARCH_LOCAL_H */
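
The local_add_unless() retry loop above can be sketched standalone (not part of the patch) with a C11 compare-exchange standing in for cmpxchg_local():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool add_unless(atomic_long *l, long a, long u)
    {
        long c = atomic_load(l);

        /* On failure the CAS refreshes c with the current value. */
        while (c != u && !atomic_compare_exchange_weak(l, &c, c + a))
            ;
        return c != u;
    }

    int main(void)
    {
        atomic_long cnt = 1;

        printf("%d\n", add_unless(&cnt, 1, 0)); /* 1: incremented to 2 */
        atomic_store(&cnt, 0);
        printf("%d\n", add_unless(&cnt, 1, 0)); /* 0: left untouched */
        return 0;
    }
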
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
new file mode 100644
index 000000000000..3ba4f7e87cd2
--- /dev/null
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -0,0 +1,1516 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_LOONGARCH_H
+#define _ASM_LOONGARCH_H
+
+#include <linux/bits.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+#ifndef __ASSEMBLY__
+#include <larchintrin.h>
+
+/*
+ * parse_r var, r - Helper assembler macro for parsing register names.
+ *
+ * This converts the register name in $n form provided in \r to the
+ * corresponding register number, which is assigned to the variable \var. It is
+ * needed to allow explicit encoding of instructions in inline assembly where
+ * registers are chosen by the compiler in $n form, allowing us to avoid using
+ * fixed register numbers.
+ *
+ * It also allows newer instructions (not implemented by the assembler) to be
+ * transparently implemented using assembler macros, instead of needing separate
+ * cases depending on toolchain support.
+ *
+ * Simple usage example:
+ * __asm__ __volatile__("parse_r addr, %0\n\t"
+ * "#invtlb op, 0, %0\n\t"
+ * ".word ((0x6498000) | (addr << 10) | (0 << 5) | op)"
+ * : "=r" (status);
+ */
+
+/* Match an individual register number and assign to \var */
+#define _IFC_REG(n) \
+ ".ifc \\r, $r" #n "\n\t" \
+ "\\var = " #n "\n\t" \
+ ".endif\n\t"
+
+__asm__(".macro parse_r var r\n\t"
+ "\\var = -1\n\t"
+ _IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3)
+ _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7)
+ _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11)
+ _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15)
+ _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19)
+ _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23)
+ _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27)
+ _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31)
+ ".iflt \\var\n\t"
+ ".error \"Unable to parse register name \\r\"\n\t"
+ ".endif\n\t"
+ ".endm");
+
+#undef _IFC_REG
+
+/* CPUCFG */
+static inline u32 read_cpucfg(u32 reg)
+{
+ return __cpucfg(reg);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef __ASSEMBLY__
+
+/* LoongArch Registers */
+#define REG_ZERO 0x0
+#define REG_RA 0x1
+#define REG_TP 0x2
+#define REG_SP 0x3
+#define REG_A0 0x4 /* Reused as V0 for return value */
+#define REG_A1 0x5 /* Reused as V1 for return value */
+#define REG_A2 0x6
+#define REG_A3 0x7
+#define REG_A4 0x8
+#define REG_A5 0x9
+#define REG_A6 0xa
+#define REG_A7 0xb
+#define REG_T0 0xc
+#define REG_T1 0xd
+#define REG_T2 0xe
+#define REG_T3 0xf
+#define REG_T4 0x10
+#define REG_T5 0x11
+#define REG_T6 0x12
+#define REG_T7 0x13
+#define REG_T8 0x14
+#define REG_U0 0x15 /* Kernel uses it as percpu base */
+#define REG_FP 0x16
+#define REG_S0 0x17
+#define REG_S1 0x18
+#define REG_S2 0x19
+#define REG_S3 0x1a
+#define REG_S4 0x1b
+#define REG_S5 0x1c
+#define REG_S6 0x1d
+#define REG_S7 0x1e
+#define REG_S8 0x1f
+
+#endif /* __ASSEMBLY__ */
+
+/* Bit fields for CPUCFG registers */
+#define LOONGARCH_CPUCFG0 0x0
+#define CPUCFG0_PRID GENMASK(31, 0)
+
+#define LOONGARCH_CPUCFG1 0x1
+#define CPUCFG1_ISGR32 BIT(0)
+#define CPUCFG1_ISGR64 BIT(1)
+#define CPUCFG1_PAGING BIT(2)
+#define CPUCFG1_IOCSR BIT(3)
+#define CPUCFG1_PABITS GENMASK(11, 4)
+#define CPUCFG1_VABITS GENMASK(19, 12)
+#define CPUCFG1_UAL BIT(20)
+#define CPUCFG1_RI BIT(21)
+#define CPUCFG1_EP BIT(22)
+#define CPUCFG1_RPLV BIT(23)
+#define CPUCFG1_HUGEPG BIT(24)
+#define CPUCFG1_IOCSRBRD BIT(25)
+#define CPUCFG1_MSGINT BIT(26)
+
+#define LOONGARCH_CPUCFG2 0x2
+#define CPUCFG2_FP BIT(0)
+#define CPUCFG2_FPSP BIT(1)
+#define CPUCFG2_FPDP BIT(2)
+#define CPUCFG2_FPVERS GENMASK(5, 3)
+#define CPUCFG2_LSX BIT(6)
+#define CPUCFG2_LASX BIT(7)
+#define CPUCFG2_COMPLEX BIT(8)
+#define CPUCFG2_CRYPTO BIT(9)
+#define CPUCFG2_LVZP BIT(10)
+#define CPUCFG2_LVZVER GENMASK(13, 11)
+#define CPUCFG2_LLFTP BIT(14)
+#define CPUCFG2_LLFTPREV GENMASK(17, 15)
+#define CPUCFG2_X86BT BIT(18)
+#define CPUCFG2_ARMBT BIT(19)
+#define CPUCFG2_MIPSBT BIT(20)
+#define CPUCFG2_LSPW BIT(21)
+#define CPUCFG2_LAM BIT(22)
+
+#define LOONGARCH_CPUCFG3 0x3
+#define CPUCFG3_CCDMA BIT(0)
+#define CPUCFG3_SFB BIT(1)
+#define CPUCFG3_UCACC BIT(2)
+#define CPUCFG3_LLEXC BIT(3)
+#define CPUCFG3_SCDLY BIT(4)
+#define CPUCFG3_LLDBAR BIT(5)
+#define CPUCFG3_ITLBT BIT(6)
+#define CPUCFG3_ICACHET BIT(7)
+#define CPUCFG3_SPW_LVL GENMASK(10, 8)
+#define CPUCFG3_SPW_HG_HF BIT(11)
+#define CPUCFG3_RVA BIT(12)
+#define CPUCFG3_RVAMAX GENMASK(16, 13)
+
+#define LOONGARCH_CPUCFG4 0x4
+#define CPUCFG4_CCFREQ GENMASK(31, 0)
+
+#define LOONGARCH_CPUCFG5 0x5
+#define CPUCFG5_CCMUL GENMASK(15, 0)
+#define CPUCFG5_CCDIV GENMASK(31, 16)
+
+#define LOONGARCH_CPUCFG6 0x6
+#define CPUCFG6_PMP BIT(0)
+#define CPUCFG6_PAMVER GENMASK(3, 1)
+#define CPUCFG6_PMNUM GENMASK(7, 4)
+#define CPUCFG6_PMBITS GENMASK(13, 8)
+#define CPUCFG6_UPM BIT(14)
+
+#define LOONGARCH_CPUCFG16 0x10
+#define CPUCFG16_L1_IUPRE BIT(0)
+#define CPUCFG16_L1_IUUNIFY BIT(1)
+#define CPUCFG16_L1_DPRE BIT(2)
+#define CPUCFG16_L2_IUPRE BIT(3)
+#define CPUCFG16_L2_IUUNIFY BIT(4)
+#define CPUCFG16_L2_IUPRIV BIT(5)
+#define CPUCFG16_L2_IUINCL BIT(6)
+#define CPUCFG16_L2_DPRE BIT(7)
+#define CPUCFG16_L2_DPRIV BIT(8)
+#define CPUCFG16_L2_DINCL BIT(9)
+#define CPUCFG16_L3_IUPRE BIT(10)
+#define CPUCFG16_L3_IUUNIFY BIT(11)
+#define CPUCFG16_L3_IUPRIV BIT(12)
+#define CPUCFG16_L3_IUINCL BIT(13)
+#define CPUCFG16_L3_DPRE BIT(14)
+#define CPUCFG16_L3_DPRIV BIT(15)
+#define CPUCFG16_L3_DINCL BIT(16)
+
+#define LOONGARCH_CPUCFG17 0x11
+#define CPUCFG17_L1I_WAYS_M GENMASK(15, 0)
+#define CPUCFG17_L1I_SETS_M GENMASK(23, 16)
+#define CPUCFG17_L1I_SIZE_M GENMASK(30, 24)
+#define CPUCFG17_L1I_WAYS 0
+#define CPUCFG17_L1I_SETS 16
+#define CPUCFG17_L1I_SIZE 24
+
+#define LOONGARCH_CPUCFG18 0x12
+#define CPUCFG18_L1D_WAYS_M GENMASK(15, 0)
+#define CPUCFG18_L1D_SETS_M GENMASK(23, 16)
+#define CPUCFG18_L1D_SIZE_M GENMASK(30, 24)
+#define CPUCFG18_L1D_WAYS 0
+#define CPUCFG18_L1D_SETS 16
+#define CPUCFG18_L1D_SIZE 24
+
+#define LOONGARCH_CPUCFG19 0x13
+#define CPUCFG19_L2_WAYS_M GENMASK(15, 0)
+#define CPUCFG19_L2_SETS_M GENMASK(23, 16)
+#define CPUCFG19_L2_SIZE_M GENMASK(30, 24)
+#define CPUCFG19_L2_WAYS 0
+#define CPUCFG19_L2_SETS 16
+#define CPUCFG19_L2_SIZE 24
+
+#define LOONGARCH_CPUCFG20 0x14
+#define CPUCFG20_L3_WAYS_M GENMASK(15, 0)
+#define CPUCFG20_L3_SETS_M GENMASK(23, 16)
+#define CPUCFG20_L3_SIZE_M GENMASK(30, 24)
+#define CPUCFG20_L3_WAYS 0
+#define CPUCFG20_L3_SETS 16
+#define CPUCFG20_L3_SIZE 24
+
+#define LOONGARCH_CPUCFG48 0x30
+#define CPUCFG48_MCSR_LCK BIT(0)
+#define CPUCFG48_NAP_EN BIT(1)
+#define CPUCFG48_VFPU_CG BIT(2)
+#define CPUCFG48_RAM_CG BIT(3)
+
+#ifndef __ASSEMBLY__
+
+/* CSR */
+static __always_inline u32 csr_read32(u32 reg)
+{
+ return __csrrd_w(reg);
+}
+
+static __always_inline u64 csr_read64(u32 reg)
+{
+ return __csrrd_d(reg);
+}
+
+static __always_inline void csr_write32(u32 val, u32 reg)
+{
+ __csrwr_w(val, reg);
+}
+
+static __always_inline void csr_write64(u64 val, u32 reg)
+{
+ __csrwr_d(val, reg);
+}
+
+static __always_inline u32 csr_xchg32(u32 val, u32 mask, u32 reg)
+{
+ return __csrxchg_w(val, mask, reg);
+}
+
+static __always_inline u64 csr_xchg64(u64 val, u64 mask, u32 reg)
+{
+ return __csrxchg_d(val, mask, reg);
+}
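+
+/*
+ * Usage sketch: csr_xchg32()/csr_xchg64() atomically replace only the
+ * bits selected by @mask with the corresponding bits of @val and return
+ * the previous register value, so a masked read-modify-write needs no
+ * separate read, e.g. (with hypothetical FIELD/REG names):
+ *
+ *	old = csr_xchg32(FIELD_VAL, FIELD_MASK, LOONGARCH_CSR_REG);
+ */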
+
+/* IOCSR */
+static __always_inline u32 iocsr_read32(u32 reg)
+{
+ return __iocsrrd_w(reg);
+}
+
+static __always_inline u64 iocsr_read64(u32 reg)
+{
+ return __iocsrrd_d(reg);
+}
+
+static __always_inline void iocsr_write32(u32 val, u32 reg)
+{
+ __iocsrwr_w(val, reg);
+}
+
+static __always_inline void iocsr_write64(u64 val, u32 reg)
+{
+ __iocsrwr_d(val, reg);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/* CSR register number */
+
+/* Basic CSR registers */
+#define LOONGARCH_CSR_CRMD 0x0 /* Current mode info */
+#define CSR_CRMD_WE_SHIFT 9
+#define CSR_CRMD_WE (_ULCAST_(0x1) << CSR_CRMD_WE_SHIFT)
+#define CSR_CRMD_DACM_SHIFT 7
+#define CSR_CRMD_DACM_WIDTH 2
+#define CSR_CRMD_DACM (_ULCAST_(0x3) << CSR_CRMD_DACM_SHIFT)
+#define CSR_CRMD_DACF_SHIFT 5
+#define CSR_CRMD_DACF_WIDTH 2
+#define CSR_CRMD_DACF (_ULCAST_(0x3) << CSR_CRMD_DACF_SHIFT)
+#define CSR_CRMD_PG_SHIFT 4
+#define CSR_CRMD_PG (_ULCAST_(0x1) << CSR_CRMD_PG_SHIFT)
+#define CSR_CRMD_DA_SHIFT 3
+#define CSR_CRMD_DA (_ULCAST_(0x1) << CSR_CRMD_DA_SHIFT)
+#define CSR_CRMD_IE_SHIFT 2
+#define CSR_CRMD_IE (_ULCAST_(0x1) << CSR_CRMD_IE_SHIFT)
+#define CSR_CRMD_PLV_SHIFT 0
+#define CSR_CRMD_PLV_WIDTH 2
+#define CSR_CRMD_PLV (_ULCAST_(0x3) << CSR_CRMD_PLV_SHIFT)
+
+#define PLV_KERN 0
+#define PLV_USER 3
+#define PLV_MASK 0x3
+
+#define LOONGARCH_CSR_PRMD 0x1 /* Prev-exception mode info */
+#define CSR_PRMD_PWE_SHIFT 3
+#define CSR_PRMD_PWE (_ULCAST_(0x1) << CSR_PRMD_PWE_SHIFT)
+#define CSR_PRMD_PIE_SHIFT 2
+#define CSR_PRMD_PIE (_ULCAST_(0x1) << CSR_PRMD_PIE_SHIFT)
+#define CSR_PRMD_PPLV_SHIFT 0
+#define CSR_PRMD_PPLV_WIDTH 2
+#define CSR_PRMD_PPLV (_ULCAST_(0x3) << CSR_PRMD_PPLV_SHIFT)
+
+#define LOONGARCH_CSR_EUEN 0x2 /* Extended unit enable */
+#define CSR_EUEN_LBTEN_SHIFT 3
+#define CSR_EUEN_LBTEN (_ULCAST_(0x1) << CSR_EUEN_LBTEN_SHIFT)
+#define CSR_EUEN_LASXEN_SHIFT 2
+#define CSR_EUEN_LASXEN (_ULCAST_(0x1) << CSR_EUEN_LASXEN_SHIFT)
+#define CSR_EUEN_LSXEN_SHIFT 1
+#define CSR_EUEN_LSXEN (_ULCAST_(0x1) << CSR_EUEN_LSXEN_SHIFT)
+#define CSR_EUEN_FPEN_SHIFT 0
+#define CSR_EUEN_FPEN (_ULCAST_(0x1) << CSR_EUEN_FPEN_SHIFT)
+
+#define LOONGARCH_CSR_MISC 0x3 /* Misc config */
+
+#define LOONGARCH_CSR_ECFG 0x4 /* Exception config */
+#define CSR_ECFG_VS_SHIFT 16
+#define CSR_ECFG_VS_WIDTH 3
+#define CSR_ECFG_VS (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT)
+#define CSR_ECFG_IM_SHIFT 0
+#define CSR_ECFG_IM_WIDTH 13
+#define CSR_ECFG_IM (_ULCAST_(0x1fff) << CSR_ECFG_IM_SHIFT)
+
+#define LOONGARCH_CSR_ESTAT 0x5 /* Exception status */
+#define CSR_ESTAT_ESUBCODE_SHIFT 22
+#define CSR_ESTAT_ESUBCODE_WIDTH 9
+#define CSR_ESTAT_ESUBCODE (_ULCAST_(0x1ff) << CSR_ESTAT_ESUBCODE_SHIFT)
+#define CSR_ESTAT_EXC_SHIFT 16
+#define CSR_ESTAT_EXC_WIDTH 6
+#define CSR_ESTAT_EXC (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT)
+#define CSR_ESTAT_IS_SHIFT 0
+#define CSR_ESTAT_IS_WIDTH 15
+#define CSR_ESTAT_IS (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT)
+
+#define LOONGARCH_CSR_ERA 0x6 /* ERA */
+
+#define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */
+
+#define LOONGARCH_CSR_BADI 0x8 /* Bad instruction */
+
+#define LOONGARCH_CSR_EENTRY 0xc /* Exception entry */
+
+/* TLB related CSR registers */
+#define LOONGARCH_CSR_TLBIDX 0x10 /* TLB Index, EHINV, PageSize, NP */
+#define CSR_TLBIDX_EHINV_SHIFT 31
+#define CSR_TLBIDX_EHINV (_ULCAST_(1) << CSR_TLBIDX_EHINV_SHIFT)
+#define CSR_TLBIDX_PS_SHIFT 24
+#define CSR_TLBIDX_PS_WIDTH 6
+#define CSR_TLBIDX_PS (_ULCAST_(0x3f) << CSR_TLBIDX_PS_SHIFT)
+#define CSR_TLBIDX_IDX_SHIFT 0
+#define CSR_TLBIDX_IDX_WIDTH 12
+#define CSR_TLBIDX_IDX (_ULCAST_(0xfff) << CSR_TLBIDX_IDX_SHIFT)
+#define CSR_TLBIDX_SIZEM 0x3f000000
+#define CSR_TLBIDX_SIZE CSR_TLBIDX_PS_SHIFT
+#define CSR_TLBIDX_IDXM 0xfff
+#define CSR_INVALID_ENTRY(e) (CSR_TLBIDX_EHINV | (e))
+
+#define LOONGARCH_CSR_TLBEHI 0x11 /* TLB EntryHi */
+
+#define LOONGARCH_CSR_TLBELO0 0x12 /* TLB EntryLo0 */
+#define CSR_TLBLO0_RPLV_SHIFT 63
+#define CSR_TLBLO0_RPLV (_ULCAST_(0x1) << CSR_TLBLO0_RPLV_SHIFT)
+#define CSR_TLBLO0_NX_SHIFT 62
+#define CSR_TLBLO0_NX (_ULCAST_(0x1) << CSR_TLBLO0_NX_SHIFT)
+#define CSR_TLBLO0_NR_SHIFT 61
+#define CSR_TLBLO0_NR (_ULCAST_(0x1) << CSR_TLBLO0_NR_SHIFT)
+#define CSR_TLBLO0_PFN_SHIFT 12
+#define CSR_TLBLO0_PFN_WIDTH 36
+#define CSR_TLBLO0_PFN (_ULCAST_(0xfffffffff) << CSR_TLBLO0_PFN_SHIFT)
+#define CSR_TLBLO0_GLOBAL_SHIFT 6
+#define CSR_TLBLO0_GLOBAL (_ULCAST_(0x1) << CSR_TLBLO0_GLOBAL_SHIFT)
+#define CSR_TLBLO0_CCA_SHIFT 4
+#define CSR_TLBLO0_CCA_WIDTH 2
+#define CSR_TLBLO0_CCA (_ULCAST_(0x3) << CSR_TLBLO0_CCA_SHIFT)
+#define CSR_TLBLO0_PLV_SHIFT 2
+#define CSR_TLBLO0_PLV_WIDTH 2
+#define CSR_TLBLO0_PLV (_ULCAST_(0x3) << CSR_TLBLO0_PLV_SHIFT)
+#define CSR_TLBLO0_WE_SHIFT 1
+#define CSR_TLBLO0_WE (_ULCAST_(0x1) << CSR_TLBLO0_WE_SHIFT)
+#define CSR_TLBLO0_V_SHIFT 0
+#define CSR_TLBLO0_V (_ULCAST_(0x1) << CSR_TLBLO0_V_SHIFT)
+
+#define LOONGARCH_CSR_TLBELO1 0x13 /* TLB EntryLo1 */
+#define CSR_TLBLO1_RPLV_SHIFT 63
+#define CSR_TLBLO1_RPLV (_ULCAST_(0x1) << CSR_TLBLO1_RPLV_SHIFT)
+#define CSR_TLBLO1_NX_SHIFT 62
+#define CSR_TLBLO1_NX (_ULCAST_(0x1) << CSR_TLBLO1_NX_SHIFT)
+#define CSR_TLBLO1_NR_SHIFT 61
+#define CSR_TLBLO1_NR (_ULCAST_(0x1) << CSR_TLBLO1_NR_SHIFT)
+#define CSR_TLBLO1_PFN_SHIFT 12
+#define CSR_TLBLO1_PFN_WIDTH 36
+#define CSR_TLBLO1_PFN (_ULCAST_(0xfffffffff) << CSR_TLBLO1_PFN_SHIFT)
+#define CSR_TLBLO1_GLOBAL_SHIFT 6
+#define CSR_TLBLO1_GLOBAL (_ULCAST_(0x1) << CSR_TLBLO1_GLOBAL_SHIFT)
+#define CSR_TLBLO1_CCA_SHIFT 4
+#define CSR_TLBLO1_CCA_WIDTH 2
+#define CSR_TLBLO1_CCA (_ULCAST_(0x3) << CSR_TLBLO1_CCA_SHIFT)
+#define CSR_TLBLO1_PLV_SHIFT 2
+#define CSR_TLBLO1_PLV_WIDTH 2
+#define CSR_TLBLO1_PLV (_ULCAST_(0x3) << CSR_TLBLO1_PLV_SHIFT)
+#define CSR_TLBLO1_WE_SHIFT 1
+#define CSR_TLBLO1_WE (_ULCAST_(0x1) << CSR_TLBLO1_WE_SHIFT)
+#define CSR_TLBLO1_V_SHIFT 0
+#define CSR_TLBLO1_V (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT)
+
+#define LOONGARCH_CSR_GTLBC 0x15 /* Guest TLB control */
+#define CSR_GTLBC_RID_SHIFT 16
+#define CSR_GTLBC_RID_WIDTH 8
+#define CSR_GTLBC_RID (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT)
+#define CSR_GTLBC_TOTI_SHIFT 13
+#define CSR_GTLBC_TOTI (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT)
+#define CSR_GTLBC_USERID_SHIFT 12
+#define CSR_GTLBC_USERID (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT)
+#define CSR_GTLBC_GMTLBSZ_SHIFT 0
+#define CSR_GTLBC_GMTLBSZ_WIDTH 6
+#define CSR_GTLBC_GMTLBSZ (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT)
+
+#define LOONGARCH_CSR_TRGP 0x16 /* TLBR read guest info */
+#define CSR_TRGP_RID_SHIFT 16
+#define CSR_TRGP_RID_WIDTH 8
+#define CSR_TRGP_RID (_ULCAST_(0xff) << CSR_TRGP_RID_SHIFT)
+#define CSR_TRGP_GTLB_SHIFT 0
+#define CSR_TRGP_GTLB (_ULCAST_(1) << CSR_TRGP_GTLB_SHIFT)
+
+#define LOONGARCH_CSR_ASID 0x18 /* ASID */
+#define CSR_ASID_BIT_SHIFT 16 /* ASIDBits */
+#define CSR_ASID_BIT_WIDTH 8
+#define CSR_ASID_BIT (_ULCAST_(0xff) << CSR_ASID_BIT_SHIFT)
+#define CSR_ASID_ASID_SHIFT 0
+#define CSR_ASID_ASID_WIDTH 10
+#define CSR_ASID_ASID (_ULCAST_(0x3ff) << CSR_ASID_ASID_SHIFT)
+
+#define LOONGARCH_CSR_PGDL 0x19 /* Page table base address when VA[47] = 0 */
+
+#define LOONGARCH_CSR_PGDH 0x1a /* Page table base address when VA[47] = 1 */
+
+#define LOONGARCH_CSR_PGD 0x1b /* Page table base */
+
+#define LOONGARCH_CSR_PWCTL0 0x1c /* PWCtl0 */
+#define CSR_PWCTL0_PTEW_SHIFT 30
+#define CSR_PWCTL0_PTEW_WIDTH 2
+#define CSR_PWCTL0_PTEW (_ULCAST_(0x3) << CSR_PWCTL0_PTEW_SHIFT)
+#define CSR_PWCTL0_DIR1WIDTH_SHIFT 25
+#define CSR_PWCTL0_DIR1WIDTH_WIDTH 5
+#define CSR_PWCTL0_DIR1WIDTH (_ULCAST_(0x1f) << CSR_PWCTL0_DIR1WIDTH_SHIFT)
+#define CSR_PWCTL0_DIR1BASE_SHIFT 20
+#define CSR_PWCTL0_DIR1BASE_WIDTH 5
+#define CSR_PWCTL0_DIR1BASE (_ULCAST_(0x1f) << CSR_PWCTL0_DIR1BASE_SHIFT)
+#define CSR_PWCTL0_DIR0WIDTH_SHIFT 15
+#define CSR_PWCTL0_DIR0WIDTH_WIDTH 5
+#define CSR_PWCTL0_DIR0WIDTH (_ULCAST_(0x1f) << CSR_PWCTL0_DIR0WIDTH_SHIFT)
+#define CSR_PWCTL0_DIR0BASE_SHIFT 10
+#define CSR_PWCTL0_DIR0BASE_WIDTH 5
+#define CSR_PWCTL0_DIR0BASE (_ULCAST_(0x1f) << CSR_PWCTL0_DIR0BASE_SHIFT)
+#define CSR_PWCTL0_PTWIDTH_SHIFT 5
+#define CSR_PWCTL0_PTWIDTH_WIDTH 5
+#define CSR_PWCTL0_PTWIDTH (_ULCAST_(0x1f) << CSR_PWCTL0_PTWIDTH_SHIFT)
+#define CSR_PWCTL0_PTBASE_SHIFT 0
+#define CSR_PWCTL0_PTBASE_WIDTH 5
+#define CSR_PWCTL0_PTBASE (_ULCAST_(0x1f) << CSR_PWCTL0_PTBASE_SHIFT)
+
+#define LOONGARCH_CSR_PWCTL1 0x1d /* PWCtl1 */
+#define CSR_PWCTL1_DIR3WIDTH_SHIFT 18
+#define CSR_PWCTL1_DIR3WIDTH_WIDTH 5
+#define CSR_PWCTL1_DIR3WIDTH (_ULCAST_(0x1f) << CSR_PWCTL1_DIR3WIDTH_SHIFT)
+#define CSR_PWCTL1_DIR3BASE_SHIFT 12
+#define CSR_PWCTL1_DIR3BASE_WIDTH 5
+#define CSR_PWCTL1_DIR3BASE (_ULCAST_(0x1f) << CSR_PWCTL1_DIR3BASE_SHIFT)
+#define CSR_PWCTL1_DIR2WIDTH_SHIFT 6
+#define CSR_PWCTL1_DIR2WIDTH_WIDTH 5
+#define CSR_PWCTL1_DIR2WIDTH (_ULCAST_(0x1f) << CSR_PWCTL1_DIR2WIDTH_SHIFT)
+#define CSR_PWCTL1_DIR2BASE_SHIFT 0
+#define CSR_PWCTL1_DIR2BASE_WIDTH 5
+#define CSR_PWCTL1_DIR2BASE (_ULCAST_(0x1f) << CSR_PWCTL1_DIR2BASE_SHIFT)
+
+#define LOONGARCH_CSR_STLBPGSIZE 0x1e
+#define CSR_STLBPGSIZE_PS_WIDTH 6
+#define CSR_STLBPGSIZE_PS (_ULCAST_(0x3f))
+
+#define LOONGARCH_CSR_RVACFG 0x1f
+#define CSR_RVACFG_RDVA_WIDTH 4
+#define CSR_RVACFG_RDVA (_ULCAST_(0xf))
+
+/* Config CSR registers */
+#define LOONGARCH_CSR_CPUID 0x20 /* CPU core id */
+#define CSR_CPUID_COREID_WIDTH 9
+#define CSR_CPUID_COREID _ULCAST_(0x1ff)
+
+#define LOONGARCH_CSR_PRCFG1 0x21 /* Config1 */
+#define CSR_CONF1_VSMAX_SHIFT 12
+#define CSR_CONF1_VSMAX_WIDTH 3
+#define CSR_CONF1_VSMAX (_ULCAST_(7) << CSR_CONF1_VSMAX_SHIFT)
+#define CSR_CONF1_TMRBITS_SHIFT 4
+#define CSR_CONF1_TMRBITS_WIDTH 8
+#define CSR_CONF1_TMRBITS (_ULCAST_(0xff) << CSR_CONF1_TMRBITS_SHIFT)
+#define CSR_CONF1_KSNUM_WIDTH 4
+#define CSR_CONF1_KSNUM _ULCAST_(0xf)
+
+#define LOONGARCH_CSR_PRCFG2 0x22 /* Config2 */
+#define CSR_CONF2_PGMASK_SUPP 0x3ffff000
+
+#define LOONGARCH_CSR_PRCFG3 0x23 /* Config3 */
+#define CSR_CONF3_STLBIDX_SHIFT 20
+#define CSR_CONF3_STLBIDX_WIDTH 6
+#define CSR_CONF3_STLBIDX (_ULCAST_(0x3f) << CSR_CONF3_STLBIDX_SHIFT)
+#define CSR_CONF3_STLBWAYS_SHIFT 12
+#define CSR_CONF3_STLBWAYS_WIDTH 8
+#define CSR_CONF3_STLBWAYS (_ULCAST_(0xff) << CSR_CONF3_STLBWAYS_SHIFT)
+#define CSR_CONF3_MTLBSIZE_SHIFT 4
+#define CSR_CONF3_MTLBSIZE_WIDTH 8
+#define CSR_CONF3_MTLBSIZE (_ULCAST_(0xff) << CSR_CONF3_MTLBSIZE_SHIFT)
+#define CSR_CONF3_TLBTYPE_SHIFT 0
+#define CSR_CONF3_TLBTYPE_WIDTH 4
+#define CSR_CONF3_TLBTYPE (_ULCAST_(0xf) << CSR_CONF3_TLBTYPE_SHIFT)
+
+/* KSave registers */
+#define LOONGARCH_CSR_KS0 0x30
+#define LOONGARCH_CSR_KS1 0x31
+#define LOONGARCH_CSR_KS2 0x32
+#define LOONGARCH_CSR_KS3 0x33
+#define LOONGARCH_CSR_KS4 0x34
+#define LOONGARCH_CSR_KS5 0x35
+#define LOONGARCH_CSR_KS6 0x36
+#define LOONGARCH_CSR_KS7 0x37
+#define LOONGARCH_CSR_KS8 0x38
+
+/* KS0, KS1 and KS2 are statically allocated for exception handling */
+#define EXCEPTION_KS0 LOONGARCH_CSR_KS0
+#define EXCEPTION_KS1 LOONGARCH_CSR_KS1
+#define EXCEPTION_KS2 LOONGARCH_CSR_KS2
+#define EXC_KSAVE_MASK (1 << 0 | 1 << 1 | 1 << 2)
+
+/* KS3 is statically allocated as the percpu-data base */
+#define PERCPU_BASE_KS LOONGARCH_CSR_KS3
+#define PERCPU_KSAVE_MASK (1 << 3)
+
+/* KS4 and KS5 are statically allocated for KVM */
+#define KVM_VCPU_KS LOONGARCH_CSR_KS4
+#define KVM_TEMP_KS LOONGARCH_CSR_KS5
+#define KVM_KSAVE_MASK (1 << 4 | 1 << 5)
+
+/* Timer registers */
+#define LOONGARCH_CSR_TMID 0x40 /* Timer ID */
+
+#define LOONGARCH_CSR_TCFG 0x41 /* Timer config */
+#define CSR_TCFG_VAL_SHIFT 2
+#define CSR_TCFG_VAL_WIDTH 48
+#define CSR_TCFG_VAL (_ULCAST_(0x3fffffffffff) << CSR_TCFG_VAL_SHIFT)
+#define CSR_TCFG_PERIOD_SHIFT 1
+#define CSR_TCFG_PERIOD (_ULCAST_(0x1) << CSR_TCFG_PERIOD_SHIFT)
+#define CSR_TCFG_EN (_ULCAST_(0x1))
+
+#define LOONGARCH_CSR_TVAL 0x42 /* Timer value */
+
+#define LOONGARCH_CSR_CNTC 0x43 /* Timer offset */
+
+#define LOONGARCH_CSR_TINTCLR 0x44 /* Timer interrupt clear */
+#define CSR_TINTCLR_TI_SHIFT 0
+#define CSR_TINTCLR_TI (1 << CSR_TINTCLR_TI_SHIFT)
+
+/* Guest registers */
+#define LOONGARCH_CSR_GSTAT 0x50 /* Guest status */
+#define CSR_GSTAT_GID_SHIFT 16
+#define CSR_GSTAT_GID_WIDTH 8
+#define CSR_GSTAT_GID (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT)
+#define CSR_GSTAT_GIDBIT_SHIFT 4
+#define CSR_GSTAT_GIDBIT_WIDTH 6
+#define CSR_GSTAT_GIDBIT (_ULCAST_(0x3f) << CSR_GSTAT_GIDBIT_SHIFT)
+#define CSR_GSTAT_PVM_SHIFT 1
+#define CSR_GSTAT_PVM (_ULCAST_(0x1) << CSR_GSTAT_PVM_SHIFT)
+#define CSR_GSTAT_VM_SHIFT 0
+#define CSR_GSTAT_VM (_ULCAST_(0x1) << CSR_GSTAT_VM_SHIFT)
+
+#define LOONGARCH_CSR_GCFG 0x51 /* Guest config */
+#define CSR_GCFG_GPERF_SHIFT 24
+#define CSR_GCFG_GPERF_WIDTH 3
+#define CSR_GCFG_GPERF (_ULCAST_(0x7) << CSR_GCFG_GPERF_SHIFT)
+#define CSR_GCFG_GCI_SHIFT 20
+#define CSR_GCFG_GCI_WIDTH 2
+#define CSR_GCFG_GCI (_ULCAST_(0x3) << CSR_GCFG_GCI_SHIFT)
+#define CSR_GCFG_GCI_ALL (_ULCAST_(0x0) << CSR_GCFG_GCI_SHIFT)
+#define CSR_GCFG_GCI_HIT (_ULCAST_(0x1) << CSR_GCFG_GCI_SHIFT)
+#define CSR_GCFG_GCI_SECURE (_ULCAST_(0x2) << CSR_GCFG_GCI_SHIFT)
+#define CSR_GCFG_GCIP_SHIFT 16
+#define CSR_GCFG_GCIP (_ULCAST_(0xf) << CSR_GCFG_GCIP_SHIFT)
+#define CSR_GCFG_GCIP_ALL (_ULCAST_(0x1) << CSR_GCFG_GCIP_SHIFT)
+#define CSR_GCFG_GCIP_HIT (_ULCAST_(0x1) << (CSR_GCFG_GCIP_SHIFT + 1))
+#define CSR_GCFG_GCIP_SECURE (_ULCAST_(0x1) << (CSR_GCFG_GCIP_SHIFT + 2))
+#define CSR_GCFG_TORU_SHIFT 15
+#define CSR_GCFG_TORU (_ULCAST_(0x1) << CSR_GCFG_TORU_SHIFT)
+#define CSR_GCFG_TORUP_SHIFT 14
+#define CSR_GCFG_TORUP (_ULCAST_(0x1) << CSR_GCFG_TORUP_SHIFT)
+#define CSR_GCFG_TOP_SHIFT 13
+#define CSR_GCFG_TOP (_ULCAST_(0x1) << CSR_GCFG_TOP_SHIFT)
+#define CSR_GCFG_TOPP_SHIFT 12
+#define CSR_GCFG_TOPP (_ULCAST_(0x1) << CSR_GCFG_TOPP_SHIFT)
+#define CSR_GCFG_TOE_SHIFT 11
+#define CSR_GCFG_TOE (_ULCAST_(0x1) << CSR_GCFG_TOE_SHIFT)
+#define CSR_GCFG_TOEP_SHIFT 10
+#define CSR_GCFG_TOEP (_ULCAST_(0x1) << CSR_GCFG_TOEP_SHIFT)
+#define CSR_GCFG_TIT_SHIFT 9
+#define CSR_GCFG_TIT (_ULCAST_(0x1) << CSR_GCFG_TIT_SHIFT)
+#define CSR_GCFG_TITP_SHIFT 8
+#define CSR_GCFG_TITP (_ULCAST_(0x1) << CSR_GCFG_TITP_SHIFT)
+#define CSR_GCFG_SIT_SHIFT 7
+#define CSR_GCFG_SIT (_ULCAST_(0x1) << CSR_GCFG_SIT_SHIFT)
+#define CSR_GCFG_SITP_SHIFT 6
+#define CSR_GCFG_SITP (_ULCAST_(0x1) << CSR_GCFG_SITP_SHIFT)
+#define CSR_GCFG_MATC_SHIFT 4
+#define CSR_GCFG_MATC_WIDTH 2
+#define CSR_GCFG_MATC_MASK (_ULCAST_(0x3) << CSR_GCFG_MATC_SHIFT)
+#define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHIFT)
+#define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHIFT)
+#define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHIFT)
+
+#define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */
+#define CSR_GINTC_HC_SHIFT 16
+#define CSR_GINTC_HC_WIDTH 8
+#define CSR_GINTC_HC (_ULCAST_(0xff) << CSR_GINTC_HC_SHIFT)
+#define CSR_GINTC_PIP_SHIFT 8
+#define CSR_GINTC_PIP_WIDTH 8
+#define CSR_GINTC_PIP (_ULCAST_(0xff) << CSR_GINTC_PIP_SHIFT)
+#define CSR_GINTC_VIP_SHIFT 0
+#define CSR_GINTC_VIP_WIDTH 8
+#define CSR_GINTC_VIP (_ULCAST_(0xff))
+
+#define LOONGARCH_CSR_GCNTC 0x53 /* Guest timer offset */
+
+/* LLBCTL register */
+#define LOONGARCH_CSR_LLBCTL 0x60 /* LLBit control */
+#define CSR_LLBCTL_ROLLB_SHIFT 0
+#define CSR_LLBCTL_ROLLB (_ULCAST_(1) << CSR_LLBCTL_ROLLB_SHIFT)
+#define CSR_LLBCTL_WCLLB_SHIFT 1
+#define CSR_LLBCTL_WCLLB (_ULCAST_(1) << CSR_LLBCTL_WCLLB_SHIFT)
+#define CSR_LLBCTL_KLO_SHIFT 2
+#define CSR_LLBCTL_KLO (_ULCAST_(1) << CSR_LLBCTL_KLO_SHIFT)
+
+/* Implementation dependent */
+#define LOONGARCH_CSR_IMPCTL1 0x80 /* Loongson config1 */
+#define CSR_MISPEC_SHIFT 20
+#define CSR_MISPEC_WIDTH 8
+#define CSR_MISPEC (_ULCAST_(0xff) << CSR_MISPEC_SHIFT)
+#define CSR_SSEN_SHIFT 18
+#define CSR_SSEN (_ULCAST_(1) << CSR_SSEN_SHIFT)
+#define CSR_SCRAND_SHIFT 17
+#define CSR_SCRAND (_ULCAST_(1) << CSR_SCRAND_SHIFT)
+#define CSR_LLEXCL_SHIFT 16
+#define CSR_LLEXCL (_ULCAST_(1) << CSR_LLEXCL_SHIFT)
+#define CSR_DISVC_SHIFT 15
+#define CSR_DISVC (_ULCAST_(1) << CSR_DISVC_SHIFT)
+#define CSR_VCLRU_SHIFT 14
+#define CSR_VCLRU (_ULCAST_(1) << CSR_VCLRU_SHIFT)
+#define CSR_DCLRU_SHIFT 13
+#define CSR_DCLRU (_ULCAST_(1) << CSR_DCLRU_SHIFT)
+#define CSR_FASTLDQ_SHIFT 12
+#define CSR_FASTLDQ (_ULCAST_(1) << CSR_FASTLDQ_SHIFT)
+#define CSR_USERCAC_SHIFT 11
+#define CSR_USERCAC (_ULCAST_(1) << CSR_USERCAC_SHIFT)
+#define CSR_ANTI_MISPEC_SHIFT 10
+#define CSR_ANTI_MISPEC (_ULCAST_(1) << CSR_ANTI_MISPEC_SHIFT)
+#define CSR_AUTO_FLUSHSFB_SHIFT 9
+#define CSR_AUTO_FLUSHSFB (_ULCAST_(1) << CSR_AUTO_FLUSHSFB_SHIFT)
+#define CSR_STFILL_SHIFT 8
+#define CSR_STFILL (_ULCAST_(1) << CSR_STFILL_SHIFT)
+#define CSR_LIFEP_SHIFT 7
+#define CSR_LIFEP (_ULCAST_(1) << CSR_LIFEP_SHIFT)
+#define CSR_LLSYNC_SHIFT 6
+#define CSR_LLSYNC (_ULCAST_(1) << CSR_LLSYNC_SHIFT)
+#define CSR_BRBTDIS_SHIFT 5
+#define CSR_BRBTDIS (_ULCAST_(1) << CSR_BRBTDIS_SHIFT)
+#define CSR_RASDIS_SHIFT 4
+#define CSR_RASDIS (_ULCAST_(1) << CSR_RASDIS_SHIFT)
+#define CSR_STPRE_SHIFT 2
+#define CSR_STPRE_WIDTH 2
+#define CSR_STPRE (_ULCAST_(3) << CSR_STPRE_SHIFT)
+#define CSR_INSTPRE_SHIFT 1
+#define CSR_INSTPRE (_ULCAST_(1) << CSR_INSTPRE_SHIFT)
+#define CSR_DATAPRE_SHIFT 0
+#define CSR_DATAPRE (_ULCAST_(1) << CSR_DATAPRE_SHIFT)
+
+#define LOONGARCH_CSR_IMPCTL2 0x81 /* Loongson config2 */
+#define CSR_FLUSH_MTLB_SHIFT 0
+#define CSR_FLUSH_MTLB (_ULCAST_(1) << CSR_FLUSH_MTLB_SHIFT)
+#define CSR_FLUSH_STLB_SHIFT 1
+#define CSR_FLUSH_STLB (_ULCAST_(1) << CSR_FLUSH_STLB_SHIFT)
+#define CSR_FLUSH_DTLB_SHIFT 2
+#define CSR_FLUSH_DTLB (_ULCAST_(1) << CSR_FLUSH_DTLB_SHIFT)
+#define CSR_FLUSH_ITLB_SHIFT 3
+#define CSR_FLUSH_ITLB (_ULCAST_(1) << CSR_FLUSH_ITLB_SHIFT)
+#define CSR_FLUSH_BTAC_SHIFT 4
+#define CSR_FLUSH_BTAC (_ULCAST_(1) << CSR_FLUSH_BTAC_SHIFT)
+
+#define LOONGARCH_CSR_GNMI 0x82
+
+/* TLB Refill registers */
+#define LOONGARCH_CSR_TLBRENTRY 0x88 /* TLB refill exception entry */
+#define LOONGARCH_CSR_TLBRBADV 0x89 /* TLB refill badvaddr */
+#define LOONGARCH_CSR_TLBRERA 0x8a /* TLB refill ERA */
+#define LOONGARCH_CSR_TLBRSAVE 0x8b /* KSave for TLB refill exception */
+#define LOONGARCH_CSR_TLBRELO0 0x8c /* TLB refill entrylo0 */
+#define LOONGARCH_CSR_TLBRELO1 0x8d /* TLB refill entrylo1 */
+#define LOONGARCH_CSR_TLBREHI 0x8e /* TLB refill entryhi */
+#define CSR_TLBREHI_PS_SHIFT 0
+#define CSR_TLBREHI_PS (_ULCAST_(0x3f) << CSR_TLBREHI_PS_SHIFT)
+#define LOONGARCH_CSR_TLBRPRMD 0x8f /* TLB refill mode info */
+
+/* Machine Error registers */
+#define LOONGARCH_CSR_MERRCTL 0x90 /* MERRCTL */
+#define LOONGARCH_CSR_MERRINFO1 0x91 /* MError info1 */
+#define LOONGARCH_CSR_MERRINFO2 0x92 /* MError info2 */
+#define LOONGARCH_CSR_MERRENTRY 0x93 /* MError exception entry */
+#define LOONGARCH_CSR_MERRERA 0x94 /* MError exception ERA */
+#define LOONGARCH_CSR_MERRSAVE 0x95 /* KSave for machine error exception */
+
+#define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */
+
+#define LOONGARCH_CSR_PRID 0xc0
+
+/* Shadow MCSR : 0xc0 ~ 0xff */
+#define LOONGARCH_CSR_MCSR0 0xc0 /* CPUCFG0 and CPUCFG1 */
+#define MCSR0_INT_IMPL_SHIFT 58
+#define MCSR0_INT_IMPL 0
+#define MCSR0_IOCSR_BRD_SHIFT 57
+#define MCSR0_IOCSR_BRD (_ULCAST_(1) << MCSR0_IOCSR_BRD_SHIFT)
+#define MCSR0_HUGEPG_SHIFT 56
+#define MCSR0_HUGEPG (_ULCAST_(1) << MCSR0_HUGEPG_SHIFT)
+#define MCSR0_RPLMTLB_SHIFT 55
+#define MCSR0_RPLMTLB (_ULCAST_(1) << MCSR0_RPLMTLB_SHIFT)
+#define MCSR0_EP_SHIFT 54
+#define MCSR0_EP (_ULCAST_(1) << MCSR0_EP_SHIFT)
+#define MCSR0_RI_SHIFT 53
+#define MCSR0_RI (_ULCAST_(1) << MCSR0_RI_SHIFT)
+#define MCSR0_UAL_SHIFT 52
+#define MCSR0_UAL (_ULCAST_(1) << MCSR0_UAL_SHIFT)
+#define MCSR0_VABIT_SHIFT 44
+#define MCSR0_VABIT_WIDTH 8
+#define MCSR0_VABIT (_ULCAST_(0xff) << MCSR0_VABIT_SHIFT)
+#define VABIT_DEFAULT 0x2f
+#define MCSR0_PABIT_SHIFT 36
+#define MCSR0_PABIT_WIDTH 8
+#define MCSR0_PABIT (_ULCAST_(0xff) << MCSR0_PABIT_SHIFT)
+#define PABIT_DEFAULT 0x2f
+#define MCSR0_IOCSR_SHIFT 35
+#define MCSR0_IOCSR (_ULCAST_(1) << MCSR0_IOCSR_SHIFT)
+#define MCSR0_PAGING_SHIFT 34
+#define MCSR0_PAGING (_ULCAST_(1) << MCSR0_PAGING_SHIFT)
+#define MCSR0_GR64_SHIFT 33
+#define MCSR0_GR64 (_ULCAST_(1) << MCSR0_GR64_SHIFT)
+#define GR64_DEFAULT 1
+#define MCSR0_GR32_SHIFT 32
+#define MCSR0_GR32 (_ULCAST_(1) << MCSR0_GR32_SHIFT)
+#define GR32_DEFAULT 0
+#define MCSR0_PRID_WIDTH 32
+#define MCSR0_PRID 0x14C010
+
+#define LOONGARCH_CSR_MCSR1 0xc1 /* CPUCFG2 and CPUCFG3 */
+#define MCSR1_HPFOLD_SHIFT 43
+#define MCSR1_HPFOLD (_ULCAST_(1) << MCSR1_HPFOLD_SHIFT)
+#define MCSR1_SPW_LVL_SHIFT 40
+#define MCSR1_SPW_LVL_WIDTH 3
+#define MCSR1_SPW_LVL (_ULCAST_(7) << MCSR1_SPW_LVL_SHIFT)
+#define MCSR1_ICACHET_SHIFT 39
+#define MCSR1_ICACHET (_ULCAST_(1) << MCSR1_ICACHET_SHIFT)
+#define MCSR1_ITLBT_SHIFT 38
+#define MCSR1_ITLBT (_ULCAST_(1) << MCSR1_ITLBT_SHIFT)
+#define MCSR1_LLDBAR_SHIFT 37
+#define MCSR1_LLDBAR (_ULCAST_(1) << MCSR1_LLDBAR_SHIFT)
+#define MCSR1_SCDLY_SHIFT 36
+#define MCSR1_SCDLY (_ULCAST_(1) << MCSR1_SCDLY_SHIFT)
+#define MCSR1_LLEXC_SHIFT 35
+#define MCSR1_LLEXC (_ULCAST_(1) << MCSR1_LLEXC_SHIFT)
+#define MCSR1_UCACC_SHIFT 34
+#define MCSR1_UCACC (_ULCAST_(1) << MCSR1_UCACC_SHIFT)
+#define MCSR1_SFB_SHIFT 33
+#define MCSR1_SFB (_ULCAST_(1) << MCSR1_SFB_SHIFT)
+#define MCSR1_CCDMA_SHIFT 32
+#define MCSR1_CCDMA (_ULCAST_(1) << MCSR1_CCDMA_SHIFT)
+#define MCSR1_LAMO_SHIFT 22
+#define MCSR1_LAMO (_ULCAST_(1) << MCSR1_LAMO_SHIFT)
+#define MCSR1_LSPW_SHIFT 21
+#define MCSR1_LSPW (_ULCAST_(1) << MCSR1_LSPW_SHIFT)
+#define MCSR1_MIPSBT_SHIFT 20
+#define MCSR1_MIPSBT (_ULCAST_(1) << MCSR1_MIPSBT_SHIFT)
+#define MCSR1_ARMBT_SHIFT 19
+#define MCSR1_ARMBT (_ULCAST_(1) << MCSR1_ARMBT_SHIFT)
+#define MCSR1_X86BT_SHIFT 18
+#define MCSR1_X86BT (_ULCAST_(1) << MCSR1_X86BT_SHIFT)
+#define MCSR1_LLFTPVERS_SHIFT 15
+#define MCSR1_LLFTPVERS_WIDTH 3
+#define MCSR1_LLFTPVERS (_ULCAST_(7) << MCSR1_LLFTPVERS_SHIFT)
+#define MCSR1_LLFTP_SHIFT 14
+#define MCSR1_LLFTP (_ULCAST_(1) << MCSR1_LLFTP_SHIFT)
+#define MCSR1_VZVERS_SHIFT 11
+#define MCSR1_VZVERS_WIDTH 3
+#define MCSR1_VZVERS (_ULCAST_(7) << MCSR1_VZVERS_SHIFT)
+#define MCSR1_VZ_SHIFT 10
+#define MCSR1_VZ (_ULCAST_(1) << MCSR1_VZ_SHIFT)
+#define MCSR1_CRYPTO_SHIFT 9
+#define MCSR1_CRYPTO (_ULCAST_(1) << MCSR1_CRYPTO_SHIFT)
+#define MCSR1_COMPLEX_SHIFT 8
+#define MCSR1_COMPLEX (_ULCAST_(1) << MCSR1_COMPLEX_SHIFT)
+#define MCSR1_LASX_SHIFT 7
+#define MCSR1_LASX (_ULCAST_(1) << MCSR1_LASX_SHIFT)
+#define MCSR1_LSX_SHIFT 6
+#define MCSR1_LSX (_ULCAST_(1) << MCSR1_LSX_SHIFT)
+#define MCSR1_FPVERS_SHIFT 3
+#define MCSR1_FPVERS_WIDTH 3
+#define MCSR1_FPVERS (_ULCAST_(7) << MCSR1_FPVERS_SHIFT)
+#define MCSR1_FPDP_SHIFT 2
+#define MCSR1_FPDP (_ULCAST_(1) << MCSR1_FPDP_SHIFT)
+#define MCSR1_FPSP_SHIFT 1
+#define MCSR1_FPSP (_ULCAST_(1) << MCSR1_FPSP_SHIFT)
+#define MCSR1_FP_SHIFT 0
+#define MCSR1_FP (_ULCAST_(1) << MCSR1_FP_SHIFT)
+
+#define LOONGARCH_CSR_MCSR2 0xc2 /* CPUCFG4 and CPUCFG5 */
+#define MCSR2_CCDIV_SHIFT 48
+#define MCSR2_CCDIV_WIDTH 16
+#define MCSR2_CCDIV (_ULCAST_(0xffff) << MCSR2_CCDIV_SHIFT)
+#define MCSR2_CCMUL_SHIFT 32
+#define MCSR2_CCMUL_WIDTH 16
+#define MCSR2_CCMUL (_ULCAST_(0xffff) << MCSR2_CCMUL_SHIFT)
+#define MCSR2_CCFREQ_WIDTH 32
+#define MCSR2_CCFREQ (_ULCAST_(0xffffffff))
+#define CCFREQ_DEFAULT 0x5f5e100 /* 100MHz */
+
+#define LOONGARCH_CSR_MCSR3 0xc3 /* CPUCFG6 */
+#define MCSR3_UPM_SHIFT 14
+#define MCSR3_UPM (_ULCAST_(1) << MCSR3_UPM_SHIFT)
+#define MCSR3_PMBITS_SHIFT 8
+#define MCSR3_PMBITS_WIDTH 6
+#define MCSR3_PMBITS (_ULCAST_(0x3f) << MCSR3_PMBITS_SHIFT)
+#define PMBITS_DEFAULT 0x40
+#define MCSR3_PMNUM_SHIFT 4
+#define MCSR3_PMNUM_WIDTH 4
+#define MCSR3_PMNUM (_ULCAST_(0xf) << MCSR3_PMNUM_SHIFT)
+#define MCSR3_PAMVER_SHIFT 1
+#define MCSR3_PAMVER_WIDTH 3
+#define MCSR3_PAMVER (_ULCAST_(0x7) << MCSR3_PAMVER_SHIFT)
+#define MCSR3_PMP_SHIFT 0
+#define MCSR3_PMP (_ULCAST_(1) << MCSR3_PMP_SHIFT)
+
+#define LOONGARCH_CSR_MCSR8 0xc8 /* CPUCFG16 and CPUCFG17 */
+#define MCSR8_L1I_SIZE_SHIFT 56
+#define MCSR8_L1I_SIZE_WIDTH 7
+#define MCSR8_L1I_SIZE (_ULCAST_(0x7f) << MCSR8_L1I_SIZE_SHIFT)
+#define MCSR8_L1I_IDX_SHIFT 48
+#define MCSR8_L1I_IDX_WIDTH 8
+#define MCSR8_L1I_IDX (_ULCAST_(0xff) << MCSR8_L1I_IDX_SHIFT)
+#define MCSR8_L1I_WAY_SHIFT 32
+#define MCSR8_L1I_WAY_WIDTH 16
+#define MCSR8_L1I_WAY (_ULCAST_(0xffff) << MCSR8_L1I_WAY_SHIFT)
+#define MCSR8_L3DINCL_SHIFT 16
+#define MCSR8_L3DINCL (_ULCAST_(1) << MCSR8_L3DINCL_SHIFT)
+#define MCSR8_L3DPRIV_SHIFT 15
+#define MCSR8_L3DPRIV (_ULCAST_(1) << MCSR8_L3DPRIV_SHIFT)
+#define MCSR8_L3DPRE_SHIFT 14
+#define MCSR8_L3DPRE (_ULCAST_(1) << MCSR8_L3DPRE_SHIFT)
+#define MCSR8_L3IUINCL_SHIFT 13
+#define MCSR8_L3IUINCL (_ULCAST_(1) << MCSR8_L3IUINCL_SHIFT)
+#define MCSR8_L3IUPRIV_SHIFT 12
+#define MCSR8_L3IUPRIV (_ULCAST_(1) << MCSR8_L3IUPRIV_SHIFT)
+#define MCSR8_L3IUUNIFY_SHIFT 11
+#define MCSR8_L3IUUNIFY (_ULCAST_(1) << MCSR8_L3IUUNIFY_SHIFT)
+#define MCSR8_L3IUPRE_SHIFT 10
+#define MCSR8_L3IUPRE (_ULCAST_(1) << MCSR8_L3IUPRE_SHIFT)
+#define MCSR8_L2DINCL_SHIFT 9
+#define MCSR8_L2DINCL (_ULCAST_(1) << MCSR8_L2DINCL_SHIFT)
+#define MCSR8_L2DPRIV_SHIFT 8
+#define MCSR8_L2DPRIV (_ULCAST_(1) << MCSR8_L2DPRIV_SHIFT)
+#define MCSR8_L2DPRE_SHIFT 7
+#define MCSR8_L2DPRE (_ULCAST_(1) << MCSR8_L2DPRE_SHIFT)
+#define MCSR8_L2IUINCL_SHIFT 6
+#define MCSR8_L2IUINCL (_ULCAST_(1) << MCSR8_L2IUINCL_SHIFT)
+#define MCSR8_L2IUPRIV_SHIFT 5
+#define MCSR8_L2IUPRIV (_ULCAST_(1) << MCSR8_L2IUPRIV_SHIFT)
+#define MCSR8_L2IUUNIFY_SHIFT 4
+#define MCSR8_L2IUUNIFY (_ULCAST_(1) << MCSR8_L2IUUNIFY_SHIFT)
+#define MCSR8_L2IUPRE_SHIFT 3
+#define MCSR8_L2IUPRE (_ULCAST_(1) << MCSR8_L2IUPRE_SHIFT)
+#define MCSR8_L1DPRE_SHIFT 2
+#define MCSR8_L1DPRE (_ULCAST_(1) << MCSR8_L1DPRE_SHIFT)
+#define MCSR8_L1IUUNIFY_SHIFT 1
+#define MCSR8_L1IUUNIFY (_ULCAST_(1) << MCSR8_L1IUUNIFY_SHIFT)
+#define MCSR8_L1IUPRE_SHIFT 0
+#define MCSR8_L1IUPRE (_ULCAST_(1) << MCSR8_L1IUPRE_SHIFT)
+
+#define LOONGARCH_CSR_MCSR9 0xc9 /* CPUCFG18 and CPUCFG19 */
+#define MCSR9_L2U_SIZE_SHIFT 56
+#define MCSR9_L2U_SIZE_WIDTH 7
+#define MCSR9_L2U_SIZE (_ULCAST_(0x7f) << MCSR9_L2U_SIZE_SHIFT)
+#define MCSR9_L2U_IDX_SHIFT 48
+#define MCSR9_L2U_IDX_WIDTH 8
+#define MCSR9_L2U_IDX (_ULCAST_(0xff) << MCSR9_L2U_IDX_SHIFT)
+#define MCSR9_L2U_WAY_SHIFT 32
+#define MCSR9_L2U_WAY_WIDTH 16
+#define MCSR9_L2U_WAY (_ULCAST_(0xffff) << MCSR9_L2U_WAY_SHIFT)
+#define MCSR9_L1D_SIZE_SHIFT 24
+#define MCSR9_L1D_SIZE_WIDTH 7
+#define MCSR9_L1D_SIZE (_ULCAST_(0x7f) << MCSR9_L1D_SIZE_SHIFT)
+#define MCSR9_L1D_IDX_SHIFT 16
+#define MCSR9_L1D_IDX_WIDTH 8
+#define MCSR9_L1D_IDX (_ULCAST_(0xff) << MCSR9_L1D_IDX_SHIFT)
+#define MCSR9_L1D_WAY_SHIFT 0
+#define MCSR9_L1D_WAY_WIDTH 16
+#define MCSR9_L1D_WAY (_ULCAST_(0xffff) << MCSR9_L1D_WAY_SHIFT)
+
+#define LOONGARCH_CSR_MCSR10 0xca /* CPUCFG20 */
+#define MCSR10_L3U_SIZE_SHIFT 24
+#define MCSR10_L3U_SIZE_WIDTH 7
+#define MCSR10_L3U_SIZE (_ULCAST_(0x7f) << MCSR10_L3U_SIZE_SHIFT)
+#define MCSR10_L3U_IDX_SHIFT 16
+#define MCSR10_L3U_IDX_WIDTH 8
+#define MCSR10_L3U_IDX (_ULCAST_(0xff) << MCSR10_L3U_IDX_SHIFT)
+#define MCSR10_L3U_WAY_SHIFT 0
+#define MCSR10_L3U_WAY_WIDTH 16
+#define MCSR10_L3U_WAY (_ULCAST_(0xffff) << MCSR10_L3U_WAY_SHIFT)
+
+#define LOONGARCH_CSR_MCSR24 0xf0 /* cpucfg48 */
+#define MCSR24_RAMCG_SHIFT 3
+#define MCSR24_RAMCG (_ULCAST_(1) << MCSR24_RAMCG_SHIFT)
+#define MCSR24_VFPUCG_SHIFT 2
+#define MCSR24_VFPUCG (_ULCAST_(1) << MCSR24_VFPUCG_SHIFT)
+#define MCSR24_NAPEN_SHIFT 1
+#define MCSR24_NAPEN (_ULCAST_(1) << MCSR24_NAPEN_SHIFT)
+#define MCSR24_MCSRLOCK_SHIFT 0
+#define MCSR24_MCSRLOCK (_ULCAST_(1) << MCSR24_MCSRLOCK_SHIFT)
+
+/* Uncached accelerate window registers */
+#define LOONGARCH_CSR_UCAWIN 0x100
+#define LOONGARCH_CSR_UCAWIN0_LO 0x102
+#define LOONGARCH_CSR_UCAWIN0_HI 0x103
+#define LOONGARCH_CSR_UCAWIN1_LO 0x104
+#define LOONGARCH_CSR_UCAWIN1_HI 0x105
+#define LOONGARCH_CSR_UCAWIN2_LO 0x106
+#define LOONGARCH_CSR_UCAWIN2_HI 0x107
+#define LOONGARCH_CSR_UCAWIN3_LO 0x108
+#define LOONGARCH_CSR_UCAWIN3_HI 0x109
+
+/* Direct Map window registers */
+#define LOONGARCH_CSR_DMWIN0 0x180 /* 64 direct map win0: MEM & IF */
+#define LOONGARCH_CSR_DMWIN1 0x181 /* 64 direct map win1: MEM & IF */
+#define LOONGARCH_CSR_DMWIN2 0x182 /* 64 direct map win2: MEM */
+#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */
+
+/* Direct Map window 0/1 */
+#define CSR_DMW0_PLV0 _CONST64_(1 << 0)
+#define CSR_DMW0_VSEG _CONST64_(0x8000)
+#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS)
+#define CSR_DMW0_INIT (CSR_DMW0_BASE | CSR_DMW0_PLV0)
+
+#define CSR_DMW1_PLV0 _CONST64_(1 << 0)
+#define CSR_DMW1_MAT _CONST64_(1 << 4)
+#define CSR_DMW1_VSEG _CONST64_(0x9000)
+#define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS)
+#define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0)
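+
+/*
+ * Worked example, assuming DMW_PABITS is 48: CSR_DMW0_INIT evaluates
+ * to 0x8000000000000001 (base VSEG 0x8000, PLV0, MAT 0 i.e. uncached)
+ * and CSR_DMW1_INIT to 0x9000000000000011 (base VSEG 0x9000, PLV0,
+ * MAT 1 i.e. coherent cached).
+ */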
+
+/* Performance Counter registers */
+#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */
+#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */
+#define LOONGARCH_CSR_PERFCTRL1 0x202 /* 32 perf event 1 config */
+#define LOONGARCH_CSR_PERFCNTR1 0x203 /* 64 perf event 1 count value */
+#define LOONGARCH_CSR_PERFCTRL2 0x204 /* 32 perf event 2 config */
+#define LOONGARCH_CSR_PERFCNTR2 0x205 /* 64 perf event 2 count value */
+#define LOONGARCH_CSR_PERFCTRL3 0x206 /* 32 perf event 3 config */
+#define LOONGARCH_CSR_PERFCNTR3 0x207 /* 64 perf event 3 count value */
+#define CSR_PERFCTRL_PLV0 (_ULCAST_(1) << 16)
+#define CSR_PERFCTRL_PLV1 (_ULCAST_(1) << 17)
+#define CSR_PERFCTRL_PLV2 (_ULCAST_(1) << 18)
+#define CSR_PERFCTRL_PLV3 (_ULCAST_(1) << 19)
+#define CSR_PERFCTRL_IE (_ULCAST_(1) << 20)
+#define CSR_PERFCTRL_EVENT 0x3ff
+
+/* Debug registers */
+#define LOONGARCH_CSR_MWPC 0x300 /* data breakpoint config */
+#define LOONGARCH_CSR_MWPS 0x301 /* data breakpoint status */
+
+#define LOONGARCH_CSR_DB0ADDR 0x310 /* data breakpoint 0 address */
+#define LOONGARCH_CSR_DB0MASK 0x311 /* data breakpoint 0 mask */
+#define LOONGARCH_CSR_DB0CTL 0x312 /* data breakpoint 0 control */
+#define LOONGARCH_CSR_DB0ASID 0x313 /* data breakpoint 0 asid */
+
+#define LOONGARCH_CSR_DB1ADDR 0x318 /* data breakpoint 1 address */
+#define LOONGARCH_CSR_DB1MASK 0x319 /* data breakpoint 1 mask */
+#define LOONGARCH_CSR_DB1CTL 0x31a /* data breakpoint 1 control */
+#define LOONGARCH_CSR_DB1ASID 0x31b /* data breakpoint 1 asid */
+
+#define LOONGARCH_CSR_DB2ADDR 0x320 /* data breakpoint 2 address */
+#define LOONGARCH_CSR_DB2MASK 0x321 /* data breakpoint 2 mask */
+#define LOONGARCH_CSR_DB2CTL 0x322 /* data breakpoint 2 control */
+#define LOONGARCH_CSR_DB2ASID 0x323 /* data breakpoint 2 asid */
+
+#define LOONGARCH_CSR_DB3ADDR 0x328 /* data breakpoint 3 address */
+#define LOONGARCH_CSR_DB3MASK 0x329 /* data breakpoint 3 mask */
+#define LOONGARCH_CSR_DB3CTL 0x32a /* data breakpoint 3 control */
+#define LOONGARCH_CSR_DB3ASID 0x32b /* data breakpoint 3 asid */
+
+#define LOONGARCH_CSR_DB4ADDR 0x330 /* data breakpoint 4 address */
+#define LOONGARCH_CSR_DB4MASK 0x331 /* data breakpoint 4 mask */
+#define LOONGARCH_CSR_DB4CTL 0x332 /* data breakpoint 4 control */
+#define LOONGARCH_CSR_DB4ASID 0x333 /* data breakpoint 4 asid */
+
+#define LOONGARCH_CSR_DB5ADDR 0x338 /* data breakpoint 5 address */
+#define LOONGARCH_CSR_DB5MASK 0x339 /* data breakpoint 5 mask */
+#define LOONGARCH_CSR_DB5CTL 0x33a /* data breakpoint 5 control */
+#define LOONGARCH_CSR_DB5ASID 0x33b /* data breakpoint 5 asid */
+
+#define LOONGARCH_CSR_DB6ADDR 0x340 /* data breakpoint 6 address */
+#define LOONGARCH_CSR_DB6MASK 0x341 /* data breakpoint 6 mask */
+#define LOONGARCH_CSR_DB6CTL 0x342 /* data breakpoint 6 control */
+#define LOONGARCH_CSR_DB6ASID 0x343 /* data breakpoint 6 asid */
+
+#define LOONGARCH_CSR_DB7ADDR 0x348 /* data breakpoint 7 address */
+#define LOONGARCH_CSR_DB7MASK 0x349 /* data breakpoint 7 mask */
+#define LOONGARCH_CSR_DB7CTL 0x34a /* data breakpoint 7 control */
+#define LOONGARCH_CSR_DB7ASID 0x34b /* data breakpoint 7 asid */
+
+#define LOONGARCH_CSR_FWPC 0x380 /* instruction breakpoint config */
+#define LOONGARCH_CSR_FWPS 0x381 /* instruction breakpoint status */
+
+#define LOONGARCH_CSR_IB0ADDR 0x390 /* inst breakpoint 0 address */
+#define LOONGARCH_CSR_IB0MASK 0x391 /* inst breakpoint 0 mask */
+#define LOONGARCH_CSR_IB0CTL 0x392 /* inst breakpoint 0 control */
+#define LOONGARCH_CSR_IB0ASID 0x393 /* inst breakpoint 0 asid */
+
+#define LOONGARCH_CSR_IB1ADDR 0x398 /* inst breakpoint 1 address */
+#define LOONGARCH_CSR_IB1MASK 0x399 /* inst breakpoint 1 mask */
+#define LOONGARCH_CSR_IB1CTL 0x39a /* inst breakpoint 1 control */
+#define LOONGARCH_CSR_IB1ASID 0x39b /* inst breakpoint 1 asid */
+
+#define LOONGARCH_CSR_IB2ADDR 0x3a0 /* inst breakpoint 2 address */
+#define LOONGARCH_CSR_IB2MASK 0x3a1 /* inst breakpoint 2 mask */
+#define LOONGARCH_CSR_IB2CTL 0x3a2 /* inst breakpoint 2 control */
+#define LOONGARCH_CSR_IB2ASID 0x3a3 /* inst breakpoint 2 asid */
+
+#define LOONGARCH_CSR_IB3ADDR 0x3a8 /* inst breakpoint 3 address */
+#define LOONGARCH_CSR_IB3MASK 0x3a9 /* inst breakpoint 3 mask */
+#define LOONGARCH_CSR_IB3CTL 0x3aa /* inst breakpoint 3 control */
+#define LOONGARCH_CSR_IB3ASID 0x3ab /* inst breakpoint 3 asid */
+
+#define LOONGARCH_CSR_IB4ADDR 0x3b0 /* inst breakpoint 4 address */
+#define LOONGARCH_CSR_IB4MASK 0x3b1 /* inst breakpoint 4 mask */
+#define LOONGARCH_CSR_IB4CTL 0x3b2 /* inst breakpoint 4 control */
+#define LOONGARCH_CSR_IB4ASID 0x3b3 /* inst breakpoint 4 asid */
+
+#define LOONGARCH_CSR_IB5ADDR 0x3b8 /* inst breakpoint 5 address */
+#define LOONGARCH_CSR_IB5MASK 0x3b9 /* inst breakpoint 5 mask */
+#define LOONGARCH_CSR_IB5CTL 0x3ba /* inst breakpoint 5 control */
+#define LOONGARCH_CSR_IB5ASID 0x3bb /* inst breakpoint 5 asid */
+
+#define LOONGARCH_CSR_IB6ADDR 0x3c0 /* inst breakpoint 6 address */
+#define LOONGARCH_CSR_IB6MASK 0x3c1 /* inst breakpoint 6 mask */
+#define LOONGARCH_CSR_IB6CTL 0x3c2 /* inst breakpoint 6 control */
+#define LOONGARCH_CSR_IB6ASID 0x3c3 /* inst breakpoint 6 asid */
+
+#define LOONGARCH_CSR_IB7ADDR 0x3c8 /* inst breakpoint 7 address */
+#define LOONGARCH_CSR_IB7MASK 0x3c9 /* inst breakpoint 7 mask */
+#define LOONGARCH_CSR_IB7CTL 0x3ca /* inst breakpoint 7 control */
+#define LOONGARCH_CSR_IB7ASID 0x3cb /* inst breakpoint 7 asid */
+
+#define LOONGARCH_CSR_DEBUG 0x500 /* debug config */
+#define LOONGARCH_CSR_DERA 0x501 /* debug era */
+#define LOONGARCH_CSR_DESAVE 0x502 /* debug save */
+
+/*
+ * CSR_ECFG IM
+ */
+#define ECFG0_IM 0x00001fff
+#define ECFGB_SIP0 0
+#define ECFGF_SIP0 (_ULCAST_(1) << ECFGB_SIP0)
+#define ECFGB_SIP1 1
+#define ECFGF_SIP1 (_ULCAST_(1) << ECFGB_SIP1)
+#define ECFGB_IP0 2
+#define ECFGF_IP0 (_ULCAST_(1) << ECFGB_IP0)
+#define ECFGB_IP1 3
+#define ECFGF_IP1 (_ULCAST_(1) << ECFGB_IP1)
+#define ECFGB_IP2 4
+#define ECFGF_IP2 (_ULCAST_(1) << ECFGB_IP2)
+#define ECFGB_IP3 5
+#define ECFGF_IP3 (_ULCAST_(1) << ECFGB_IP3)
+#define ECFGB_IP4 6
+#define ECFGF_IP4 (_ULCAST_(1) << ECFGB_IP4)
+#define ECFGB_IP5 7
+#define ECFGF_IP5 (_ULCAST_(1) << ECFGB_IP5)
+#define ECFGB_IP6 8
+#define ECFGF_IP6 (_ULCAST_(1) << ECFGB_IP6)
+#define ECFGB_IP7 9
+#define ECFGF_IP7 (_ULCAST_(1) << ECFGB_IP7)
+#define ECFGB_PMC 10
+#define ECFGF_PMC (_ULCAST_(1) << ECFGB_PMC)
+#define ECFGB_TIMER 11
+#define ECFGF_TIMER (_ULCAST_(1) << ECFGB_TIMER)
+#define ECFGB_IPI 12
+#define ECFGF_IPI (_ULCAST_(1) << ECFGB_IPI)
+#define ECFGF(hwirq) (_ULCAST_(1) << (hwirq))
+
+#define ESTATF_IP 0x00001fff
+
+#define LOONGARCH_IOCSR_FEATURES 0x8
+#define IOCSRF_TEMP BIT_ULL(0)
+#define IOCSRF_NODECNT BIT_ULL(1)
+#define IOCSRF_MSI BIT_ULL(2)
+#define IOCSRF_EXTIOI BIT_ULL(3)
+#define IOCSRF_CSRIPI BIT_ULL(4)
+#define IOCSRF_FREQCSR BIT_ULL(5)
+#define IOCSRF_FREQSCALE BIT_ULL(6)
+#define IOCSRF_DVFSV1 BIT_ULL(7)
+#define IOCSRF_EIODECODE BIT_ULL(9)
+#define IOCSRF_FLATMODE BIT_ULL(10)
+#define IOCSRF_VM BIT_ULL(11)
+
+#define LOONGARCH_IOCSR_VENDOR 0x10
+
+#define LOONGARCH_IOCSR_CPUNAME 0x20
+
+#define LOONGARCH_IOCSR_NODECNT 0x408
+
+#define LOONGARCH_IOCSR_MISC_FUNC 0x420
+#define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21)
+#define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48)
+
+#define LOONGARCH_IOCSR_CPUTEMP 0x428
+
+/* Per-core IOCSRs, only accessible by the local core */
+#define LOONGARCH_IOCSR_IPI_STATUS 0x1000
+#define LOONGARCH_IOCSR_IPI_EN 0x1004
+#define LOONGARCH_IOCSR_IPI_SET 0x1008
+#define LOONGARCH_IOCSR_IPI_CLEAR 0x100c
+#define LOONGARCH_IOCSR_MBUF0 0x1020
+#define LOONGARCH_IOCSR_MBUF1 0x1028
+#define LOONGARCH_IOCSR_MBUF2 0x1030
+#define LOONGARCH_IOCSR_MBUF3 0x1038
+
+#define LOONGARCH_IOCSR_IPI_SEND 0x1040
+#define IOCSR_IPI_SEND_IP_SHIFT 0
+#define IOCSR_IPI_SEND_CPU_SHIFT 16
+#define IOCSR_IPI_SEND_BLOCKING BIT(31)
+
+#define LOONGARCH_IOCSR_MBUF_SEND 0x1048
+#define IOCSR_MBUF_SEND_BLOCKING BIT_ULL(31)
+#define IOCSR_MBUF_SEND_BOX_SHIFT 2
+#define IOCSR_MBUF_SEND_BOX_LO(box) ((box) << 1)
+#define IOCSR_MBUF_SEND_BOX_HI(box) (((box) << 1) + 1)
+#define IOCSR_MBUF_SEND_CPU_SHIFT 16
+#define IOCSR_MBUF_SEND_BUF_SHIFT 32
+#define IOCSR_MBUF_SEND_H32_MASK 0xFFFFFFFF00000000ULL
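+
+#ifndef __ASSEMBLY__
+/*
+ * Sketch of how the MBUF_SEND fields compose (hypothetical helper, not
+ * an established kernel API): each 64-bit IOCSR write carries 32 bits
+ * of payload, so delivering a full mailbox word takes two writes, one
+ * per 32-bit half of the selected mailbox.
+ */
+static __always_inline void mbuf_send_sketch(u64 data, int cpu, int box)
+{
+	u64 val;
+
+	/* High half: select the odd 32-bit sub-box */
+	val = IOCSR_MBUF_SEND_BLOCKING |
+	      (IOCSR_MBUF_SEND_BOX_HI(box) << IOCSR_MBUF_SEND_BOX_SHIFT) |
+	      ((u64)cpu << IOCSR_MBUF_SEND_CPU_SHIFT) |
+	      (data & IOCSR_MBUF_SEND_H32_MASK);
+	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
+
+	/* Low half: select the even 32-bit sub-box */
+	val = IOCSR_MBUF_SEND_BLOCKING |
+	      (IOCSR_MBUF_SEND_BOX_LO(box) << IOCSR_MBUF_SEND_BOX_SHIFT) |
+	      ((u64)cpu << IOCSR_MBUF_SEND_CPU_SHIFT) |
+	      (data << IOCSR_MBUF_SEND_BUF_SHIFT);
+	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
+}
+#endif /* !__ASSEMBLY__ */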
+
+#define LOONGARCH_IOCSR_ANY_SEND 0x1158
+#define IOCSR_ANY_SEND_BLOCKING BIT_ULL(31)
+#define IOCSR_ANY_SEND_CPU_SHIFT 16
+#define IOCSR_ANY_SEND_MASK_SHIFT 27
+#define IOCSR_ANY_SEND_BUF_SHIFT 32
+#define IOCSR_ANY_SEND_H32_MASK 0xFFFFFFFF00000000ULL
+
+/* Register offset and bit definition for CSR access */
+#define LOONGARCH_IOCSR_TIMER_CFG 0x1060
+#define LOONGARCH_IOCSR_TIMER_TICK 0x1070
+#define IOCSR_TIMER_CFG_RESERVED (_ULCAST_(1) << 63)
+#define IOCSR_TIMER_CFG_PERIODIC (_ULCAST_(1) << 62)
+#define IOCSR_TIMER_CFG_EN (_ULCAST_(1) << 61)
+#define IOCSR_TIMER_MASK 0x0ffffffffffffULL
+#define IOCSR_TIMER_INITVAL_RST (_ULCAST_(0xffff) << 48)
+
+#define LOONGARCH_IOCSR_EXTIOI_NODEMAP_BASE 0x14a0
+#define LOONGARCH_IOCSR_EXTIOI_IPMAP_BASE 0x14c0
+#define LOONGARCH_IOCSR_EXTIOI_EN_BASE 0x1600
+#define LOONGARCH_IOCSR_EXTIOI_BOUNCE_BASE 0x1680
+#define LOONGARCH_IOCSR_EXTIOI_ISR_BASE 0x1800
+#define LOONGARCH_IOCSR_EXTIOI_ROUTE_BASE 0x1c00
+#define IOCSR_EXTIOI_VECTOR_NUM 256
+
+#ifndef __ASSEMBLY__
+
+static inline u64 drdtime(void)
+{
+ int rID = 0;
+ u64 val = 0;
+
+ __asm__ __volatile__(
+ "rdtime.d %0, %1 \n\t"
+ : "=r"(val), "=r"(rID)
+ :
+ );
+ return val;
+}
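+
+/*
+ * Usage sketch: rdtime.d reads the free-running stable counter, so a
+ * crude cycle-delta measurement is just two reads:
+ *
+ *	u64 t0 = drdtime();
+ *	do_work();
+ *	delta = drdtime() - t0;
+ */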
+
+static inline unsigned int get_csr_cpuid(void)
+{
+ return csr_read32(LOONGARCH_CSR_CPUID);
+}
+
+static inline void csr_any_send(unsigned int addr, unsigned int data,
+ unsigned int data_mask, unsigned int cpu)
+{
+ uint64_t val = 0;
+
+ val = IOCSR_ANY_SEND_BLOCKING | addr;
+ val |= (cpu << IOCSR_ANY_SEND_CPU_SHIFT);
+ val |= (data_mask << IOCSR_ANY_SEND_MASK_SHIFT);
+ val |= ((uint64_t)data << IOCSR_ANY_SEND_BUF_SHIFT);
+ iocsr_write64(val, LOONGARCH_IOCSR_ANY_SEND);
+}
+
+static inline unsigned int read_csr_excode(void)
+{
+ return (csr_read32(LOONGARCH_CSR_ESTAT) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
+}
+
+static inline void write_csr_index(unsigned int idx)
+{
+ csr_xchg32(idx, CSR_TLBIDX_IDXM, LOONGARCH_CSR_TLBIDX);
+}
+
+static inline unsigned int read_csr_pagesize(void)
+{
+ return (csr_read32(LOONGARCH_CSR_TLBIDX) & CSR_TLBIDX_SIZEM) >> CSR_TLBIDX_SIZE;
+}
+
+static inline void write_csr_pagesize(unsigned int size)
+{
+ csr_xchg32(size << CSR_TLBIDX_SIZE, CSR_TLBIDX_SIZEM, LOONGARCH_CSR_TLBIDX);
+}
+
+static inline unsigned int read_csr_tlbrefill_pagesize(void)
+{
+ return (csr_read64(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT;
+}
+
+static inline void write_csr_tlbrefill_pagesize(unsigned int size)
+{
+ csr_xchg64(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI);
+}
+
+#define read_csr_asid() csr_read32(LOONGARCH_CSR_ASID)
+#define write_csr_asid(val) csr_write32(val, LOONGARCH_CSR_ASID)
+#define read_csr_entryhi() csr_read64(LOONGARCH_CSR_TLBEHI)
+#define write_csr_entryhi(val) csr_write64(val, LOONGARCH_CSR_TLBEHI)
+#define read_csr_entrylo0() csr_read64(LOONGARCH_CSR_TLBELO0)
+#define write_csr_entrylo0(val) csr_write64(val, LOONGARCH_CSR_TLBELO0)
+#define read_csr_entrylo1() csr_read64(LOONGARCH_CSR_TLBELO1)
+#define write_csr_entrylo1(val) csr_write64(val, LOONGARCH_CSR_TLBELO1)
+#define read_csr_ecfg() csr_read32(LOONGARCH_CSR_ECFG)
+#define write_csr_ecfg(val) csr_write32(val, LOONGARCH_CSR_ECFG)
+#define read_csr_estat() csr_read32(LOONGARCH_CSR_ESTAT)
+#define write_csr_estat(val) csr_write32(val, LOONGARCH_CSR_ESTAT)
+#define read_csr_tlbidx() csr_read32(LOONGARCH_CSR_TLBIDX)
+#define write_csr_tlbidx(val) csr_write32(val, LOONGARCH_CSR_TLBIDX)
+#define read_csr_euen() csr_read32(LOONGARCH_CSR_EUEN)
+#define write_csr_euen(val) csr_write32(val, LOONGARCH_CSR_EUEN)
+#define read_csr_cpuid() csr_read32(LOONGARCH_CSR_CPUID)
+#define read_csr_prcfg1() csr_read64(LOONGARCH_CSR_PRCFG1)
+#define write_csr_prcfg1(val) csr_write64(val, LOONGARCH_CSR_PRCFG1)
+#define read_csr_prcfg2() csr_read64(LOONGARCH_CSR_PRCFG2)
+#define write_csr_prcfg2(val) csr_write64(val, LOONGARCH_CSR_PRCFG2)
+#define read_csr_prcfg3() csr_read64(LOONGARCH_CSR_PRCFG3)
+#define write_csr_prcfg3(val) csr_write64(val, LOONGARCH_CSR_PRCFG3)
+#define read_csr_stlbpgsize() csr_read32(LOONGARCH_CSR_STLBPGSIZE)
+#define write_csr_stlbpgsize(val) csr_write32(val, LOONGARCH_CSR_STLBPGSIZE)
+#define read_csr_rvacfg() csr_read32(LOONGARCH_CSR_RVACFG)
+#define write_csr_rvacfg(val) csr_write32(val, LOONGARCH_CSR_RVACFG)
+#define write_csr_tintclear(val) csr_write32(val, LOONGARCH_CSR_TINTCLR)
+#define read_csr_impctl1() csr_read64(LOONGARCH_CSR_IMPCTL1)
+#define write_csr_impctl1(val) csr_write64(val, LOONGARCH_CSR_IMPCTL1)
+#define write_csr_impctl2(val) csr_write64(val, LOONGARCH_CSR_IMPCTL2)
+
+#define read_csr_perfctrl0() csr_read64(LOONGARCH_CSR_PERFCTRL0)
+#define read_csr_perfcntr0() csr_read64(LOONGARCH_CSR_PERFCNTR0)
+#define read_csr_perfctrl1() csr_read64(LOONGARCH_CSR_PERFCTRL1)
+#define read_csr_perfcntr1() csr_read64(LOONGARCH_CSR_PERFCNTR1)
+#define read_csr_perfctrl2() csr_read64(LOONGARCH_CSR_PERFCTRL2)
+#define read_csr_perfcntr2() csr_read64(LOONGARCH_CSR_PERFCNTR2)
+#define read_csr_perfctrl3() csr_read64(LOONGARCH_CSR_PERFCTRL3)
+#define read_csr_perfcntr3() csr_read64(LOONGARCH_CSR_PERFCNTR3)
+#define write_csr_perfctrl0(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL0)
+#define write_csr_perfcntr0(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR0)
+#define write_csr_perfctrl1(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL1)
+#define write_csr_perfcntr1(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR1)
+#define write_csr_perfctrl2(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL2)
+#define write_csr_perfcntr2(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR2)
+#define write_csr_perfctrl3(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL3)
+#define write_csr_perfcntr3(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR3)
+
+/*
+ * Manipulate bits in a register.
+ */
+#define __BUILD_CSR_COMMON(name) \
+static inline unsigned long \
+set_##name(unsigned long set) \
+{ \
+ unsigned long res, new; \
+ \
+ res = read_##name(); \
+ new = res | set; \
+ write_##name(new); \
+ \
+ return res; \
+} \
+ \
+static inline unsigned long \
+clear_##name(unsigned long clear) \
+{ \
+ unsigned long res, new; \
+ \
+ res = read_##name(); \
+ new = res & ~clear; \
+ write_##name(new); \
+ \
+ return res; \
+} \
+ \
+static inline unsigned long \
+change_##name(unsigned long change, unsigned long val) \
+{ \
+ unsigned long res, new; \
+ \
+ res = read_##name(); \
+ new = res & ~change; \
+ new |= (val & change); \
+ write_##name(new); \
+ \
+ return res; \
+}
+
+#define __BUILD_CSR_OP(name) __BUILD_CSR_COMMON(csr_##name)
+
+__BUILD_CSR_OP(euen)
+__BUILD_CSR_OP(ecfg)
+__BUILD_CSR_OP(tlbidx)
+
+#define set_csr_estat(val) \
+ csr_xchg32(val, val, LOONGARCH_CSR_ESTAT)
+#define clear_csr_estat(val) \
+ csr_xchg32(~(val), val, LOONGARCH_CSR_ESTAT)
+
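+/*
+ * Usage sketch: the helpers generated above turn masked CSR updates
+ * into one-liners; e.g. unmasking and re-masking the timer interrupt:
+ *
+ *	set_csr_ecfg(ECFGF_TIMER);
+ *	clear_csr_ecfg(ECFGF_TIMER);
+ */
+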
+#endif /* __ASSEMBLY__ */
+
+/* Generic EntryLo bit definitions */
+#define ENTRYLO_V (_ULCAST_(1) << 0)
+#define ENTRYLO_D (_ULCAST_(1) << 1)
+#define ENTRYLO_PLV_SHIFT 2
+#define ENTRYLO_PLV (_ULCAST_(3) << ENTRYLO_PLV_SHIFT)
+#define ENTRYLO_C_SHIFT 4
+#define ENTRYLO_C (_ULCAST_(3) << ENTRYLO_C_SHIFT)
+#define ENTRYLO_G (_ULCAST_(1) << 6)
+#define ENTRYLO_NR (_ULCAST_(1) << 61)
+#define ENTRYLO_NX (_ULCAST_(1) << 62)
+
+/* Values for PageSize register */
+#define PS_4K 0x0000000c
+#define PS_8K 0x0000000d
+#define PS_16K 0x0000000e
+#define PS_32K 0x0000000f
+#define PS_64K 0x00000010
+#define PS_128K 0x00000011
+#define PS_256K 0x00000012
+#define PS_512K 0x00000013
+#define PS_1M 0x00000014
+#define PS_2M 0x00000015
+#define PS_4M 0x00000016
+#define PS_8M 0x00000017
+#define PS_16M 0x00000018
+#define PS_32M 0x00000019
+#define PS_64M 0x0000001a
+#define PS_128M 0x0000001b
+#define PS_256M 0x0000001c
+#define PS_512M 0x0000001d
+#define PS_1G 0x0000001e
+
+/* Default page size for a given kernel configuration */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PS_DEFAULT_SIZE PS_4K
+#elif defined(CONFIG_PAGE_SIZE_16KB)
+#define PS_DEFAULT_SIZE PS_16K
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PS_DEFAULT_SIZE PS_64K
+#else
+#error Bad page size configuration!
+#endif
+
+/* Default huge tlb size for a given kernel configuration */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PS_HUGE_SIZE PS_1M
+#elif defined(CONFIG_PAGE_SIZE_16KB)
+#define PS_HUGE_SIZE PS_16M
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PS_HUGE_SIZE PS_256M
+#else
+#error Bad page size configuration for hugetlbfs!
+#endif
+
+/* ExStatus.ExcCode */
+#define EXCCODE_RSV 0 /* Reserved */
+#define EXCCODE_TLBL 1 /* TLB miss on a load */
+#define EXCCODE_TLBS 2 /* TLB miss on a store */
+#define EXCCODE_TLBI 3 /* TLB miss on an ifetch */
+#define EXCCODE_TLBM 4 /* TLB modified fault */
+#define EXCCODE_TLBNR 5 /* TLB Read-Inhibit exception */
+#define EXCCODE_TLBNX 6 /* TLB Execution-Inhibit exception */
+#define EXCCODE_TLBPE 7 /* TLB Privilege Error */
+#define EXCCODE_ADE 8 /* Address Error */
+ #define EXSUBCODE_ADEF 0 /* Fetch Instruction */
+ #define EXSUBCODE_ADEM 1 /* Access Memory */
+#define EXCCODE_ALE 9 /* Unaligned Access */
+#define EXCCODE_OOB 10 /* Out of bounds */
+#define EXCCODE_SYS 11 /* System call */
+#define EXCCODE_BP 12 /* Breakpoint */
+#define EXCCODE_INE 13 /* Instruction Non-Existent */
+#define EXCCODE_IPE 14 /* Inst. Privileged Error */
+#define EXCCODE_FPDIS 15 /* FPU Disabled */
+#define EXCCODE_LSXDIS 16 /* LSX Disabled */
+#define EXCCODE_LASXDIS 17 /* LASX Disabled */
+#define EXCCODE_FPE 18 /* Floating Point Exception */
+ #define EXCSUBCODE_FPE 0 /* Floating Point Exception */
+ #define EXCSUBCODE_VFPE 1 /* Vector Exception */
+#define EXCCODE_WATCH 19 /* Watch address reference */
+#define EXCCODE_BTDIS 20 /* Binary Trans. Disabled */
+#define EXCCODE_BTE 21 /* Binary Trans. Exception */
+#define EXCCODE_PSI 22 /* Guest Privileged Error */
+#define EXCCODE_HYP 23 /* Hypercall */
+#define EXCCODE_GCM 24 /* Guest CSR modified */
+ #define EXCSUBCODE_GCSC 0 /* Software caused */
+ #define EXCSUBCODE_GCHC 1 /* Hardware caused */
+#define EXCCODE_SE 25 /* Security */
+
+#define EXCCODE_INT_START 64
+#define EXCCODE_SIP0 64
+#define EXCCODE_SIP1 65
+#define EXCCODE_IP0 66
+#define EXCCODE_IP1 67
+#define EXCCODE_IP2 68
+#define EXCCODE_IP3 69
+#define EXCCODE_IP4 70
+#define EXCCODE_IP5 71
+#define EXCCODE_IP6 72
+#define EXCCODE_IP7 73
+#define EXCCODE_PMC 74 /* Performance Counter */
+#define EXCCODE_TIMER 75
+#define EXCCODE_IPI 76
+#define EXCCODE_NMI 77
+#define EXCCODE_INT_END 78
+#define EXCCODE_INT_NUM (EXCCODE_INT_END - EXCCODE_INT_START)
+
+/* FPU control/status register (FCSR) names */
+#define LOONGARCH_FCSR0 $r0
+#define LOONGARCH_FCSR1 $r1
+#define LOONGARCH_FCSR2 $r2
+#define LOONGARCH_FCSR3 $r3
+
+/* FPU Status Register Values */
+#define FPU_CSR_RSVD 0xe0e0fce0
+
+/*
+ * X the exception cause indicator
+ * E the exception enable
+ * S the sticky/flag bit
+ */
+#define FPU_CSR_ALL_X 0x1f000000
+#define FPU_CSR_INV_X 0x10000000
+#define FPU_CSR_DIV_X 0x08000000
+#define FPU_CSR_OVF_X 0x04000000
+#define FPU_CSR_UDF_X 0x02000000
+#define FPU_CSR_INE_X 0x01000000
+
+#define FPU_CSR_ALL_S 0x001f0000
+#define FPU_CSR_INV_S 0x00100000
+#define FPU_CSR_DIV_S 0x00080000
+#define FPU_CSR_OVF_S 0x00040000
+#define FPU_CSR_UDF_S 0x00020000
+#define FPU_CSR_INE_S 0x00010000
+
+#define FPU_CSR_ALL_E 0x0000001f
+#define FPU_CSR_INV_E 0x00000010
+#define FPU_CSR_DIV_E 0x00000008
+#define FPU_CSR_OVF_E 0x00000004
+#define FPU_CSR_UDF_E 0x00000002
+#define FPU_CSR_INE_E 0x00000001
+
+/* Bits 8 and 9 of FPU Status Register specify the rounding mode */
+#define FPU_CSR_RM 0x300
+#define FPU_CSR_RN 0x000 /* nearest */
+#define FPU_CSR_RZ 0x100 /* towards zero */
+#define FPU_CSR_RU 0x200 /* towards +Infinity */
+#define FPU_CSR_RD 0x300 /* towards -Infinity */
+
+#define read_fcsr(source) \
+({ \
+ unsigned int __res; \
+\
+ __asm__ __volatile__( \
+ " movfcsr2gr %0, "__stringify(source)" \n" \
+ : "=r" (__res)); \
+ __res; \
+})
+
+#define write_fcsr(dest, val) \
+do { \
+ __asm__ __volatile__( \
+ " movgr2fcsr %0, "__stringify(dest)" \n" \
+ : : "r" (val)); \
+} while (0)
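+
+/*
+ * Usage sketch: switching the rounding mode while preserving all other
+ * FCSR bits, using the accessors above:
+ *
+ *	unsigned int fcsr = read_fcsr(LOONGARCH_FCSR0);
+ *	write_fcsr(LOONGARCH_FCSR0, (fcsr & ~FPU_CSR_RM) | FPU_CSR_RZ);
+ */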
+
+#endif /* _ASM_LOONGARCH_H */
diff --git a/arch/loongarch/include/asm/loongson.h b/arch/loongarch/include/asm/loongson.h
new file mode 100644
index 000000000000..6a8038725ba7
--- /dev/null
+++ b/arch/loongarch/include/asm/loongson.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGSON_H
+#define __ASM_LOONGSON_H
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+
+extern const struct plat_smp_ops loongson3_smp_ops;
+
+#define LOONGSON_REG(x) \
+ (*(volatile u32 *)((char *)TO_UNCACHE(LOONGSON_REG_BASE) + (x)))
+
+#define LOONGSON_LIO_BASE 0x18000000
+#define LOONGSON_LIO_SIZE 0x00100000 /* 1M */
+#define LOONGSON_LIO_TOP (LOONGSON_LIO_BASE+LOONGSON_LIO_SIZE-1)
+
+#define LOONGSON_BOOT_BASE 0x1c000000
+#define LOONGSON_BOOT_SIZE 0x02000000 /* 32M */
+#define LOONGSON_BOOT_TOP (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
+
+#define LOONGSON_REG_BASE 0x1fe00000
+#define LOONGSON_REG_SIZE 0x00100000 /* 1M */
+#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
+
+/* GPIO Regs - r/w */
+
+#define LOONGSON_GPIODATA LOONGSON_REG(0x11c)
+#define LOONGSON_GPIOIE LOONGSON_REG(0x120)
+#define LOONGSON_REG_GPIO_BASE (LOONGSON_REG_BASE + 0x11c)
+
+#define MAX_PACKAGES 16
+
+/* Chip Config register of each physical cpu package */
+extern u64 loongson_chipcfg[MAX_PACKAGES];
+#define LOONGSON_CHIPCFG(id) (*(volatile u32 *)(loongson_chipcfg[id]))
+
+/* Chip Temperature register of each physical cpu package */
+extern u64 loongson_chiptemp[MAX_PACKAGES];
+#define LOONGSON_CHIPTEMP(id) (*(volatile u32 *)(loongson_chiptemp[id]))
+
+/* Freq Control register of each physical cpu package */
+extern u64 loongson_freqctrl[MAX_PACKAGES];
+#define LOONGSON_FREQCTRL(id) (*(volatile u32 *)(loongson_freqctrl[id]))
+
+#define xconf_readl(addr) readl(addr)
+#define xconf_readq(addr) readq(addr)
+
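+/*
+ * The dummy ld.b from the same address in the two helpers below reads
+ * back through the register window, which (an ordering assumption for
+ * these config registers) forces the posted st.w/st.d to reach the
+ * device before the call returns.
+ */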
+static inline void xconf_writel(u32 val, volatile void __iomem *addr)
+{
+ asm volatile (
+ " st.w %[v], %[hw], 0 \n"
+ " ld.b $r0, %[hw], 0 \n"
+ :
+ : [hw] "r" (addr), [v] "r" (val)
+ );
+}
+
+static inline void xconf_writeq(u64 val64, volatile void __iomem *addr)
+{
+ asm volatile (
+ " st.d %[v], %[hw], 0 \n"
+ " ld.b $r0, %[hw], 0 \n"
+ :
+ : [hw] "r" (addr), [v] "r" (val64)
+ );
+}
+
+/* ============== LS7A registers =============== */
+#define LS7A_PCH_REG_BASE 0x10000000UL
+/* LPC regs */
+#define LS7A_LPC_REG_BASE (LS7A_PCH_REG_BASE + 0x00002000)
+/* CHIPCFG regs */
+#define LS7A_CHIPCFG_REG_BASE (LS7A_PCH_REG_BASE + 0x00010000)
+/* MISC reg base */
+#define LS7A_MISC_REG_BASE (LS7A_PCH_REG_BASE + 0x00080000)
+/* ACPI regs */
+#define LS7A_ACPI_REG_BASE (LS7A_MISC_REG_BASE + 0x00050000)
+/* RTC regs */
+#define LS7A_RTC_REG_BASE (LS7A_MISC_REG_BASE + 0x00050100)
+
+#define LS7A_DMA_CFG (volatile void *)TO_UNCACHE(LS7A_CHIPCFG_REG_BASE + 0x041c)
+#define LS7A_DMA_NODE_SHF 8
+#define LS7A_DMA_NODE_MASK 0x1F00
+
+#define LS7A_INT_MASK_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x020)
+#define LS7A_INT_EDGE_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x060)
+#define LS7A_INT_CLEAR_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x080)
+#define LS7A_INT_HTMSI_EN_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x040)
+#define LS7A_INT_ROUTE_ENTRY_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x100)
+#define LS7A_INT_HTMSI_VEC_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x200)
+#define LS7A_INT_STATUS_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x3a0)
+#define LS7A_INT_POL_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x3e0)
+#define LS7A_LPC_INT_CTL (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2000)
+#define LS7A_LPC_INT_ENA (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2004)
+#define LS7A_LPC_INT_STS (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2008)
+#define LS7A_LPC_INT_CLR (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x200c)
+#define LS7A_LPC_INT_POL (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2010)
+
+#define LS7A_PMCON_SOC_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x000)
+#define LS7A_PMCON_RESUME_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x004)
+#define LS7A_PMCON_RTC_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x008)
+#define LS7A_PM1_EVT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x00c)
+#define LS7A_PM1_ENA_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x010)
+#define LS7A_PM1_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x014)
+#define LS7A_PM1_TMR_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x018)
+#define LS7A_P_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x01c)
+#define LS7A_GPE0_STS_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x028)
+#define LS7A_GPE0_ENA_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x02c)
+#define LS7A_RST_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x030)
+#define LS7A_WD_SET_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x034)
+#define LS7A_WD_TIMER_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x038)
+#define LS7A_THSENS_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x04c)
+#define LS7A_GEN_RTC_1_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x050)
+#define LS7A_GEN_RTC_2_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x054)
+#define LS7A_DPM_CFG_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x400)
+#define LS7A_DPM_STS_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x404)
+#define LS7A_DPM_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x408)
+
+typedef enum {
+ ACPI_PCI_HOTPLUG_STATUS = 1 << 1,
+ ACPI_CPU_HOTPLUG_STATUS = 1 << 2,
+ ACPI_MEM_HOTPLUG_STATUS = 1 << 3,
+ ACPI_POWERBUTTON_STATUS = 1 << 8,
+ ACPI_RTC_WAKE_STATUS = 1 << 10,
+ ACPI_PCI_WAKE_STATUS = 1 << 14,
+ ACPI_ANY_WAKE_STATUS = 1 << 15,
+} AcpiEventStatusBits;
+
+#define HT1LO_OFFSET 0xe0000000000UL
+
+/* PCI Configuration Space Base */
+#define MCFG_EXT_PCICFG_BASE 0xefe00000000UL
+
+/* REG ACCESS */
+#define ls7a_readb(addr) (*(volatile unsigned char *)TO_UNCACHE(addr))
+#define ls7a_readw(addr) (*(volatile unsigned short *)TO_UNCACHE(addr))
+#define ls7a_readl(addr) (*(volatile unsigned int *)TO_UNCACHE(addr))
+#define ls7a_readq(addr) (*(volatile unsigned long *)TO_UNCACHE(addr))
+#define ls7a_writeb(val, addr) *(volatile unsigned char *)TO_UNCACHE(addr) = (val)
+#define ls7a_writew(val, addr) *(volatile unsigned short *)TO_UNCACHE(addr) = (val)
+#define ls7a_writel(val, addr) *(volatile unsigned int *)TO_UNCACHE(addr) = (val)
+#define ls7a_writeq(val, addr) *(volatile unsigned long *)TO_UNCACHE(addr) = (val)
+
+#endif /* __ASM_LOONGSON_H */
diff --git a/arch/loongarch/include/asm/mmu.h b/arch/loongarch/include/asm/mmu.h
new file mode 100644
index 000000000000..0cc2d0803537
--- /dev/null
+++ b/arch/loongarch/include/asm/mmu.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_MMU_H
+#define __ASM_MMU_H
+
+#include <linux/atomic.h>
+#include <linux/spinlock.h>
+
+typedef struct {
+ u64 asid[NR_CPUS];
+ void *vdso;
+} mm_context_t;
+
+#endif /* __ASM_MMU_H */
diff --git a/arch/loongarch/include/asm/mmu_context.h b/arch/loongarch/include/asm/mmu_context.h
new file mode 100644
index 000000000000..9f97c3453b9c
--- /dev/null
+++ b/arch/loongarch/include/asm/mmu_context.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Switch an MMU context.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_MMU_CONTEXT_H
+#define _ASM_MMU_CONTEXT_H
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm-generic/mm_hooks.h>
+
+/*
+ * The upper bits that are unused by the hardware are treated
+ * as a software ASID (version) extension.
+ */
+static inline u64 asid_version_mask(unsigned int cpu)
+{
+ return ~(u64)(cpu_asid_mask(&cpu_data[cpu]));
+}
+
+static inline u64 asid_first_version(unsigned int cpu)
+{
+ return cpu_asid_mask(&cpu_data[cpu]) + 1;
+}
+
+#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
+#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
+#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
+
+static inline int asid_valid(struct mm_struct *mm, unsigned int cpu)
+{
+ if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
+ return 0;
+
+ return 1;
+}
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+/* Normal, classic get_new_mmu_context */
+static inline void
+get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
+{
+ u64 asid = asid_cache(cpu);
+
+ if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
+ local_flush_tlb_user(); /* start new asid cycle */
+
+ cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
+/*
+ * Initialize the context-related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ cpu_context(i, mm) = 0;
+
+ return 0;
+}
+
+static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /* Check if our ASID is of an older version and thus invalid */
+ if (!asid_valid(next, cpu))
+ get_new_mmu_context(next, cpu);
+
+ write_csr_asid(cpu_asid(cpu, next));
+
+ if (next != &init_mm)
+ csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL);
+ else
+ csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+
+ /*
+	 * Mark this CPU as active in next's mm_cpumask so that
+	 * IPI TLB-flush routines know it must be targeted.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+}
+
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch_mm_irqs_off(prev, next, tsk);
+ local_irq_restore(flags);
+}
+
+/*
+ * Destroy context-related info for an mm_struct that is about
+ * to be put to rest.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+
+#define activate_mm(prev, next) switch_mm(prev, next, current)
+#define deactivate_mm(task, mm) do { } while (0)
+
+/*
+ * If mm is currently active, we can't really drop it;
+ * instead, we hand it a fresh ASID.
+ */
+static inline void
+drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
+{
+ int asid;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data);
+
+ if (asid == cpu_asid(cpu, mm)) {
+ if (!current->mm || (current->mm == mm)) {
+ get_new_mmu_context(mm, cpu);
+ write_csr_asid(cpu_asid(cpu, mm));
+ goto out;
+ }
+ }
+
+ /* Will get a new context next time */
+ cpu_context(cpu, mm) = 0;
+ cpumask_clear_cpu(cpu, mm_cpumask(mm));
+out:
+ local_irq_restore(flags);
+}
+
+#endif /* _ASM_MMU_CONTEXT_H */
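
The ASID scheme above packs the hardware ASID into the low bits of a 64-bit
counter and treats everything above as a software version: asid_valid()
compares version fields, and get_new_mmu_context() flushes the TLB whenever
the hardware field wraps. A standalone model of that logic, assuming a
hypothetical fixed 10-bit hardware ASID (the real width is read from CPUCFG):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define ASID_BITS	10
#define ASID_MASK	((1ULL << ASID_BITS) - 1)
#define VERSION_MASK	(~ASID_MASK)

static uint64_t asid_cache = 1ULL << ASID_BITS;	/* asid_first_version() */

static bool asid_valid(uint64_t mm_asid)
{
	/* Stale if the version fields differ */
	return ((mm_asid ^ asid_cache) & VERSION_MASK) == 0;
}

static uint64_t get_new_asid(void)
{
	if (!(++asid_cache & ASID_MASK))
		printf("hardware ASID wrapped: flush TLB, start new version\n");
	return asid_cache;
}

int main(void)
{
	uint64_t mm_asid = get_new_asid();

	printf("asid=%llu valid=%d\n",
	       (unsigned long long)(mm_asid & ASID_MASK), asid_valid(mm_asid));

	/* Force a wrap to show the version bump invalidating old contexts */
	asid_cache |= ASID_MASK;
	get_new_asid();
	printf("after wrap: valid=%d\n", asid_valid(mm_asid));
	return 0;
}
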
diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h
new file mode 100644
index 000000000000..fe67d0b4b33d
--- /dev/null
+++ b/arch/loongarch/include/asm/mmzone.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen (chenhuacai@loongson.cn)
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_MMZONE_H_
+#define _ASM_MMZONE_H_
+
+#include <asm/page.h>
+#include <asm/numa.h>
+
+extern struct pglist_data *node_data[];
+
+#define NODE_DATA(nid) (node_data[(nid)])
+
+extern void setup_zero_pages(void);
+
+#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/asm/module.h
new file mode 100644
index 000000000000..9f6718df1854
--- /dev/null
+++ b/arch/loongarch/include/asm/module.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_MODULE_H
+#define _ASM_MODULE_H
+
+#include <asm/inst.h>
+#include <asm-generic/module.h>
+
+#define RELA_STACK_DEPTH 16
+
+struct mod_section {
+ Elf_Shdr *shdr;
+ int num_entries;
+ int max_entries;
+};
+
+struct mod_arch_specific {
+ struct mod_section plt;
+ struct mod_section plt_idx;
+};
+
+struct plt_entry {
+ u32 inst_lu12iw;
+ u32 inst_lu32id;
+ u32 inst_lu52id;
+ u32 inst_jirl;
+};
+
+struct plt_idx_entry {
+ unsigned long symbol_addr;
+};
+
+Elf_Addr module_emit_plt_entry(struct module *mod, unsigned long val);
+
+static inline struct plt_entry emit_plt_entry(unsigned long val)
+{
+ u32 lu12iw, lu32id, lu52id, jirl;
+
+ lu12iw = (lu12iw_op << 25 | (((val >> 12) & 0xfffff) << 5) | LOONGARCH_GPR_T1);
+ lu32id = larch_insn_gen_lu32id(LOONGARCH_GPR_T1, ADDR_IMM(val, LU32ID));
+ lu52id = larch_insn_gen_lu52id(LOONGARCH_GPR_T1, LOONGARCH_GPR_T1, ADDR_IMM(val, LU52ID));
+ jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, 0, (val & 0xfff));
+
+ return (struct plt_entry) { lu12iw, lu32id, lu52id, jirl };
+}
+
+static inline struct plt_idx_entry emit_plt_idx_entry(unsigned long val)
+{
+ return (struct plt_idx_entry) { val };
+}
+
+static inline int get_plt_idx(unsigned long val, const struct mod_section *sec)
+{
+ int i;
+ struct plt_idx_entry *plt_idx = (struct plt_idx_entry *)sec->shdr->sh_addr;
+
+ for (i = 0; i < sec->num_entries; i++) {
+ if (plt_idx[i].symbol_addr == val)
+ return i;
+ }
+
+ return -1;
+}
+
+static inline struct plt_entry *get_plt_entry(unsigned long val,
+ const struct mod_section *sec_plt,
+ const struct mod_section *sec_plt_idx)
+{
+ int plt_idx = get_plt_idx(val, sec_plt_idx);
+ struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
+
+ if (plt_idx < 0)
+ return NULL;
+
+ return plt + plt_idx;
+}
+
+#endif /* _ASM_MODULE_H */
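
A PLT entry materializes the full 64-bit target with lu12i.w/lu32i.d/lu52i.d
plus jirl, each instruction supplying one slice of the address (the later
instructions overwrite the sign extension performed by the earlier ones).
The sketch below only computes the immediate slices for a hypothetical symbol
address; the real larch_insn_gen_*() helpers additionally pack the opcode and
register fields:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0x9000000012345678ULL;	/* hypothetical symbol address */

	uint32_t imm_lu12iw = (val >> 12) & 0xfffff;	/* bits 12..31 */
	uint32_t imm_jirl   = val & 0xfff;		/* bits  0..11 */
	uint32_t imm_lu32id = (val >> 32) & 0xfffff;	/* bits 32..51 */
	uint32_t imm_lu52id = (val >> 52) & 0xfff;	/* bits 52..63 */

	printf("lu12i.w %#x  jirl %#x  lu32i.d %#x  lu52i.d %#x\n",
	       imm_lu12iw, imm_jirl, imm_lu32id, imm_lu52id);

	/* Reassemble to check the four slices cover the whole address */
	uint64_t back = ((uint64_t)imm_lu52id << 52) |
			((uint64_t)imm_lu32id << 32) |
			((uint64_t)imm_lu12iw << 12) | imm_jirl;
	printf("roundtrip %s\n", back == val ? "ok" : "mismatch");
	return 0;
}
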
diff --git a/arch/loongarch/include/asm/module.lds.h b/arch/loongarch/include/asm/module.lds.h
new file mode 100644
index 000000000000..31c1c0db11a3
--- /dev/null
+++ b/arch/loongarch/include/asm/module.lds.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */
+SECTIONS {
+ . = ALIGN(4);
+ .plt : { BYTE(0) }
+ .plt.idx : { BYTE(0) }
+}
diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h
new file mode 100644
index 000000000000..27f319b49862
--- /dev/null
+++ b/arch/loongarch/include/asm/numa.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_NUMA_H
+#define _ASM_LOONGARCH_NUMA_H
+
+#include <linux/nodemask.h>
+
+#define NODE_ADDRSPACE_SHIFT 44
+
+#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
+#define nid_to_addrbase(nid) (_ULCAST_(nid) << NODE_ADDRSPACE_SHIFT)
+
+#ifdef CONFIG_NUMA
+
+extern int numa_off;
+extern s16 __cpuid_to_node[CONFIG_NR_CPUS];
+extern nodemask_t numa_nodes_parsed __initdata;
+
+struct numa_memblk {
+ u64 start;
+ u64 end;
+ int nid;
+};
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+struct numa_meminfo {
+ int nr_blks;
+ struct numa_memblk blk[NR_NODE_MEMBLKS];
+};
+
+extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+
+extern void __init early_numa_add_cpu(int cpuid, s16 node);
+extern void numa_add_cpu(unsigned int cpu);
+extern void numa_remove_cpu(unsigned int cpu);
+
+static inline void numa_clear_node(int cpu)
+{
+}
+
+static inline void set_cpuid_to_node(int cpuid, s16 node)
+{
+ __cpuid_to_node[cpuid] = node;
+}
+
+extern int early_cpu_to_node(int cpu);
+
+#else
+
+static inline void early_numa_add_cpu(int cpuid, s16 node) { }
+static inline void numa_add_cpu(unsigned int cpu) { }
+static inline void numa_remove_cpu(unsigned int cpu) { }
+
+static inline int early_cpu_to_node(int cpu)
+{
+ return 0;
+}
+
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_LOONGARCH_NUMA_H */
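
With NODE_ADDRSPACE_SHIFT of 44, the node id sits in bits 44..47 of a
physical address, which is all that pa_to_nid()/nid_to_addrbase() exploit.
A small sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define NODE_ADDRSPACE_SHIFT	44

static unsigned int pa_to_nid(uint64_t addr)
{
	return (addr & 0xf00000000000ULL) >> NODE_ADDRSPACE_SHIFT;
}

static uint64_t nid_to_addrbase(unsigned int nid)
{
	return (uint64_t)nid << NODE_ADDRSPACE_SHIFT;
}

int main(void)
{
	uint64_t pa = nid_to_addrbase(2) | 0x1000;	/* a page on node 2 */

	printf("pa=%#llx node=%u\n", (unsigned long long)pa, pa_to_nid(pa));
	return 0;
}
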
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
new file mode 100644
index 000000000000..3dba4986f6c9
--- /dev/null
+++ b/arch/loongarch/include/asm/page.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PAGE_H
+#define _ASM_PAGE_H
+
+#include <linux/const.h>
+
+/*
+ * PAGE_SHIFT determines the page size
+ */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PAGE_SHIFT 12
+#endif
+#ifdef CONFIG_PAGE_SIZE_16KB
+#define PAGE_SHIFT 14
+#endif
+#ifdef CONFIG_PAGE_SIZE_64KB
+#define PAGE_SHIFT 16
+#endif
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/kernel.h>
+#include <linux/pfn.h>
+
+#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
+
+/*
+ * ARCH_PFN_OFFSET is normally defined only for the FLATMEM
+ * configuration, but our early memory init code uses it for
+ * all memory models, so define it unconditionally.
+ */
+#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
+
+extern void clear_page(void *page);
+extern void copy_page(void *to, void *from);
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+extern unsigned long shm_align_mask;
+
+struct page;
+struct vm_area_struct;
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+typedef struct { unsigned long pte; } pte_t;
+#define pte_val(x) ((x).pte)
+#define __pte(x) ((pte_t) { (x) })
+typedef struct page *pgtable_t;
+
+typedef struct { unsigned long pgd; } pgd_t;
+#define pgd_val(x) ((x).pgd)
+#define __pgd(x) ((pgd_t) { (x) })
+
+/*
+ * Manipulate page protection bits
+ */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) })
+#define pte_pgprot(x) __pgprot(pte_val(x) & ~_PFN_MASK)
+
+#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
+
+/*
+ * __pa()/__va() should be used only during mem init.
+ */
+#define __pa(x) PHYSADDR(x)
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+#ifdef CONFIG_FLATMEM
+
+static inline int pfn_valid(unsigned long pfn)
+{
+ /* avoid <linux/mm.h> include hell */
+ extern unsigned long max_mapnr;
+ unsigned long pfn_offset = ARCH_PFN_OFFSET;
+
+ return pfn >= pfn_offset && pfn < max_mapnr;
+}
+
+#endif
+
+#define virt_to_pfn(kaddr) PFN_DOWN(virt_to_phys((void *)(kaddr)))
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+
+extern int __virt_addr_valid(volatile void *kaddr);
+#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | \
+ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_PAGE_H */
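
PAGE_MASK, the pfn conversions, and HPAGE_SHIFT above are straight
shift/mask arithmetic; HPAGE_SHIFT adds one PMD level, i.e. PAGE_SHIFT - 3
extra bits, because a page of 8-byte PTEs maps 2^(PAGE_SHIFT - 3) pages.
A sketch assuming the 16KB page configuration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	14			/* CONFIG_PAGE_SIZE_16KB */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)	/* one PMD level */

int main(void)
{
	uint64_t addr = 0x900000000345abcdULL;

	printf("PAGE_SIZE  = %lu\n", PAGE_SIZE);
	printf("HPAGE_SIZE = %lu MiB\n", (1UL << HPAGE_SHIFT) >> 20);
	printf("page base  = %#llx\n", (unsigned long long)(addr & PAGE_MASK));
	printf("offset     = %#llx\n", (unsigned long long)(addr & ~PAGE_MASK));
	printf("pfn        = %#llx\n", (unsigned long long)(addr >> PAGE_SHIFT));
	return 0;
}
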
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
new file mode 100644
index 000000000000..e6569f18c6dd
--- /dev/null
+++ b/arch/loongarch/include/asm/percpu.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_PERCPU_H
+#define __ASM_PERCPU_H
+
+#include <asm/cmpxchg.h>
+#include <asm/loongarch.h>
+
+/* Use r21 for fast access */
+register unsigned long __my_cpu_offset __asm__("$r21");
+
+static inline void set_my_cpu_offset(unsigned long off)
+{
+ __my_cpu_offset = off;
+ csr_write64(off, PERCPU_BASE_KS);
+}
+#define __my_cpu_offset __my_cpu_offset
+
+#define PERCPU_OP(op, asm_op, c_op) \
+static inline unsigned long __percpu_##op(void *ptr, \
+ unsigned long val, int size) \
+{ \
+ unsigned long ret; \
+ \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__( \
+ "am"#asm_op".w" " %[ret], %[val], %[ptr] \n" \
+ : [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr) \
+ : [val] "r" (val)); \
+ break; \
+ case 8: \
+ __asm__ __volatile__( \
+ "am"#asm_op".d" " %[ret], %[val], %[ptr] \n" \
+ : [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr) \
+ : [val] "r" (val)); \
+ break; \
+ default: \
+ ret = 0; \
+ BUILD_BUG(); \
+ } \
+ \
+ return ret c_op val; \
+}
+
+PERCPU_OP(add, add, +)
+PERCPU_OP(and, and, &)
+PERCPU_OP(or, or, |)
+#undef PERCPU_OP
+
+static inline unsigned long __percpu_read(void *ptr, int size)
+{
+ unsigned long ret;
+
+ switch (size) {
+ case 1:
+ __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n"
+ : [ret] "=&r"(ret)
+ : [ptr] "r"(ptr)
+ : "memory");
+ break;
+ case 2:
+ __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n"
+ : [ret] "=&r"(ret)
+ : [ptr] "r"(ptr)
+ : "memory");
+ break;
+ case 4:
+ __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n"
+ : [ret] "=&r"(ret)
+ : [ptr] "r"(ptr)
+ : "memory");
+ break;
+ case 8:
+ __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n"
+ : [ret] "=&r"(ret)
+ : [ptr] "r"(ptr)
+ : "memory");
+ break;
+ default:
+ ret = 0;
+ BUILD_BUG();
+ }
+
+ return ret;
+}
+
+static inline void __percpu_write(void *ptr, unsigned long val, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("stx.b %[val], $r21, %[ptr] \n"
+ :
+ : [val] "r" (val), [ptr] "r" (ptr)
+ : "memory");
+ break;
+ case 2:
+ __asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n"
+ :
+ : [val] "r" (val), [ptr] "r" (ptr)
+ : "memory");
+ break;
+ case 4:
+ __asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n"
+ :
+ : [val] "r" (val), [ptr] "r" (ptr)
+ : "memory");
+ break;
+ case 8:
+ __asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n"
+ :
+ : [val] "r" (val), [ptr] "r" (ptr)
+ : "memory");
+ break;
+ default:
+ BUILD_BUG();
+ }
+}
+
+static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+ int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
+
+ case 8:
+ return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val);
+
+ default:
+ BUILD_BUG();
+ }
+
+ return 0;
+}
+
+/* this_cpu_cmpxchg */
+#define _protect_cmpxchg_local(pcp, o, n) \
+({ \
+ typeof(*raw_cpu_ptr(&(pcp))) __ret; \
+ preempt_disable_notrace(); \
+ __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
+ preempt_enable_notrace(); \
+ __ret; \
+})
+
+#define _percpu_read(pcp) \
+({ \
+ typeof(pcp) __retval; \
+ __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \
+ __retval; \
+})
+
+#define _percpu_write(pcp, val) \
+do { \
+ __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \
+} while (0)
+
+#define _pcp_protect(operation, pcp, val) \
+({ \
+ typeof(pcp) __retval; \
+ preempt_disable_notrace(); \
+ __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
+ (val), sizeof(pcp)); \
+ preempt_enable_notrace(); \
+ __retval; \
+})
+
+#define _percpu_add(pcp, val) \
+ _pcp_protect(__percpu_add, pcp, val)
+
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
+
+#define _percpu_and(pcp, val) \
+ _pcp_protect(__percpu_and, pcp, val)
+
+#define _percpu_or(pcp, val) \
+ _pcp_protect(__percpu_or, pcp, val)
+
+#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
+ _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))
+
+#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
+#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)
+
+#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
+#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)
+
+#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
+#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)
+
+#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
+#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
+
+#define this_cpu_read_1(pcp) _percpu_read(pcp)
+#define this_cpu_read_2(pcp) _percpu_read(pcp)
+#define this_cpu_read_4(pcp) _percpu_read(pcp)
+#define this_cpu_read_8(pcp) _percpu_read(pcp)
+
+#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
+
+#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
+
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ASM_PERCPU_H */
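
The per-CPU design above keeps each CPU's offset into the replicated
per-CPU area in $r21, so a this_cpu access is a single base-plus-offset
load or store. A plain-C model of that addressing (no atomics or preemption
handling; the variables here are demo stand-ins, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS	4

/* Each CPU gets its own copy of the "percpu area" */
static uint32_t percpu_area[NR_CPUS][16];

static uintptr_t my_cpu_offset;		/* stands in for $r21 */

static void set_my_cpu_offset(int cpu)
{
	my_cpu_offset = (uintptr_t)percpu_area[cpu] - (uintptr_t)percpu_area[0];
}

/* A "percpu variable": its canonical address lives in CPU 0's copy */
static uint32_t *const counter = &percpu_area[0][0];

static uint32_t *this_cpu_ptr(uint32_t *ptr)
{
	return (uint32_t *)((uintptr_t)ptr + my_cpu_offset);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		set_my_cpu_offset(cpu);	/* done once per CPU at boot */
		*this_cpu_ptr(counter) += cpu + 1;
	}
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d counter = %u\n", cpu, percpu_area[cpu][0]);
	return 0;
}
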
diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h
new file mode 100644
index 000000000000..dcb3b17053a8
--- /dev/null
+++ b/arch/loongarch/include/asm/perf_event.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LOONGARCH_PERF_EVENT_H__
+#define __LOONGARCH_PERF_EVENT_H__
+/* Nothing to show here; the file is required by linux/perf_event.h. */
+#endif /* __LOONGARCH_PERF_EVENT_H__ */
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
new file mode 100644
index 000000000000..b0a57b25c131
--- /dev/null
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PGALLOC_H
+#define _ASM_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#define __HAVE_ARCH_PMD_ALLOC_ONE
+#define __HAVE_ARCH_PUD_ALLOC_ONE
+#include <asm-generic/pgalloc.h>
+
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+ pmd_t *pmd, pte_t *pte)
+{
+ set_pmd(pmd, __pmd((unsigned long)pte));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
+{
+ set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
+}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ set_pud(pud, __pud((unsigned long)pmd));
+}
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+ set_p4d(p4d, __p4d((unsigned long)pud));
+}
+
+#endif /* __PAGETABLE_PUD_FOLDED */
+
+extern void pagetable_init(void);
+
+/*
+ * Initialize a new pmd table with invalid pointers.
+ */
+extern void pmd_init(unsigned long page, unsigned long pagetable);
+
+/*
+ * Initialize a new pgd / pmd table with invalid pointers.
+ */
+extern void pgd_init(unsigned long page);
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+
+#define __pte_free_tlb(tlb, pte, address) \
+do { \
+ pgtable_pte_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pmd_t *pmd;
+ struct page *pg;
+
+ pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER);
+ if (!pg)
+ return NULL;
+
+ if (!pgtable_pmd_page_ctor(pg)) {
+ __free_pages(pg, PMD_ORDER);
+ return NULL;
+ }
+
+ pmd = (pmd_t *)page_address(pg);
+ pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
+ return pmd;
+}
+
+#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
+
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pud_t *pud;
+
+ pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
+ if (pud)
+ pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
+ return pud;
+}
+
+#define __pud_free_tlb(tlb, x, addr) pud_free((tlb)->mm, x)
+
+#endif /* __PAGETABLE_PUD_FOLDED */
+
+#endif /* _ASM_PGALLOC_H */
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
new file mode 100644
index 000000000000..3badd112d9ab
--- /dev/null
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PGTABLE_BITS_H
+#define _ASM_PGTABLE_BITS_H
+
+/* Page table bits */
+#define _PAGE_VALID_SHIFT 0
+#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */
+#define _PAGE_DIRTY_SHIFT 1
+#define _PAGE_PLV_SHIFT 2 /* 2~3, two bits */
+#define _CACHE_SHIFT 4 /* 4~5, two bits */
+#define _PAGE_GLOBAL_SHIFT 6
+#define _PAGE_HUGE_SHIFT 6 /* HUGE is a PMD bit */
+#define _PAGE_PRESENT_SHIFT 7
+#define _PAGE_WRITE_SHIFT 8
+#define _PAGE_MODIFIED_SHIFT 9
+#define _PAGE_PROTNONE_SHIFT 10
+#define _PAGE_SPECIAL_SHIFT 11
+#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
+#define _PAGE_PFN_SHIFT 12
+#define _PAGE_PFN_END_SHIFT 48
+#define _PAGE_NO_READ_SHIFT 61
+#define _PAGE_NO_EXEC_SHIFT 62
+#define _PAGE_RPLV_SHIFT 63
+
+/* Used by software */
+#define _PAGE_PRESENT (_ULCAST_(1) << _PAGE_PRESENT_SHIFT)
+#define _PAGE_WRITE (_ULCAST_(1) << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED (_ULCAST_(1) << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
+#define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
+#define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
+
+/* Used by TLB hardware (placed in EntryLo*) */
+#define _PAGE_VALID (_ULCAST_(1) << _PAGE_VALID_SHIFT)
+#define _PAGE_DIRTY (_ULCAST_(1) << _PAGE_DIRTY_SHIFT)
+#define _PAGE_PLV (_ULCAST_(3) << _PAGE_PLV_SHIFT)
+#define _PAGE_GLOBAL (_ULCAST_(1) << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_HUGE (_ULCAST_(1) << _PAGE_HUGE_SHIFT)
+#define _PAGE_HGLOBAL (_ULCAST_(1) << _PAGE_HGLOBAL_SHIFT)
+#define _PAGE_NO_READ (_ULCAST_(1) << _PAGE_NO_READ_SHIFT)
+#define _PAGE_NO_EXEC (_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT)
+#define _PAGE_RPLV (_ULCAST_(1) << _PAGE_RPLV_SHIFT)
+#define _CACHE_MASK (_ULCAST_(3) << _CACHE_SHIFT)
+#define _PFN_SHIFT (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT)
+
+#define _PAGE_USER (PLV_USER << _PAGE_PLV_SHIFT)
+#define _PAGE_KERN (PLV_KERN << _PAGE_PLV_SHIFT)
+
+#define _PFN_MASK (~((_ULCAST_(1) << (_PFN_SHIFT)) - 1) & \
+ ((_ULCAST_(1) << (_PAGE_PFN_END_SHIFT)) - 1))
+
+/*
+ * Cache attributes
+ */
+#ifndef _CACHE_SUC
+#define _CACHE_SUC (0<<_CACHE_SHIFT) /* Strong-ordered UnCached */
+#endif
+#ifndef _CACHE_CC
+#define _CACHE_CC (1<<_CACHE_SHIFT) /* Coherent Cached */
+#endif
+#ifndef _CACHE_WUC
+#define _CACHE_WUC (2<<_CACHE_SHIFT) /* Weak-ordered UnCached */
+#endif
+
+#define __READABLE (_PAGE_VALID)
+#define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE)
+
+#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
+#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
+
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \
+ _PAGE_USER | _CACHE_CC)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
+ _PAGE_USER | _CACHE_CC)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _CACHE_CC)
+
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_CC)
+#define PAGE_KERNEL_SUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_SUC)
+#define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC)
+
+#define __P000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ)
+#define __P001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __P010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __P011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __P100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __P101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __P110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __P111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+
+#define __S000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ)
+#define __S001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __S010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE)
+#define __S011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE)
+#define __S100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __S101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __S110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE)
+#define __S111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE)
+
+#ifndef __ASSEMBLY__
+
+#define pgprot_noncached pgprot_noncached
+
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot = (prot & ~_CACHE_MASK) | _CACHE_SUC;
+
+ return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot = (prot & ~_CACHE_MASK) | _CACHE_WUC;
+
+ return __pgprot(prot);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_BITS_H */
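
_PFN_MASK is built from the two PFN bounds above: it covers everything from
_PFN_SHIFT up to bit 47. A sketch that computes the mask and pulls the PFN
out of a synthetic PTE value, assuming 4KB pages so that _PFN_SHIFT equals
_PAGE_PFN_SHIFT:

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PFN_SHIFT		12
#define _PAGE_PFN_END_SHIFT	48
#define _PFN_SHIFT		12	/* PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT */

#define _PFN_MASK (~((1ULL << _PFN_SHIFT) - 1) & \
		   ((1ULL << _PAGE_PFN_END_SHIFT) - 1))

int main(void)
{
	uint64_t pte = (0x12345ULL << _PFN_SHIFT) | 0x3;	/* PFN + low flags */

	printf("_PFN_MASK = %#llx\n", (unsigned long long)_PFN_MASK);
	printf("pfn       = %#llx\n",
	       (unsigned long long)((pte & _PFN_MASK) >> _PFN_SHIFT));
	return 0;
}
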
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
new file mode 100644
index 000000000000..5dc84d8f18d6
--- /dev/null
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -0,0 +1,565 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_PGTABLE_H
+#define _ASM_PGTABLE_H
+
+#include <linux/compiler.h>
+#include <asm/addrspace.h>
+#include <asm/pgtable-bits.h>
+
+#if CONFIG_PGTABLE_LEVELS == 2
+#include <asm-generic/pgtable-nopmd.h>
+#elif CONFIG_PGTABLE_LEVELS == 3
+#include <asm-generic/pgtable-nopud.h>
+#else
+#include <asm-generic/pgtable-nop4d.h>
+#endif
+
+#define PGD_ORDER 0
+#define PUD_ORDER 0
+#define PMD_ORDER 0
+#define PTE_ORDER 0
+
+#if CONFIG_PGTABLE_LEVELS == 2
+#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#elif CONFIG_PGTABLE_LEVELS == 3
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#elif CONFIG_PGTABLE_LEVELS == 4
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
+#endif
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT + PGD_ORDER - 3))
+
+#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) >> 3)
+#if CONFIG_PGTABLE_LEVELS > 3
+#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) >> 3)
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) >> 3)
+#endif
+#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) >> 3)
+
+#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/mm_types.h>
+#include <linux/mmzone.h>
+#include <asm/fixmap.h>
+#include <asm/io.h>
+
+struct mm_struct;
+struct vm_area_struct;
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero; used
+ * for zero-mapped memory areas, etc.
+ */
+
+extern unsigned long empty_zero_page;
+extern unsigned long zero_page_mask;
+
+#define ZERO_PAGE(vaddr) \
+ (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
+#define __HAVE_COLOR_ZERO_PAGE
+
+/*
+ * TLB refill handlers may also map the vmalloc area into xkvrange.
+ * Avoid the first couple of pages so NULL pointer dereferences will
+ * still reliably trap.
+ */
+#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
+#define MODULES_END (MODULES_VADDR + SZ_256M)
+
+#define VMALLOC_START MODULES_END
+#define VMALLOC_END \
+ (vm_map_base + \
+ min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE)
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#endif
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_ERROR(e) \
+ pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern pte_t invalid_pte_table[PTRS_PER_PTE];
+
+#ifndef __PAGETABLE_PUD_FOLDED
+
+typedef struct { unsigned long pud; } pud_t;
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) })
+
+extern pud_t invalid_pud_table[PTRS_PER_PUD];
+
+/*
+ * Empty pgd/p4d entries point to the invalid_pud_table.
+ */
+static inline int p4d_none(p4d_t p4d)
+{
+ return p4d_val(p4d) == (unsigned long)invalid_pud_table;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+ return p4d_val(p4d) & ~PAGE_MASK;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+ return p4d_val(p4d) != (unsigned long)invalid_pud_table;
+}
+
+static inline void p4d_clear(p4d_t *p4dp)
+{
+ p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
+}
+
+static inline pud_t *p4d_pgtable(p4d_t p4d)
+{
+ return (pud_t *)p4d_val(p4d);
+}
+
+static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
+{
+ *p4d = p4dval;
+}
+
+#define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d))
+#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
+
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) })
+
+extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
+
+/*
+ * Empty pud entries point to the invalid_pmd_table.
+ */
+static inline int pud_none(pud_t pud)
+{
+ return pud_val(pud) == (unsigned long)invalid_pmd_table;
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return pud_val(pud) & ~PAGE_MASK;
+}
+
+static inline int pud_present(pud_t pud)
+{
+ return pud_val(pud) != (unsigned long)invalid_pmd_table;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+ pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
+}
+
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+ return (pmd_t *)pud_val(pud);
+}
+
+#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
+
+#define pud_phys(pud) virt_to_phys((void *)pud_val(pud))
+#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
+
+#endif
+
+/*
+ * Empty pmd entries point to the invalid_pte_table.
+ */
+static inline int pmd_none(pmd_t pmd)
+{
+ return pmd_val(pmd) == (unsigned long)invalid_pte_table;
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+ return (pmd_val(pmd) & ~PAGE_MASK);
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+ return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE));
+
+ return pmd_val(pmd) != (unsigned long)invalid_pte_table;
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
+}
+
+#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
+
+#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
+
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#define pmd_page_vaddr(pmd) pmd_val(pmd)
+
+extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
+extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define pte_pfn(x) ((unsigned long)(((x).pte & _PFN_MASK) >> _PFN_SHIFT))
+#define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+
+/*
+ * Initialize a new pgd / pmd table with invalid pointers.
+ */
+extern void pgd_init(unsigned long page);
+extern void pud_init(unsigned long page, unsigned long pagetable);
+extern void pmd_init(unsigned long page, unsigned long pagetable);
+
+/*
+ * Non-present pages: high 40 bits are offset, next 8 bits type,
+ * low 16 bits zero.
+ */
+static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
+{
+	pte_t pte;
+
+	pte_val(pte) = (type << 16) | (offset << 24);
+
+	return pte;
+}
+
+#define __swp_type(x) (((x).val >> 16) & 0xff)
+#define __swp_offset(x) ((x).val >> 24)
+#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
+#define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE })
+
+extern void paging_init(void);
+
+#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
+#define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_no_exec(pte) (pte_val(pte) & _PAGE_NO_EXEC)
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+ if (pte_val(pteval) & _PAGE_GLOBAL) {
+ pte_t *buddy = ptep_buddy(ptep);
+ /*
+ * Make sure the buddy is global too (if it's !none,
+ * it better already be global)
+ */
+#ifdef CONFIG_SMP
+ /*
+ * For SMP, multiple CPUs can race, so we need to do
+ * this atomically.
+ */
+ unsigned long page_global = _PAGE_GLOBAL;
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ "1:" __LL "%[tmp], %[buddy] \n"
+ " bnez %[tmp], 2f \n"
+ " or %[tmp], %[tmp], %[global] \n"
+ __SC "%[tmp], %[buddy] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ "2: \n"
+ __WEAK_LLSC_MB
+ : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
+ : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
+ if (pte_none(*buddy))
+ pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
+ }
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ set_pte(ptep, pteval);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ /* Preserve global status for the pair */
+ if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
+ set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
+ else
+ set_pte_at(mm, addr, ptep, __pte(0));
+}
+
+#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
+#define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1)
+#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
+
+extern pgd_t swapper_pg_dir[];
+extern pgd_t invalid_pg_dir[];
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not.
+ */
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_ACCESSED;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_ACCESSED;
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= (_PAGE_DIRTY | _PAGE_MODIFIED);
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= (_PAGE_WRITE | _PAGE_DIRTY);
+ return pte;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
+ return pte;
+}
+
+static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_HUGE;
+ return pte;
+}
+
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
+static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
+#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
+
+#define pte_accessible pte_accessible
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
+{
+ if (pte_val(a) & _PAGE_PRESENT)
+ return true;
+
+ if ((pte_val(a) & _PAGE_PROTNONE) &&
+ atomic_read(&mm->tlb_flush_pending))
+ return true;
+
+ return false;
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+ (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
+}
+
+extern void __update_tlb(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep);
+
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ __update_tlb(vma, address, ptep);
+}
+
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ __update_tlb(vma, address, (pte_t *)pmdp);
+}
+
+#define kern_addr_valid(addr) (1)
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+/* We don't have hardware dirty/accessed bits, so generic_pmdp_establish() is fine. */
+#define pmdp_establish generic_pmdp_establish
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
+ ((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
+ pmd_val(pmd) |= _PAGE_HUGE;
+
+ return pmd;
+}
+
+#define pmd_write pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_WRITE);
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+ pmd_val(pmd) |= (_PAGE_WRITE | _PAGE_DIRTY);
+ return pmd;
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
+ return pmd;
+}
+
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_MODIFIED);
+}
+
+static inline pmd_t pmd_mkclean(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
+ return pmd;
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+ pmd_val(pmd) |= (_PAGE_DIRTY | _PAGE_MODIFIED);
+ return pmd;
+}
+
+static inline int pmd_young(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~_PAGE_ACCESSED;
+ return pmd;
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_ACCESSED;
+ return pmd;
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
+}
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+ if (pmd_trans_huge(pmd))
+ return pfn_to_page(pmd_pfn(pmd));
+
+ return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
+ (pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
+ return pmd;
+}
+
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);
+
+ return pmd;
+}
+
+/*
+ * The generic pmdp_huge_get_and_clear() uses pmd_clear() with a
+ * different prototype, so we provide our own version.
+ */
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t old = *pmdp;
+
+ pmd_clear(pmdp);
+
+ return old;
+}
+
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifdef CONFIG_NUMA_BALANCING
+static inline long pte_protnone(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_PROTNONE);
+}
+
+static inline long pmd_protnone(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _PAGE_PROTNONE);
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+/*
+ * We provide our own get_unmapped_area() to cope with the virtual aliasing
+ * constraints placed on us by the cache architecture.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_H */
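
mk_swap_pte() keeps the low 16 bits of a non-present PTE clear (so it can
never be mistaken for a present mapping), puts the swap type in bits 16..23,
and the offset from bit 24 upward. A standalone encode/decode sketch of that
layout:

#include <stdint.h>
#include <stdio.h>

static uint64_t mk_swap_pte(uint64_t type, uint64_t offset)
{
	/* low 16 bits stay zero: entry is never valid/present */
	return (type << 16) | (offset << 24);
}

static uint64_t swp_type(uint64_t val)   { return (val >> 16) & 0xff; }
static uint64_t swp_offset(uint64_t val) { return val >> 24; }

int main(void)
{
	uint64_t e = mk_swap_pte(3, 0xabcdef);

	printf("entry=%#llx type=%llu offset=%#llx\n",
	       (unsigned long long)e,
	       (unsigned long long)swp_type(e),
	       (unsigned long long)swp_offset(e));
	return 0;
}
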
diff --git a/arch/loongarch/include/asm/prefetch.h b/arch/loongarch/include/asm/prefetch.h
new file mode 100644
index 000000000000..1672262a5e2e
--- /dev/null
+++ b/arch/loongarch/include/asm/prefetch.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_PREFETCH_H
+#define __ASM_PREFETCH_H
+
+#define Pref_Load 0
+#define Pref_Store 8
+
+#ifdef __ASSEMBLY__
+
+ .macro __pref hint addr
+#ifdef CONFIG_CPU_HAS_PREFETCH
+ preld \hint, \addr, 0
+#endif
+ .endm
+
+ .macro pref_load addr
+ __pref Pref_Load, \addr
+ .endm
+
+ .macro pref_store addr
+ __pref Pref_Store, \addr
+ .endm
+
+#endif
+
+#endif /* __ASM_PREFETCH_H */
diff --git a/arch/loongarch/include/asm/processor.h b/arch/loongarch/include/asm/processor.h
new file mode 100644
index 000000000000..1d63c934b289
--- /dev/null
+++ b/arch/loongarch/include/asm/processor.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PROCESSOR_H
+#define _ASM_PROCESSOR_H
+
+#include <linux/atomic.h>
+#include <linux/cpumask.h>
+#include <linux/sizes.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/loongarch.h>
+#include <asm/vdso/processor.h>
+#include <uapi/asm/ptrace.h>
+#include <uapi/asm/sigcontext.h>
+
+#ifdef CONFIG_32BIT
+
+#define TASK_SIZE 0x80000000UL
+#define TASK_SIZE_MIN TASK_SIZE
+#define STACK_TOP_MAX TASK_SIZE
+
+#define TASK_IS_32BIT_ADDR 1
+
+#endif
+
+#ifdef CONFIG_64BIT
+
+#define TASK_SIZE32 0x100000000UL
+#define TASK_SIZE64 (0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))
+
+#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
+#define TASK_SIZE_MIN TASK_SIZE32
+#define STACK_TOP_MAX TASK_SIZE64
+
+#define TASK_SIZE_OF(tsk) \
+ (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
+
+#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)
+
+#endif
+
+#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)
+
+unsigned long stack_top(void);
+#define STACK_TOP stack_top()
+
+/*
+ * This decides where the kernel will search for a free chunk of
+ * VM space during mmap() calls.
+ */
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+
+#define FPU_REG_WIDTH 256
+#define FPU_ALIGN __attribute__((aligned(32)))
+
+union fpureg {
+ __u32 val32[FPU_REG_WIDTH / 32];
+ __u64 val64[FPU_REG_WIDTH / 64];
+};
+
+#define FPR_IDX(width, idx) (idx)
+
+#define BUILD_FPR_ACCESS(width) \
+static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \
+{ \
+ return fpr->val##width[FPR_IDX(width, idx)]; \
+} \
+ \
+static inline void set_fpr##width(union fpureg *fpr, unsigned int idx, \
+ u##width val) \
+{ \
+ fpr->val##width[FPR_IDX(width, idx)] = val; \
+}
+
+BUILD_FPR_ACCESS(32)
+BUILD_FPR_ACCESS(64)
+
+struct loongarch_fpu {
+ unsigned int fcsr;
+ unsigned int vcsr;
+ uint64_t fcc; /* 8x8 */
+ union fpureg fpr[NUM_FPU_REGS];
+};
+
+#define INIT_CPUMASK { \
+ {0,} \
+}
+
+#define ARCH_MIN_TASKALIGN 32
+
+struct loongarch_vdso_info;
+
+/*
+ * If you change thread_struct, remember to update the #defines below too!
+ */
+struct thread_struct {
+ /* Main processor registers. */
+ unsigned long reg01, reg03, reg22; /* ra sp fp */
+ unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
+ unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */
+
+ /* CSR registers */
+ unsigned long csr_prmd;
+ unsigned long csr_crmd;
+ unsigned long csr_euen;
+ unsigned long csr_ecfg;
+ unsigned long csr_badvaddr; /* Last user fault */
+
+ /* Scratch registers */
+ unsigned long scr0;
+ unsigned long scr1;
+ unsigned long scr2;
+ unsigned long scr3;
+
+ /* Eflags register */
+ unsigned long eflags;
+
+ /* Other stuff associated with the thread. */
+ unsigned long trap_nr;
+ unsigned long error_code;
+ struct loongarch_vdso_info *vdso;
+
+ /*
+ * FPU & vector registers, must be at last because
+ * they are conditionally copied at fork().
+ */
+ struct loongarch_fpu fpu FPU_ALIGN;
+};
+
+#define INIT_THREAD { \
+ /* \
+ * Main processor registers \
+ */ \
+ .reg01 = 0, \
+ .reg03 = 0, \
+ .reg22 = 0, \
+ .reg23 = 0, \
+ .reg24 = 0, \
+ .reg25 = 0, \
+ .reg26 = 0, \
+ .reg27 = 0, \
+ .reg28 = 0, \
+ .reg29 = 0, \
+ .reg30 = 0, \
+ .reg31 = 0, \
+ .csr_crmd = 0, \
+ .csr_prmd = 0, \
+ .csr_euen = 0, \
+ .csr_ecfg = 0, \
+ .csr_badvaddr = 0, \
+ /* \
+ * Other stuff associated with the process \
+ */ \
+ .trap_nr = 0, \
+ .error_code = 0, \
+ /* \
+ * FPU & vector registers \
+ */ \
+ .fpu = { \
+ .fcsr = 0, \
+ .vcsr = 0, \
+ .fcc = 0, \
+ .fpr = {{{0,},},}, \
+ }, \
+}
+
+struct task_struct;
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while (0)
+
+enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL};
+
+extern unsigned long boot_option_idle_override;
+/*
+ * Do necessary setup to start up a newly executed thread.
+ */
+extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);
+
+static inline void flush_thread(void)
+{
+}
+
+unsigned long __get_wchan(struct task_struct *p);
+
+#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
+ THREAD_SIZE - 32 - sizeof(struct pt_regs))
+#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3])
+#define KSTK_EUEN(tsk) (task_pt_regs(tsk)->csr_euen)
+#define KSTK_ECFG(tsk) (task_pt_regs(tsk)->csr_ecfg)
+
+#define return_address() ({__asm__ __volatile__("":::"$1"); __builtin_return_address(0);})
+
+#ifdef CONFIG_CPU_HAS_PREFETCH
+
+#define ARCH_HAS_PREFETCH
+#define prefetch(x) __builtin_prefetch((x), 0, 1)
+
+#define ARCH_HAS_PREFETCHW
+#define prefetchw(x) __builtin_prefetch((x), 1, 1)
+
+#endif
+
+#endif /* _ASM_PROCESSOR_H */
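
union fpureg overlays 32-bit and 64-bit views of one 256-bit register, so
the same storage serves scalar FP state and wider vector state, and
BUILD_FPR_ACCESS() stamps out the typed accessors. A sketch of the pattern
(little-endian lane view, matching LoongArch):

#include <stdint.h>
#include <stdio.h>

#define FPU_REG_WIDTH	256

union fpureg {
	uint32_t val32[FPU_REG_WIDTH / 32];
	uint64_t val64[FPU_REG_WIDTH / 64];
};

static uint64_t get_fpr64(union fpureg *fpr, unsigned int idx)
{
	return fpr->val64[idx];
}

static void set_fpr64(union fpureg *fpr, unsigned int idx, uint64_t val)
{
	fpr->val64[idx] = val;
}

int main(void)
{
	union fpureg f = {0};

	set_fpr64(&f, 0, 0x1122334455667788ULL);
	/* The low 32-bit lane aliases the same storage (little-endian) */
	printf("val64[0]=%#llx val32[0]=%#x\n",
	       (unsigned long long)get_fpr64(&f, 0), f.val32[0]);
	return 0;
}
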
diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
new file mode 100644
index 000000000000..17838c6b7ccd
--- /dev/null
+++ b/arch/loongarch/include/asm/ptrace.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PTRACE_H
+#define _ASM_PTRACE_H
+
+#include <asm/page.h>
+#include <asm/thread_info.h>
+#include <uapi/asm/ptrace.h>
+
+/*
+ * This struct defines the way the registers are stored on the stack during
+ * a system call/exception. If you add a register here, please also add it to
+ * regoffset_table[] in arch/loongarch/kernel/ptrace.c.
+ */
+struct pt_regs {
+ /* Main processor registers. */
+ unsigned long regs[32];
+
+ /* Original syscall arg0. */
+ unsigned long orig_a0;
+
+ /* Special CSR registers. */
+ unsigned long csr_era;
+ unsigned long csr_badvaddr;
+ unsigned long csr_crmd;
+ unsigned long csr_prmd;
+ unsigned long csr_euen;
+ unsigned long csr_ecfg;
+ unsigned long csr_estat;
+ unsigned long __last[0];
+} __aligned(8);
+
+static inline int regs_irqs_disabled(struct pt_regs *regs)
+{
+ return arch_irqs_disabled_flags(regs->csr_prmd);
+}
+
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->regs[3];
+}
+
+/*
+ * Don't use asm-generic/ptrace.h; it defines FP accessors that don't make
+ * sense on LoongArch. We'd rather get an error if they are invoked.
+ */
+
+static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long val)
+{
+ regs->csr_era = val;
+}
+
+/* Query offset/name of register from its name/offset */
+extern int regs_query_register_offset(const char *name);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten.
+ * @offset: offset number of the register.
+ *
+ * regs_get_register() returns the value of a register. @offset is the
+ * byte offset of the register within the struct pt_regs pointed to by @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr is within the kernel
+ * stack page(s), returning true if so and false otherwise.
+ */
+static inline int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+ return ((addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
+ * specified by @regs. If the @n-th entry is NOT on the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
+
+struct task_struct;
+
+/*
+ * Was the process running in user mode (user time) or kernel mode (system time)?
+ */
+#define user_mode(regs) (((regs)->csr_prmd & PLV_MASK) == PLV_USER)
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ return regs->regs[4];
+}
+
+#define instruction_pointer(regs) ((regs)->csr_era)
+#define profile_pc(regs) instruction_pointer(regs)
+
+extern void die(const char *, struct pt_regs *) __noreturn;
+
+static inline void die_if_kernel(const char *str, struct pt_regs *regs)
+{
+ if (unlikely(!user_mode(regs)))
+ die(str, regs);
+}
+
+#define current_pt_regs() \
+({ \
+ unsigned long sp = (unsigned long)__builtin_frame_address(0); \
+ (struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1 - 32) - 1; \
+})
+
+/* Helpers for working with the user stack pointer */
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return regs->regs[3];
+}
+
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->regs[3] = val;
+}
+
+#endif /* _ASM_PTRACE_H */
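
regs_get_register() treats struct pt_regs as a byte array indexed by
offsetof(), which is also how regoffset_table[] entries are produced.
A sketch with a trimmed-down register frame (the reduced struct here is an
illustration, not the real layout):

#include <stddef.h>
#include <stdio.h>

struct pt_regs {
	unsigned long regs[32];
	unsigned long orig_a0;
	unsigned long csr_era;
};

#define MAX_REG_OFFSET offsetof(struct pt_regs, csr_era)

static unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	if (offset > MAX_REG_OFFSET)
		return 0;
	/* Byte-offset into the frame, then reinterpret as a register */
	return *(unsigned long *)((char *)regs + offset);
}

int main(void)
{
	struct pt_regs regs = {0};

	regs.regs[3] = 0x12340000;	/* $sp */
	printf("sp = %#lx\n",
	       regs_get_register(&regs, offsetof(struct pt_regs, regs[3])));
	return 0;
}
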
diff --git a/arch/loongarch/include/asm/reboot.h b/arch/loongarch/include/asm/reboot.h
new file mode 100644
index 000000000000..51151749d8f0
--- /dev/null
+++ b/arch/loongarch/include/asm/reboot.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_REBOOT_H
+#define _ASM_REBOOT_H
+
+extern void (*pm_restart)(void);
+
+#endif /* _ASM_REBOOT_H */
diff --git a/arch/loongarch/include/asm/regdef.h b/arch/loongarch/include/asm/regdef.h
new file mode 100644
index 000000000000..49a374c2612c
--- /dev/null
+++ b/arch/loongarch/include/asm/regdef.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_REGDEF_H
+#define _ASM_REGDEF_H
+
+#define zero $r0 /* wired zero */
+#define ra $r1 /* return address */
+#define tp $r2
+#define sp $r3 /* stack pointer */
+#define a0 $r4 /* argument registers, a0/a1 reused as v0/v1 for return value */
+#define a1 $r5
+#define a2 $r6
+#define a3 $r7
+#define a4 $r8
+#define a5 $r9
+#define a6 $r10
+#define a7 $r11
+#define t0 $r12 /* caller saved */
+#define t1 $r13
+#define t2 $r14
+#define t3 $r15
+#define t4 $r16
+#define t5 $r17
+#define t6 $r18
+#define t7 $r19
+#define t8 $r20
+#define u0 $r21
+#define fp $r22 /* frame pointer */
+#define s0 $r23 /* callee saved */
+#define s1 $r24
+#define s2 $r25
+#define s3 $r26
+#define s4 $r27
+#define s5 $r28
+#define s6 $r29
+#define s7 $r30
+#define s8 $r31
+
+#endif /* _ASM_REGDEF_H */
diff --git a/arch/loongarch/include/asm/seccomp.h b/arch/loongarch/include/asm/seccomp.h
new file mode 100644
index 000000000000..31d6ab42e43e
--- /dev/null
+++ b/arch/loongarch/include/asm/seccomp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm/unistd.h>
+
+#include <asm-generic/seccomp.h>
+
+#ifdef CONFIG_32BIT
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_LOONGARCH32
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "loongarch32"
+#else
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_LOONGARCH64
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "loongarch64"
+#endif
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/loongarch/include/asm/serial.h b/arch/loongarch/include/asm/serial.h
new file mode 100644
index 000000000000..3fb550eb9115
--- /dev/null
+++ b/arch/loongarch/include/asm/serial.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM__SERIAL_H
+#define __ASM__SERIAL_H
+
+#define BASE_BAUD 0
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
+
+#endif /* __ASM__SERIAL_H */
diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
new file mode 100644
index 000000000000..6d7d2a3e23dd
--- /dev/null
+++ b/arch/loongarch/include/asm/setup.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _LOONGARCH_SETUP_H
+#define _LOONGARCH_SETUP_H
+
+#include <linux/types.h>
+#include <uapi/asm/setup.h>
+
+#define VECSIZE 0x200
+
+extern unsigned long eentry;
+extern unsigned long tlbrentry;
+extern void cpu_cache_init(void);
+extern void per_cpu_trap_init(int cpu);
+extern void set_handler(unsigned long offset, void *addr, unsigned long len);
+extern void set_merr_handler(unsigned long offset, void *addr, unsigned long len);
+
+#endif /* _LOONGARCH_SETUP_H */
diff --git a/arch/loongarch/include/asm/shmparam.h b/arch/loongarch/include/asm/shmparam.h
new file mode 100644
index 000000000000..c9554f48d2df
--- /dev/null
+++ b/arch/loongarch/include/asm/shmparam.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_SHMPARAM_H
+#define _ASM_SHMPARAM_H
+
+#define __ARCH_FORCE_SHMLBA 1
+
+#define SHMLBA SZ_64K /* attach addr a multiple of this */
+
+#endif /* _ASM_SHMPARAM_H */
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
new file mode 100644
index 000000000000..71189b28bfb2
--- /dev/null
+++ b/arch/loongarch/include/asm/smp.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+
+extern int smp_num_siblings;
+extern int num_processors;
+extern int disabled_cpus;
+extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map[];
+
+void loongson3_smp_setup(void);
+void loongson3_prepare_cpus(unsigned int max_cpus);
+void loongson3_boot_secondary(int cpu, struct task_struct *idle);
+void loongson3_init_secondary(void);
+void loongson3_smp_finish(void);
+void loongson3_send_ipi_single(int cpu, unsigned int action);
+void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action);
+#ifdef CONFIG_HOTPLUG_CPU
+int loongson3_cpu_disable(void);
+void loongson3_cpu_die(unsigned int cpu);
+#endif
+
+static inline void plat_smp_setup(void)
+{
+ loongson3_smp_setup();
+}
+
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+ extern int vdso_smp_processor_id(void)
+ __compiletime_error("VDSO should not call smp_processor_id()");
+ return vdso_smp_processor_id();
+#else
+ return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
+
+/*
+ * Map from cpu id to sequential logical cpu number. This will only
+ * not be the identity mapping when some CPUs failed to come online.
+ */
+extern int __cpu_number_map[NR_CPUS];
+#define cpu_number_map(cpu) __cpu_number_map[cpu]
+
+/* The reverse map from sequential logical cpu number to cpu id. */
+extern int __cpu_logical_map[NR_CPUS];
+#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+
+#define cpu_physical_id(cpu) cpu_logical_map(cpu)
+
+#define SMP_BOOT_CPU 0x1
+#define SMP_RESCHEDULE 0x2
+#define SMP_CALL_FUNCTION 0x4
+
+struct secondary_data {
+ unsigned long stack;
+ unsigned long thread_info;
+};
+extern struct secondary_data cpuboot_data;
+
+extern asmlinkage void smpboot_entry(void);
+
+extern void calculate_cpu_foreign_map(void);
+
+/*
+ * Generate IPI list text
+ */
+extern void show_ipi_list(struct seq_file *p, int prec);
+
+/*
+ * This function sends a 'reschedule' IPI to another CPU.
+ * It goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+static inline void smp_send_reschedule(int cpu)
+{
+ loongson3_send_ipi_single(cpu, SMP_RESCHEDULE);
+}
+
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+ loongson3_send_ipi_single(cpu, SMP_CALL_FUNCTION);
+}
+
+static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ loongson3_send_ipi_mask(mask, SMP_CALL_FUNCTION);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static inline int __cpu_disable(void)
+{
+ return loongson3_cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+ loongson3_cpu_die(cpu);
+}
+
+extern void play_dead(void);
+#endif
+
+#endif /* __ASM_SMP_H */
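Kernel code normally reaches these IPI hooks through the generic cross-call API rather than calling loongson3_send_ipi_*() directly; a hedged sketch of a typical consumer (hypothetical helper, illustration only):

#include <linux/printk.h>
#include <linux/smp.h>

static void remote_hello(void *info)
{
	pr_info("hello from CPU %d\n", smp_processor_id());
}

/* Runs remote_hello() on @cpu and waits for completion; internally this
 * reaches arch_send_call_function_single_ipi(), i.e. an SMP_CALL_FUNCTION
 * IPI sent via loongson3_send_ipi_single(). */
static void kick_cpu(int cpu)
{
	smp_call_function_single(cpu, remote_hello, NULL, 1);
}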
diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h
new file mode 100644
index 000000000000..3d18cdf1b069
--- /dev/null
+++ b/arch/loongarch/include/asm/sparsemem.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LOONGARCH_SPARSEMEM_H
+#define _LOONGARCH_SPARSEMEM_H
+
+#ifdef CONFIG_SPARSEMEM
+
+/*
+ * SECTION_SIZE_BITS 2^N: how big each section will be
+ * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS 29 /* 2^29 = Largest Huge Page Size */
+#define MAX_PHYSMEM_BITS 48
+
+#endif /* CONFIG_SPARSEMEM */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int memory_add_physaddr_to_nid(u64 addr);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+#endif
+
+#define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS)
+
+#endif /* _LOONGARCH_SPARSEMEM_H */
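For scale: SECTION_SIZE_BITS = 29 makes each memory section 2^29 bytes = 512 MiB, matching the largest huge page size so a huge page never straddles a section boundary; MAX_PHYSMEM_BITS = 48 caps the physical address space at 2^48 bytes = 256 TiB, i.e. at most 2^(48-29) = 524,288 sections.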
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
new file mode 100644
index 000000000000..4ca953062b5b
--- /dev/null
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_STACKFRAME_H
+#define _ASM_STACKFRAME_H
+
+#include <linux/threads.h>
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-offsets.h>
+#include <asm/loongarch.h>
+#include <asm/thread_info.h>
+
+/* Make the addition of cfi info a little easier. */
+ .macro cfi_rel_offset reg offset=0 docfi=0
+ .if \docfi
+ .cfi_rel_offset \reg, \offset
+ .endif
+ .endm
+
+ .macro cfi_st reg offset=0 docfi=0
+ cfi_rel_offset \reg, \offset, \docfi
+ LONG_S \reg, sp, \offset
+ .endm
+
+ .macro cfi_restore reg offset=0 docfi=0
+ .if \docfi
+ .cfi_restore \reg
+ .endif
+ .endm
+
+ .macro cfi_ld reg offset=0 docfi=0
+ LONG_L \reg, sp, \offset
+ cfi_restore \reg \offset \docfi
+ .endm
+
+ .macro BACKUP_T0T1
+ csrwr t0, EXCEPTION_KS0
+ csrwr t1, EXCEPTION_KS1
+ .endm
+
+ .macro RELOAD_T0T1
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ .endm
+
+ .macro SAVE_TEMP docfi=0
+ RELOAD_T0T1
+ cfi_st t0, PT_R12, \docfi
+ cfi_st t1, PT_R13, \docfi
+ cfi_st t2, PT_R14, \docfi
+ cfi_st t3, PT_R15, \docfi
+ cfi_st t4, PT_R16, \docfi
+ cfi_st t5, PT_R17, \docfi
+ cfi_st t6, PT_R18, \docfi
+ cfi_st t7, PT_R19, \docfi
+ cfi_st t8, PT_R20, \docfi
+ .endm
+
+ .macro SAVE_STATIC docfi=0
+ cfi_st s0, PT_R23, \docfi
+ cfi_st s1, PT_R24, \docfi
+ cfi_st s2, PT_R25, \docfi
+ cfi_st s3, PT_R26, \docfi
+ cfi_st s4, PT_R27, \docfi
+ cfi_st s5, PT_R28, \docfi
+ cfi_st s6, PT_R29, \docfi
+ cfi_st s7, PT_R30, \docfi
+ cfi_st s8, PT_R31, \docfi
+ .endm
+
+/*
+ * get_saved_sp returns the SP for the current CPU by looking in the
+ * kernelsp array for it. It stores the current sp in t0 and loads the
+ * new value in sp.
+ */
+ .macro get_saved_sp docfi=0
+ la.abs t1, kernelsp
+#ifdef CONFIG_SMP
+ csrrd t0, PERCPU_BASE_KS
+ LONG_ADD t1, t1, t0
+#endif
+ move t0, sp
+ .if \docfi
+ .cfi_register sp, t0
+ .endif
+ LONG_L sp, t1, 0
+ .endm
+
+ .macro set_saved_sp stackp temp temp2
+ la.abs \temp, kernelsp
+#ifdef CONFIG_SMP
+ LONG_ADD \temp, \temp, u0
+#endif
+ LONG_S \stackp, \temp, 0
+ .endm
+
+ .macro SAVE_SOME docfi=0
+ csrrd t1, LOONGARCH_CSR_PRMD
+ andi t1, t1, 0x3 /* extract pplv bit */
+ move t0, sp
+ beqz t1, 8f
+ /* Called from user mode, new stack. */
+ get_saved_sp docfi=\docfi
+8:
+ PTR_ADDI sp, sp, -PT_SIZE
+ .if \docfi
+ .cfi_def_cfa sp, 0
+ .endif
+ cfi_st t0, PT_R3, \docfi
+ cfi_rel_offset sp, PT_R3, \docfi
+ LONG_S zero, sp, PT_R0
+ csrrd t0, LOONGARCH_CSR_PRMD
+ LONG_S t0, sp, PT_PRMD
+ csrrd t0, LOONGARCH_CSR_CRMD
+ LONG_S t0, sp, PT_CRMD
+ csrrd t0, LOONGARCH_CSR_EUEN
+ LONG_S t0, sp, PT_EUEN
+ csrrd t0, LOONGARCH_CSR_ECFG
+ LONG_S t0, sp, PT_ECFG
+ csrrd t0, LOONGARCH_CSR_ESTAT
+ PTR_S t0, sp, PT_ESTAT
+ cfi_st ra, PT_R1, \docfi
+ cfi_st a0, PT_R4, \docfi
+ cfi_st a1, PT_R5, \docfi
+ cfi_st a2, PT_R6, \docfi
+ cfi_st a3, PT_R7, \docfi
+ cfi_st a4, PT_R8, \docfi
+ cfi_st a5, PT_R9, \docfi
+ cfi_st a6, PT_R10, \docfi
+ cfi_st a7, PT_R11, \docfi
+ csrrd ra, LOONGARCH_CSR_ERA
+ LONG_S ra, sp, PT_ERA
+ .if \docfi
+ .cfi_rel_offset ra, PT_ERA
+ .endif
+ cfi_st tp, PT_R2, \docfi
+ cfi_st fp, PT_R22, \docfi
+
+ /* Set thread_info if we're coming from user mode */
+ csrrd t0, LOONGARCH_CSR_PRMD
+ andi t0, t0, 0x3 /* extract pplv bit */
+ beqz t0, 9f
+
+ li.d tp, ~_THREAD_MASK
+ and tp, tp, sp
+ cfi_st u0, PT_R21, \docfi
+ csrrd u0, PERCPU_BASE_KS
+9:
+ .endm
+
+ .macro SAVE_ALL docfi=0
+ SAVE_SOME \docfi
+ SAVE_TEMP \docfi
+ SAVE_STATIC \docfi
+ .endm
+
+ .macro RESTORE_TEMP docfi=0
+ cfi_ld t0, PT_R12, \docfi
+ cfi_ld t1, PT_R13, \docfi
+ cfi_ld t2, PT_R14, \docfi
+ cfi_ld t3, PT_R15, \docfi
+ cfi_ld t4, PT_R16, \docfi
+ cfi_ld t5, PT_R17, \docfi
+ cfi_ld t6, PT_R18, \docfi
+ cfi_ld t7, PT_R19, \docfi
+ cfi_ld t8, PT_R20, \docfi
+ .endm
+
+ .macro RESTORE_STATIC docfi=0
+ cfi_ld s0, PT_R23, \docfi
+ cfi_ld s1, PT_R24, \docfi
+ cfi_ld s2, PT_R25, \docfi
+ cfi_ld s3, PT_R26, \docfi
+ cfi_ld s4, PT_R27, \docfi
+ cfi_ld s5, PT_R28, \docfi
+ cfi_ld s6, PT_R29, \docfi
+ cfi_ld s7, PT_R30, \docfi
+ cfi_ld s8, PT_R31, \docfi
+ .endm
+
+ .macro RESTORE_SOME docfi=0
+ LONG_L a0, sp, PT_PRMD
+ andi a0, a0, 0x3 /* extract pplv bit */
+ beqz a0, 8f
+ cfi_ld u0, PT_R21, \docfi
+8:
+ LONG_L a0, sp, PT_ERA
+ csrwr a0, LOONGARCH_CSR_ERA
+ LONG_L a0, sp, PT_PRMD
+ csrwr a0, LOONGARCH_CSR_PRMD
+ cfi_ld ra, PT_R1, \docfi
+ cfi_ld a0, PT_R4, \docfi
+ cfi_ld a1, PT_R5, \docfi
+ cfi_ld a2, PT_R6, \docfi
+ cfi_ld a3, PT_R7, \docfi
+ cfi_ld a4, PT_R8, \docfi
+ cfi_ld a5, PT_R9, \docfi
+ cfi_ld a6, PT_R10, \docfi
+ cfi_ld a7, PT_R11, \docfi
+ cfi_ld tp, PT_R2, \docfi
+ cfi_ld fp, PT_R22, \docfi
+ .endm
+
+ .macro RESTORE_SP_AND_RET docfi=0
+ cfi_ld sp, PT_R3, \docfi
+ ertn
+ .endm
+
+ .macro RESTORE_ALL_AND_RET docfi=0
+ RESTORE_STATIC \docfi
+ RESTORE_TEMP \docfi
+ RESTORE_SOME \docfi
+ RESTORE_SP_AND_RET \docfi
+ .endm
+
+#endif /* _ASM_STACKFRAME_H */
diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h
new file mode 100644
index 000000000000..26483e396ad1
--- /dev/null
+++ b/arch/loongarch/include/asm/stacktrace.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_STACKTRACE_H
+#define _ASM_STACKTRACE_H
+
+#include <asm/asm.h>
+#include <asm/ptrace.h>
+#include <asm/loongarch.h>
+#include <linux/stringify.h>
+
+#define STR_LONG_L __stringify(LONG_L)
+#define STR_LONG_S __stringify(LONG_S)
+#define STR_LONGSIZE __stringify(LONGSIZE)
+
+#define STORE_ONE_REG(r) \
+ STR_LONG_S " $r" __stringify(r)", %1, "STR_LONGSIZE"*"__stringify(r)"\n\t"
+
+#define CSRRD_ONE_REG(reg) \
+ __stringify(csrrd) " %0, "__stringify(reg)"\n\t"
+
+static __always_inline void prepare_frametrace(struct pt_regs *regs)
+{
+ __asm__ __volatile__(
+ /* Save $r1 */
+ STORE_ONE_REG(1)
+ /* Use $r1 to save PC */
+ "pcaddi $r1, 0\n\t"
+ STR_LONG_S " $r1, %0\n\t"
+ /* Restore $r1 */
+ STR_LONG_L " $r1, %1, "STR_LONGSIZE"\n\t"
+ STORE_ONE_REG(2)
+ STORE_ONE_REG(3)
+ STORE_ONE_REG(4)
+ STORE_ONE_REG(5)
+ STORE_ONE_REG(6)
+ STORE_ONE_REG(7)
+ STORE_ONE_REG(8)
+ STORE_ONE_REG(9)
+ STORE_ONE_REG(10)
+ STORE_ONE_REG(11)
+ STORE_ONE_REG(12)
+ STORE_ONE_REG(13)
+ STORE_ONE_REG(14)
+ STORE_ONE_REG(15)
+ STORE_ONE_REG(16)
+ STORE_ONE_REG(17)
+ STORE_ONE_REG(18)
+ STORE_ONE_REG(19)
+ STORE_ONE_REG(20)
+ STORE_ONE_REG(21)
+ STORE_ONE_REG(22)
+ STORE_ONE_REG(23)
+ STORE_ONE_REG(24)
+ STORE_ONE_REG(25)
+ STORE_ONE_REG(26)
+ STORE_ONE_REG(27)
+ STORE_ONE_REG(28)
+ STORE_ONE_REG(29)
+ STORE_ONE_REG(30)
+ STORE_ONE_REG(31)
+ : "=m" (regs->csr_era)
+ : "r" (regs->regs)
+ : "memory");
+ __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_BADV) : "=r" (regs->csr_badvaddr));
+ __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_CRMD) : "=r" (regs->csr_crmd));
+ __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_PRMD) : "=r" (regs->csr_prmd));
+ __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_EUEN) : "=r" (regs->csr_euen));
+ __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_ECFG) : "=r" (regs->csr_ecfg));
+ __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_ESTAT) : "=r" (regs->csr_estat));
+}
+
+#endif /* _ASM_STACKTRACE_H */
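As a usage sketch (hypothetical helper, not in this patch), a diagnostic routine could snapshot its own register state like so:

#include <linux/printk.h>
#include <asm/stacktrace.h>

static void dump_current_state(void)
{
	struct pt_regs regs;

	prepare_frametrace(&regs);	/* fills GPRs, PC and the CSRs */
	pr_info("era=%lx sp=%lx\n", regs.csr_era, regs.regs[3]);
}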
diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h
new file mode 100644
index 000000000000..b07e60ded957
--- /dev/null
+++ b/arch/loongarch/include/asm/string.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_STRING_H
+#define _ASM_STRING_H
+
+extern void *memset(void *__s, int __c, size_t __count);
+extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+
+#endif /* _ASM_STRING_H */
diff --git a/arch/loongarch/include/asm/switch_to.h b/arch/loongarch/include/asm/switch_to.h
new file mode 100644
index 000000000000..2a8d04375574
--- /dev/null
+++ b/arch/loongarch/include/asm/switch_to.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_SWITCH_TO_H
+#define _ASM_SWITCH_TO_H
+
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
+
+struct task_struct;
+
+/**
+ * __switch_to - switch execution of a task
+ * @prev: The task previously executed.
+ * @next: The task to begin executing.
+ * @next_ti: task_thread_info(next).
+ *
+ * This function is used whilst scheduling to save the context of prev & load
+ * the context of next. Returns prev.
+ */
+extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *next, struct thread_info *next_ti);
+
+/*
+ * For newly created kernel threads switch_to() will return to
+ * ret_from_kernel_thread, newly created user threads to ret_from_fork.
+ * That is, everything following __switch_to() will be skipped for new threads.
+ * So everything that matters to new threads should be placed before __switch_to().
+ */
+#define switch_to(prev, next, last) \
+do { \
+ lose_fpu_inatomic(1, prev); \
+ (last) = __switch_to(prev, next, task_thread_info(next)); \
+} while (0)
+
+#endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/loongarch/include/asm/syscall.h b/arch/loongarch/include/asm/syscall.h
new file mode 100644
index 000000000000..e286dc58476e
--- /dev/null
+++ b/arch/loongarch/include/asm/syscall.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_SYSCALL_H
+#define __ASM_LOONGARCH_SYSCALL_H
+
+#include <linux/compiler.h>
+#include <uapi/linux/audit.h>
+#include <linux/elf-em.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/unistd.h>
+
+extern void *sys_call_table[];
+
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->regs[11];
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->regs[4] = regs->orig_a0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->regs[4];
+
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->regs[4];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->regs[4] = (long) error ? error : val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ args[0] = regs->orig_a0;
+ memcpy(&args[1], &regs->regs[5], 5 * sizeof(long));
+}
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+ return AUDIT_ARCH_LOONGARCH64;
+}
+
+static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
+{
+ return false;
+}
+
+#endif /* __ASM_LOONGARCH_SYSCALL_H */
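A tracing hook might consume these accessors as follows (hypothetical helper; note the syscall number lives in a7, i.e. regs->regs[11], and arg0 is recovered from orig_a0):

#include <linux/printk.h>
#include <asm/syscall.h>

static void show_syscall(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];
	long nr = syscall_get_nr(task, regs);

	syscall_get_arguments(task, regs, args);
	pr_info("syscall %ld(%lx, %lx, ...)\n", nr, args[0], args[1]);
}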
diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h
new file mode 100644
index 000000000000..99beb11c2fa8
--- /dev/null
+++ b/arch/loongarch/include/asm/thread_info.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * thread_info.h: LoongArch low-level thread information
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <asm/processor.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ unsigned long flags; /* low level flags */
+ unsigned long tp_value; /* thread pointer */
+ __u32 cpu; /* current CPU */
+ int preempt_count; /* 0 => preemptible, <0 => BUG */
+ struct pt_regs *regs;
+ unsigned long syscall; /* syscall number */
+ unsigned long syscall_work; /* SYSCALL_WORK_ flags */
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+}
+
+/* How to get the thread information struct from C. */
+register struct thread_info *__current_thread_info __asm__("$r2");
+
+static inline struct thread_info *current_thread_info(void)
+{
+ return __current_thread_info;
+}
+
+register unsigned long current_stack_pointer __asm__("$r3");
+
+#endif /* !__ASSEMBLY__ */
+
+/* thread information allocation */
+#define THREAD_SIZE SZ_16K
+#define THREAD_MASK (THREAD_SIZE - 1UL)
+#define THREAD_SIZE_ORDER ilog2(THREAD_SIZE / PAGE_SIZE)
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_NOTIFY_RESUME 3 /* callback before returning to user */
+#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */
+#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
+#define TIF_NOHZ 6 /* in adaptive nohz mode */
+#define TIF_UPROBE 7 /* breakpointed or singlestepping */
+#define TIF_USEDFPU 8 /* FPU was used by this task this quantum (SMP) */
+#define TIF_USEDSIMD 9 /* SIMD has been used this quantum */
+#define TIF_MEMDIE 10 /* is terminating due to OOM killer */
+#define TIF_FIXADE 11 /* Fix address errors in software */
+#define TIF_LOGADE 12 /* Log address errors to syslog */
+#define TIF_32BIT_REGS 13 /* 32-bit general purpose registers */
+#define TIF_32BIT_ADDR 14 /* 32-bit address space */
+#define TIF_LOAD_WATCH 15 /* If set, load watch registers */
+#define TIF_SINGLESTEP 16 /* Single Step */
+#define TIF_LSX_CTX_LIVE 17 /* LSX context must be preserved */
+#define TIF_LASX_CTX_LIVE 18 /* LASX context must be preserved */
+
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
+#define _TIF_NOHZ (1<<TIF_NOHZ)
+#define _TIF_UPROBE (1<<TIF_UPROBE)
+#define _TIF_USEDFPU (1<<TIF_USEDFPU)
+#define _TIF_USEDSIMD (1<<TIF_USEDSIMD)
+#define _TIF_FIXADE (1<<TIF_FIXADE)
+#define _TIF_LOGADE (1<<TIF_LOGADE)
+#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS)
+#define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
+#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+#define _TIF_LSX_CTX_LIVE (1<<TIF_LSX_CTX_LIVE)
+#define _TIF_LASX_CTX_LIVE (1<<TIF_LASX_CTX_LIVE)
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_THREAD_INFO_H */
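A sketch of how exit-to-user code typically tests these flags, via the generic test_thread_flag() accessor (hypothetical helper):

#include <linux/types.h>
#include <linux/thread_info.h>

/* True when signal-related return-to-user work is pending. */
static inline bool signal_work_pending(void)
{
	return test_thread_flag(TIF_SIGPENDING) ||
	       test_thread_flag(TIF_NOTIFY_SIGNAL);
}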
diff --git a/arch/loongarch/include/asm/time.h b/arch/loongarch/include/asm/time.h
new file mode 100644
index 000000000000..2eae219301d0
--- /dev/null
+++ b/arch/loongarch/include/asm/time.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_TIME_H
+#define _ASM_TIME_H
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <asm/loongarch.h>
+
+extern u64 cpu_clock_freq;
+extern u64 const_clock_freq;
+
+extern void sync_counter(void);
+
+static inline unsigned int calc_const_freq(void)
+{
+ unsigned int res;
+ unsigned int base_freq;
+ unsigned int cfm, cfd;
+
+ res = read_cpucfg(LOONGARCH_CPUCFG2);
+ if (!(res & CPUCFG2_LLFTP))
+ return 0;
+
+ base_freq = read_cpucfg(LOONGARCH_CPUCFG4);
+ res = read_cpucfg(LOONGARCH_CPUCFG5);
+ cfm = res & 0xffff;
+ cfd = (res >> 16) & 0xffff;
+
+ if (!base_freq || !cfm || !cfd)
+ return 0;
+
+ return (base_freq * cfm / cfd);
+}
+
+/*
+ * Initialize the calling CPU's timer interrupt as a clockevent device
+ */
+extern int constant_clockevent_init(void);
+extern int constant_clocksource_init(void);
+
+static inline void clockevent_set_clock(struct clock_event_device *cd,
+ unsigned int clock)
+{
+ clockevents_calc_mult_shift(cd, clock, 4);
+}
+
+#endif /* _ASM_TIME_H */
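A plausible boot-time use of calc_const_freq(); the fallback to cpu_clock_freq when no constant timer is reported is an assumption for illustration only:

#include <asm/time.h>

static void example_clockevent_setup(struct clock_event_device *cd)
{
	const_clock_freq = calc_const_freq();
	if (!const_clock_freq)
		const_clock_freq = cpu_clock_freq;	/* assumed fallback */
	clockevent_set_clock(cd, (unsigned int)const_clock_freq);
}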
diff --git a/arch/loongarch/include/asm/timex.h b/arch/loongarch/include/asm/timex.h
new file mode 100644
index 000000000000..fb41e9e7a222
--- /dev/null
+++ b/arch/loongarch/include/asm/timex.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_TIMEX_H
+#define _ASM_TIMEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+
+typedef unsigned long cycles_t;
+
+#define get_cycles get_cycles
+
+static inline cycles_t get_cycles(void)
+{
+ return drdtime();
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_TIMEX_H */
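A minimal sketch of cycle-delta timing with get_cycles() (hypothetical helper):

#include <asm/timex.h>

static cycles_t time_section(void (*fn)(void))
{
	cycles_t t0 = get_cycles();	/* rdtime.d, the stable counter */

	fn();
	return get_cycles() - t0;
}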
diff --git a/arch/loongarch/include/asm/tlb.h b/arch/loongarch/include/asm/tlb.h
new file mode 100644
index 000000000000..4f629ae9d5a9
--- /dev/null
+++ b/arch/loongarch/include/asm/tlb.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_TLB_H
+#define __ASM_TLB_H
+
+#include <linux/mm_types.h>
+#include <asm/cpu-features.h>
+#include <asm/loongarch.h>
+
+/*
+ * TLB Invalidate Flush
+ */
+static inline void tlbclr(void)
+{
+ __asm__ __volatile__("tlbclr");
+}
+
+static inline void tlbflush(void)
+{
+ __asm__ __volatile__("tlbflush");
+}
+
+/*
+ * TLB R/W operations.
+ */
+static inline void tlb_probe(void)
+{
+ __asm__ __volatile__("tlbsrch");
+}
+
+static inline void tlb_read(void)
+{
+ __asm__ __volatile__("tlbrd");
+}
+
+static inline void tlb_write_indexed(void)
+{
+ __asm__ __volatile__("tlbwr");
+}
+
+static inline void tlb_write_random(void)
+{
+ __asm__ __volatile__("tlbfill");
+}
+
+enum invtlb_ops {
+	/* Invalidate all entries */
+	INVTLB_ALL = 0x0,
+	/* Invalidate all entries of the current TLB */
+	INVTLB_CURRENT_ALL = 0x1,
+	/* Invalidate all global==1 lines in the current TLB */
+	INVTLB_CURRENT_GTRUE = 0x2,
+	/* Invalidate all global==0 lines in the current TLB */
+	INVTLB_CURRENT_GFALSE = 0x3,
+	/* Invalidate global==0 lines with a matching ASID in the current TLB */
+	INVTLB_GFALSE_AND_ASID = 0x4,
+	/* Invalidate the global==0 line matching both addr and ASID in the current TLB */
+	INVTLB_ADDR_GFALSE_AND_ASID = 0x5,
+	/* Invalidate the line matching addr, with global==1 or a matching ASID, in the current TLB */
+	INVTLB_ADDR_GTRUE_OR_ASID = 0x6,
+	/* Invalidate lines with a matching GID in the guest TLB */
+	INVGTLB_GID = 0x9,
+	/* Invalidate global==1 lines with a matching GID in the guest TLB */
+	INVGTLB_GID_GTRUE = 0xa,
+	/* Invalidate global==0 lines with a matching GID in the guest TLB */
+	INVGTLB_GID_GFALSE = 0xb,
+	/* Invalidate global==0 lines with matching GID and ASID in the guest TLB */
+	INVGTLB_GID_GFALSE_ASID = 0xc,
+	/* Invalidate global==0 lines with matching GID, ASID and addr in the guest TLB */
+	INVGTLB_GID_GFALSE_ASID_ADDR = 0xd,
+	/* Invalidate global==1 lines with matching GID, ASID and addr in the guest TLB */
+	INVGTLB_GID_GTRUE_ASID_ADDR = 0xe,
+	/* Invalidate all gva-->gpa guest TLB lines, regardless of GID */
+	INVGTLB_ALLGID_GVA_TO_GPA = 0x10,
+	/* Invalidate all gpa-->hpa TLB lines, regardless of GID */
+	INVTLB_ALLGID_GPA_TO_HPA = 0x11,
+	/* Invalidate all TLB lines of all GIDs, both gva-->gpa and gpa-->hpa */
+	INVTLB_ALLGID = 0x12,
+	/* Invalidate gva-->gpa guest TLB lines with a matching GID */
+	INVGTLB_GID_GVA_TO_GPA = 0x13,
+	/* Invalidate gpa-->hpa TLB lines with a matching GID */
+	INVTLB_GID_GPA_TO_HPA = 0x14,
+	/* Invalidate TLB lines with a matching GID, both gva-->gpa and gpa-->hpa */
+	INVTLB_GID_ALL = 0x15,
+	/* Invalidate gpa-->hpa TLB lines with matching GID and addr */
+	INVTLB_GID_ADDR = 0x16,
+};
+
+/*
+ * invtlb op info addr
+ * (0x1 << 26) | (0x24 << 20) | (0x13 << 15) |
+ * (addr << 10) | (info << 5) | op
+ */
+static inline void invtlb(u32 op, u32 info, u64 addr)
+{
+ __asm__ __volatile__(
+ "parse_r addr,%0\n\t"
+ "parse_r info,%1\n\t"
+ ".word ((0x6498000) | (addr << 10) | (info << 5) | %2)\n\t"
+ :
+ : "r"(addr), "r"(info), "i"(op)
+ :
+ );
+}
+
+static inline void invtlb_addr(u32 op, u32 info, u64 addr)
+{
+ __asm__ __volatile__(
+ "parse_r addr,%0\n\t"
+ ".word ((0x6498000) | (addr << 10) | (0 << 5) | %1)\n\t"
+ :
+ : "r"(addr), "i"(op)
+ :
+ );
+}
+
+static inline void invtlb_info(u32 op, u32 info, u64 addr)
+{
+ __asm__ __volatile__(
+ "parse_r info,%0\n\t"
+ ".word ((0x6498000) | (0 << 10) | (info << 5) | %1)\n\t"
+ :
+ : "r"(info), "i"(op)
+ :
+ );
+}
+
+static inline void invtlb_all(u32 op, u32 info, u64 addr)
+{
+ __asm__ __volatile__(
+ ".word ((0x6498000) | (0 << 10) | (0 << 5) | %0)\n\t"
+ :
+ : "i"(op)
+ :
+ );
+}
+
+/*
+ * LoongArch doesn't need any special per-pte or per-vma handling, except
+ * that we need to flush the cache for the area being unmapped.
+ */
+#define tlb_start_vma(tlb, vma) \
+ do { \
+ if (!(tlb)->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+ } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+static void tlb_flush(struct mmu_gather *tlb);
+
+#define tlb_flush tlb_flush
+#include <asm-generic/tlb.h>
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ struct vm_area_struct vma;
+
+ vma.vm_mm = tlb->mm;
+ vma.vm_flags = 0;
+ if (tlb->fullmm) {
+ flush_tlb_mm(tlb->mm);
+ return;
+ }
+
+ flush_tlb_range(&vma, tlb->start, tlb->end);
+}
+
+extern void handle_tlb_load(void);
+extern void handle_tlb_store(void);
+extern void handle_tlb_modify(void);
+extern void handle_tlb_refill(void);
+extern void handle_tlb_protect(void);
+
+extern void dump_tlb_all(void);
+extern void dump_tlb_regs(void);
+
+#endif /* __ASM_TLB_H */
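As a hedged example of the invtlb wrappers (hypothetical helper; per the enum above, op 0x1 ignores the info and addr operands, which must be passed as zero here):

#include <asm/tlb.h>

static void local_flush_all_lines(void)
{
	invtlb_all(INVTLB_CURRENT_ALL, 0, 0);	/* info/addr unused by op 0x1 */
}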
diff --git a/arch/loongarch/include/asm/tlbflush.h b/arch/loongarch/include/asm/tlbflush.h
new file mode 100644
index 000000000000..a0785e590681
--- /dev/null
+++ b/arch/loongarch/include/asm/tlbflush.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_TLBFLUSH_H
+#define __ASM_TLBFLUSH_H
+
+#include <linux/mm.h>
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes TLB entries
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ */
+extern void local_flush_tlb_all(void);
+extern void local_flush_tlb_user(void);
+extern void local_flush_tlb_kernel(void);
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void local_flush_tlb_one(unsigned long vaddr);
+
+#ifdef CONFIG_SMP
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long, unsigned long);
+extern void flush_tlb_kernel_range(unsigned long, unsigned long);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_one(unsigned long vaddr);
+
+#else /* CONFIG_SMP */
+
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
+#define flush_tlb_kernel_range(vmaddr, end) local_flush_tlb_kernel_range(vmaddr, end)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr)
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_TLBFLUSH_H */
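Typical caller pattern (hypothetical helper): after a PTE for addr changes, the same call works on SMP and UP builds, resolving to an IPI-based shootdown or the local op respectively:

#include <asm/tlbflush.h>

static void sync_one_pte(struct vm_area_struct *vma, unsigned long addr)
{
	flush_tlb_page(vma, addr);	/* SMP: cross-CPU flush; UP: local_flush_tlb_page() */
}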
diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h
new file mode 100644
index 000000000000..66128dec0bf6
--- /dev/null
+++ b/arch/loongarch/include/asm/topology.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_TOPOLOGY_H
+#define __ASM_TOPOLOGY_H
+
+#include <linux/smp.h>
+
+#ifdef CONFIG_NUMA
+
+extern cpumask_t cpus_on_node[];
+
+#define cpumask_of_node(node) (&cpus_on_node[node])
+
+struct pci_bus;
+extern int pcibus_to_node(struct pci_bus *);
+
+#define cpumask_of_pcibus(bus) (cpu_online_mask)
+
+extern unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
+
+void numa_set_distance(int from, int to, int distance);
+
+#define node_distance(from, to) (node_distances[(from)][(to)])
+
+#else
+#define pcibus_to_node(bus) 0
+#endif
+
+#ifdef CONFIG_SMP
+#define topology_physical_package_id(cpu) (cpu_data[cpu].package)
+#define topology_core_id(cpu) (cpu_data[cpu].core)
+#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
+#define topology_sibling_cpumask(cpu) (&cpu_sibling_map[cpu])
+#endif
+
+#include <asm-generic/topology.h>
+
+static inline void arch_fix_phys_package_id(int num, u32 slot) { }
+#endif /* __ASM_TOPOLOGY_H */
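A sketch of node_distance() in use, assuming CONFIG_NUMA (hypothetical helper):

#include <asm/topology.h>

/* Prefer the candidate node closer to @from. */
static int closer_node(int from, int a, int b)
{
	return node_distance(from, a) <= node_distance(from, b) ? a : b;
}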
diff --git a/arch/loongarch/include/asm/types.h b/arch/loongarch/include/asm/types.h
new file mode 100644
index 000000000000..baf15a0dcf8b
--- /dev/null
+++ b/arch/loongarch/include/asm/types.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_TYPES_H
+#define _ASM_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
+
+#ifdef __ASSEMBLY__
+#define _ULCAST_
+#define _U64CAST_
+#else
+#define _ULCAST_ (unsigned long)
+#define _U64CAST_ (u64)
+#endif
+
+#endif /* _ASM_TYPES_H */
diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h
new file mode 100644
index 000000000000..217c6a3727b1
--- /dev/null
+++ b/arch/loongarch/include/asm/uaccess.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2007 Maciej W. Rozycki
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+#ifndef _ASM_UACCESS_H
+#define _ASM_UACCESS_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/extable.h>
+#include <asm/pgtable.h>
+#include <asm-generic/extable.h>
+#include <asm-generic/access_ok.h>
+
+extern u64 __ua_limit;
+
+#define __UA_ADDR ".dword"
+#define __UA_LA "la.abs"
+#define __UA_LIMIT __ua_limit
+
+/*
+ * get_user: - Get a simple variable from user space.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x, ptr) \
+({ \
+ const __typeof__(*(ptr)) __user *__p = (ptr); \
+ \
+ might_fault(); \
+ access_ok(__p, sizeof(*__p)) ? __get_user((x), __p) : \
+ ((x) = 0, -EFAULT); \
+})
+
+/*
+ * put_user: - Write a simple value into user space.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ \
+ might_fault(); \
+ access_ok(__p, sizeof(*__p)) ? __put_user((x), __p) : -EFAULT; \
+})
+
+/*
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err = 0; \
+ \
+ __chk_user_ptr(ptr); \
+ __get_user_common((x), sizeof(*(ptr)), ptr); \
+ __gu_err; \
+})
+
+/*
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user(x, ptr) \
+({ \
+ int __pu_err = 0; \
+ __typeof__(*(ptr)) __pu_val; \
+ \
+ __pu_val = (x); \
+ __chk_user_ptr(ptr); \
+ __put_user_common(ptr, sizeof(*(ptr))); \
+ __pu_err; \
+})
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct __user *)(x))
+
+#define __get_user_common(val, size, ptr) \
+do { \
+ switch (size) { \
+ case 1: __get_data_asm(val, "ld.b", ptr); break; \
+ case 2: __get_data_asm(val, "ld.h", ptr); break; \
+ case 4: __get_data_asm(val, "ld.w", ptr); break; \
+ case 8: __get_data_asm(val, "ld.d", ptr); break; \
+ default: BUILD_BUG(); break; \
+ } \
+} while (0)
+
+#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
+
+#define __get_data_asm(val, insn, ptr) \
+{ \
+ long __gu_tmp; \
+ \
+ __asm__ __volatile__( \
+ "1: " insn " %1, %2 \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ "3: li.w %0, %3 \n" \
+ " or %1, $r0, $r0 \n" \
+ " b 2b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 3b \n" \
+ " .previous \n" \
+ : "+r" (__gu_err), "=r" (__gu_tmp) \
+ : "m" (__m(ptr)), "i" (-EFAULT)); \
+ \
+ (val) = (__typeof__(*(ptr))) __gu_tmp; \
+}
+
+#define __put_user_common(ptr, size) \
+do { \
+ switch (size) { \
+ case 1: __put_data_asm("st.b", ptr); break; \
+ case 2: __put_data_asm("st.h", ptr); break; \
+ case 4: __put_data_asm("st.w", ptr); break; \
+ case 8: __put_data_asm("st.d", ptr); break; \
+ default: BUILD_BUG(); break; \
+ } \
+} while (0)
+
+#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
+
+#define __put_data_asm(insn, ptr) \
+{ \
+ __asm__ __volatile__( \
+ "1: " insn " %z2, %1 # __put_user_asm\n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ "3: li.w %0, %3 \n" \
+ " b 2b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " " __UA_ADDR " 1b, 3b \n" \
+ " .previous \n" \
+ : "+r" (__pu_err), "=m" (__m(ptr)) \
+ : "Jr" (__pu_val), "i" (-EFAULT)); \
+}
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ int __gu_err = 0; \
+ \
+ __get_kernel_common(*((type *)(dst)), sizeof(type), \
+ (__force type *)(src)); \
+ if (unlikely(__gu_err)) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ type __pu_val; \
+ int __pu_err = 0; \
+ \
+ __pu_val = *(__force type *)(src); \
+ __put_kernel_common(((type *)(dst)), sizeof(type)); \
+ if (unlikely(__pu_err)) \
+ goto err_label; \
+} while (0)
+
+extern unsigned long __copy_user(void *to, const void *from, __kernel_size_t n);
+
+static inline unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ return __copy_user(to, from, n);
+}
+
+static inline unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ return __copy_user(to, from, n);
+}
+
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+
+/*
+ * __clear_user: - Zero a block of memory in user space, with less checking.
+ * @addr: Destination address, in user space.
+ * @size: Number of bytes to zero.
+ *
+ * Zero a block of memory in user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be cleared.
+ * On success, this will be zero.
+ */
+extern unsigned long __clear_user(void __user *addr, __kernel_size_t size);
+
+#define clear_user(addr, n) \
+({ \
+ void __user *__cl_addr = (addr); \
+ unsigned long __cl_size = (n); \
+ if (__cl_size && access_ok(__cl_addr, __cl_size)) \
+ __cl_size = __clear_user(__cl_addr, __cl_size); \
+ __cl_size; \
+})
+
+extern long strncpy_from_user(char *to, const char __user *from, long n);
+extern long strnlen_user(const char __user *str, long n);
+
+#endif /* _ASM_UACCESS_H */
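A minimal round-trip through get_user()/put_user() (hypothetical helper, illustration only):

#include <linux/uaccess.h>

static long double_user_int(int __user *uptr)
{
	int v;

	if (get_user(v, uptr))		/* checks access_ok() and handles faults */
		return -EFAULT;
	return put_user(v * 2, uptr);
}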
diff --git a/arch/loongarch/include/asm/unistd.h b/arch/loongarch/include/asm/unistd.h
new file mode 100644
index 000000000000..cfddb0116a8c
--- /dev/null
+++ b/arch/loongarch/include/asm/unistd.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <uapi/asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
diff --git a/arch/loongarch/include/asm/vdso.h b/arch/loongarch/include/asm/vdso.h
new file mode 100644
index 000000000000..8f8a0f9a4953
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_VDSO_H
+#define __ASM_VDSO_H
+
+#include <linux/mm_types.h>
+#include <vdso/datapage.h>
+
+#include <asm/barrier.h>
+
+/*
+ * struct loongarch_vdso_info - Details of a VDSO image.
+ * @vdso: Pointer to VDSO image (page-aligned).
+ * @size: Size of the VDSO image (page-aligned).
+ * @offset_sigreturn: Offset of the rt_sigreturn() trampoline.
+ * @code_mapping: Special mapping structure for vdso code.
+ * @data_mapping: Special mapping structure for vdso data.
+ *
+ * This structure contains details of a VDSO image, including the image data
+ * and offsets of certain symbols required by the kernel. It is generated as
+ * part of the VDSO build process, aside from the mapping page array, which is
+ * populated at runtime.
+ */
+struct loongarch_vdso_info {
+ void *vdso;
+ unsigned long size;
+ unsigned long offset_sigreturn;
+ struct vm_special_mapping code_mapping;
+ struct vm_special_mapping data_mapping;
+};
+
+extern struct loongarch_vdso_info vdso_info;
+
+#endif /* __ASM_VDSO_H */
diff --git a/arch/loongarch/include/asm/vdso/clocksource.h b/arch/loongarch/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..13cd580d406d
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_CPU
+
+#endif /* __ASM_VDSOCLOCKSOURCE_H */
diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000000..7b2cd37641e2
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+#include <asm/vdso/vdso.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+static __always_inline long gettimeofday_fallback(
+ struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct __kernel_old_timeval *tv asm("a0") = _tv;
+ register struct timezone *tz asm("a1") = _tz;
+ register long nr asm("a7") = __NR_gettimeofday;
+ register long ret asm("a0");
+
+ asm volatile(
+ " syscall 0\n"
+ : "+r" (ret)
+ : "r" (nr), "r" (tv), "r" (tz)
+ : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+ "$t8", "memory");
+
+ return ret;
+}
+
+static __always_inline long clock_gettime_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long nr asm("a7") = __NR_clock_gettime;
+ register long ret asm("a0");
+
+ asm volatile(
+ " syscall 0\n"
+ : "+r" (ret)
+ : "r" (nr), "r" (clkid), "r" (ts)
+ : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+ "$t8", "memory");
+
+ return ret;
+}
+
+static __always_inline int clock_getres_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long nr asm("a7") = __NR_clock_getres;
+ register long ret asm("a0");
+
+ asm volatile(
+ " syscall 0\n"
+ : "+r" (ret)
+ : "r" (nr), "r" (clkid), "r" (ts)
+ : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+ "$t8", "memory");
+
+ return ret;
+}
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+ const struct vdso_data *vd)
+{
+ uint64_t count;
+
+ __asm__ __volatile__(
+ " rdtime.d %0, $zero\n"
+ : "=r" (count));
+
+ return count;
+}
+
+static inline bool loongarch_vdso_hres_capable(void)
+{
+ return true;
+}
+#define __arch_vdso_hres_capable loongarch_vdso_hres_capable
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return get_vdso_data();
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/loongarch/include/asm/vdso/processor.h b/arch/loongarch/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..ef5770b343a0
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/processor.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#define cpu_relax() barrier()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h
new file mode 100644
index 000000000000..5a01643a65b3
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/vdso.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <asm/asm.h>
+#include <asm/page.h>
+
+static inline unsigned long get_vdso_base(void)
+{
+ unsigned long addr;
+
+ __asm__(
+ " la.pcrel %0, _start\n"
+ : "=r" (addr)
+ :
+ :);
+
+ return addr;
+}
+
+static inline const struct vdso_data *get_vdso_data(void)
+{
+ return (const struct vdso_data *)(get_vdso_base() - PAGE_SIZE);
+}
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000000..5de615383a22
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/vsyscall.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline
+struct vdso_data *__loongarch_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+#define __arch_get_k_vdso_data __loongarch_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/loongarch/include/asm/vermagic.h b/arch/loongarch/include/asm/vermagic.h
new file mode 100644
index 000000000000..8b47ccfe3aad
--- /dev/null
+++ b/arch/loongarch/include/asm/vermagic.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#define MODULE_PROC_FAMILY "LOONGARCH "
+
+#ifdef CONFIG_32BIT
+#define MODULE_KERNEL_TYPE "32BIT "
+#elif defined CONFIG_64BIT
+#define MODULE_KERNEL_TYPE "64BIT "
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+ MODULE_PROC_FAMILY MODULE_KERNEL_TYPE
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/loongarch/include/asm/vmalloc.h b/arch/loongarch/include/asm/vmalloc.h
new file mode 100644
index 000000000000..965a0d41ac2d
--- /dev/null
+++ b/arch/loongarch/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_LOONGARCH_VMALLOC_H
+#define _ASM_LOONGARCH_VMALLOC_H
+
+#endif /* _ASM_LOONGARCH_VMALLOC_H */
diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild
new file mode 100644
index 000000000000..4aa680ca2e5f
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += kvm_para.h
diff --git a/arch/loongarch/include/uapi/asm/auxvec.h b/arch/loongarch/include/uapi/asm/auxvec.h
new file mode 100644
index 000000000000..922d9e6b5058
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/auxvec.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_AUXVEC_H
+#define __ASM_AUXVEC_H
+
+/* Location of VDSO image. */
+#define AT_SYSINFO_EHDR 33
+
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
+#endif /* __ASM_AUXVEC_H */
diff --git a/arch/loongarch/include/uapi/asm/bitsperlong.h b/arch/loongarch/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000000..00b4ba1e5cdf
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_LOONGARCH_BITSPERLONG_H
+#define __ASM_LOONGARCH_BITSPERLONG_H
+
+#define __BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_LOONGARCH_BITSPERLONG_H */
diff --git a/arch/loongarch/include/uapi/asm/break.h b/arch/loongarch/include/uapi/asm/break.h
new file mode 100644
index 000000000000..bb9b82ba59f2
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/break.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __UAPI_ASM_BREAK_H
+#define __UAPI_ASM_BREAK_H
+
+#define BRK_DEFAULT 0 /* Used as default */
+#define BRK_BUG 1 /* Used by BUG() */
+#define BRK_KDB 2 /* Used in KDB_ENTER() */
+#define BRK_MATHEMU 3 /* Used by FPU emulator */
+#define BRK_USERBP 4 /* User bp (used by debuggers) */
+#define BRK_SSTEPBP 5 /* User bp (used by debuggers) */
+#define BRK_OVERFLOW 6 /* Overflow check */
+#define BRK_DIVZERO 7 /* Divide by zero check */
+#define BRK_RANGE 8 /* Range error check */
+#define BRK_MULOVFL 9 /* Multiply overflow */
+#define BRK_KPROBE_BP 10 /* Kprobe break */
+#define BRK_KPROBE_SSTEPBP 11 /* Kprobe single step break */
+#define BRK_UPROBE_BP 12 /* See <asm/uprobes.h> */
+#define BRK_UPROBE_XOLBP 13 /* See <asm/uprobes.h> */
+
+#endif /* __UAPI_ASM_BREAK_H */
diff --git a/arch/loongarch/include/uapi/asm/byteorder.h b/arch/loongarch/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000000..b1722d890deb
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/byteorder.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_BYTEORDER_H
+#define _ASM_BYTEORDER_H
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _ASM_BYTEORDER_H */
diff --git a/arch/loongarch/include/uapi/asm/hwcap.h b/arch/loongarch/include/uapi/asm/hwcap.h
new file mode 100644
index 000000000000..8840b72fa8e8
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/hwcap.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_HWCAP_H
+#define _UAPI_ASM_HWCAP_H
+
+/* HWCAP flags */
+#define HWCAP_LOONGARCH_CPUCFG (1 << 0)
+#define HWCAP_LOONGARCH_LAM (1 << 1)
+#define HWCAP_LOONGARCH_UAL (1 << 2)
+#define HWCAP_LOONGARCH_FPU (1 << 3)
+#define HWCAP_LOONGARCH_LSX (1 << 4)
+#define HWCAP_LOONGARCH_LASX (1 << 5)
+#define HWCAP_LOONGARCH_CRC32 (1 << 6)
+#define HWCAP_LOONGARCH_COMPLEX (1 << 7)
+#define HWCAP_LOONGARCH_CRYPTO (1 << 8)
+#define HWCAP_LOONGARCH_LVZ (1 << 9)
+#define HWCAP_LOONGARCH_LBT_X86 (1 << 10)
+#define HWCAP_LOONGARCH_LBT_ARM (1 << 11)
+#define HWCAP_LOONGARCH_LBT_MIPS (1 << 12)
+
+#endif /* _UAPI_ASM_HWCAP_H */
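From userspace these bits arrive via the ELF auxiliary vector; a minimal probe (hypothetical program, not part of this patch):

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hw = getauxval(AT_HWCAP);

	printf("FPU:%d LSX:%d LASX:%d\n",
	       !!(hw & HWCAP_LOONGARCH_FPU),
	       !!(hw & HWCAP_LOONGARCH_LSX),
	       !!(hw & HWCAP_LOONGARCH_LASX));
	return 0;
}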
diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..083193f4a5d5
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/ptrace.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _UAPI_ASM_PTRACE_H
+#define _UAPI_ASM_PTRACE_H
+
+#include <linux/types.h>
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
+
+/*
+ * For PTRACE_{POKE,PEEK}USR. 0 - 31 are GPRs,
+ * 32 is syscall's original ARG0, 33 is PC, 34 is BADVADDR.
+ */
+#define GPR_BASE 0
+#define GPR_NUM 32
+#define GPR_END (GPR_BASE + GPR_NUM - 1)
+#define ARG0 (GPR_END + 1)
+#define PC (GPR_END + 2)
+#define BADVADDR (GPR_END + 3)
+
+#define NUM_FPU_REGS 32
+
+struct user_pt_regs {
+ /* Main processor registers. */
+ unsigned long regs[32];
+
+ /* Original syscall arg0. */
+ unsigned long orig_a0;
+
+ /* Special CSR registers. */
+ unsigned long csr_era;
+ unsigned long csr_badv;
+ unsigned long reserved[10];
+} __attribute__((aligned(8)));
+
+struct user_fp_state {
+ uint64_t fpr[32];
+ uint64_t fcc;
+ uint32_t fcsr;
+};
+
+#define PTRACE_SYSEMU 0x1f
+#define PTRACE_SYSEMU_SINGLESTEP 0x20
+
+#endif /* _UAPI_ASM_PTRACE_H */
diff --git a/arch/loongarch/include/uapi/asm/reg.h b/arch/loongarch/include/uapi/asm/reg.h
new file mode 100644
index 000000000000..90ad910c60eb
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/reg.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Various register offset definitions for debuggers, core file
+ * examiners and whatnot.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __UAPI_ASM_LOONGARCH_REG_H
+#define __UAPI_ASM_LOONGARCH_REG_H
+
+#define LOONGARCH_EF_R0 0
+#define LOONGARCH_EF_R1 1
+#define LOONGARCH_EF_R2 2
+#define LOONGARCH_EF_R3 3
+#define LOONGARCH_EF_R4 4
+#define LOONGARCH_EF_R5 5
+#define LOONGARCH_EF_R6 6
+#define LOONGARCH_EF_R7 7
+#define LOONGARCH_EF_R8 8
+#define LOONGARCH_EF_R9 9
+#define LOONGARCH_EF_R10 10
+#define LOONGARCH_EF_R11 11
+#define LOONGARCH_EF_R12 12
+#define LOONGARCH_EF_R13 13
+#define LOONGARCH_EF_R14 14
+#define LOONGARCH_EF_R15 15
+#define LOONGARCH_EF_R16 16
+#define LOONGARCH_EF_R17 17
+#define LOONGARCH_EF_R18 18
+#define LOONGARCH_EF_R19 19
+#define LOONGARCH_EF_R20 20
+#define LOONGARCH_EF_R21 21
+#define LOONGARCH_EF_R22 22
+#define LOONGARCH_EF_R23 23
+#define LOONGARCH_EF_R24 24
+#define LOONGARCH_EF_R25 25
+#define LOONGARCH_EF_R26 26
+#define LOONGARCH_EF_R27 27
+#define LOONGARCH_EF_R28 28
+#define LOONGARCH_EF_R29 29
+#define LOONGARCH_EF_R30 30
+#define LOONGARCH_EF_R31 31
+
+/*
+ * Saved special registers
+ */
+#define LOONGARCH_EF_ORIG_A0 32
+#define LOONGARCH_EF_CSR_ERA 33
+#define LOONGARCH_EF_CSR_BADV 34
+#define LOONGARCH_EF_CSR_CRMD 35
+#define LOONGARCH_EF_CSR_PRMD 36
+#define LOONGARCH_EF_CSR_EUEN 37
+#define LOONGARCH_EF_CSR_ECFG 38
+#define LOONGARCH_EF_CSR_ESTAT 39
+
+#define LOONGARCH_EF_SIZE 320 /* size in bytes */
+
+#endif /* __UAPI_ASM_LOONGARCH_REG_H */
diff --git a/arch/loongarch/include/uapi/asm/sigcontext.h b/arch/loongarch/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000000..52e49b8bf4be
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/sigcontext.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _UAPI_ASM_SIGCONTEXT_H
+#define _UAPI_ASM_SIGCONTEXT_H
+
+#include <linux/types.h>
+#include <linux/posix_types.h>
+
+/* FP context was used */
+#define SC_USED_FP (1 << 0)
+/* Address error was due to memory load */
+#define SC_ADDRERR_RD (1 << 30)
+/* Address error was due to memory store */
+#define SC_ADDRERR_WR (1 << 31)
+
+struct sigcontext {
+ __u64 sc_pc;
+ __u64 sc_regs[32];
+ __u32 sc_flags;
+ __u64 sc_extcontext[0] __attribute__((__aligned__(16)));
+};
+
+#define CONTEXT_INFO_ALIGN 16
+struct sctx_info {
+ __u32 magic;
+ __u32 size;
+ __u64 padding; /* padding to 16 bytes */
+};
+
+/* FPU context */
+#define FPU_CTX_MAGIC 0x46505501
+#define FPU_CTX_ALIGN 8
+struct fpu_context {
+ __u64 regs[32];
+ __u64 fcc;
+ __u32 fcsr;
+};
+
+#endif /* _UAPI_ASM_SIGCONTEXT_H */
diff --git a/arch/loongarch/include/uapi/asm/signal.h b/arch/loongarch/include/uapi/asm/signal.h
new file mode 100644
index 000000000000..992d965aa13f
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/signal.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _UAPI_ASM_SIGNAL_H
+#define _UAPI_ASM_SIGNAL_H
+
+#define MINSIGSTKSZ 4096
+#define SIGSTKSZ 16384
+
+#include <asm-generic/signal.h>
+
+#endif
diff --git a/arch/loongarch/include/uapi/asm/ucontext.h b/arch/loongarch/include/uapi/asm/ucontext.h
new file mode 100644
index 000000000000..12577e22b1c7
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/ucontext.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __LOONGARCH_UAPI_ASM_UCONTEXT_H
+#define __LOONGARCH_UAPI_ASM_UCONTEXT_H
+
+/**
+ * struct ucontext - user context structure
+ * @uc_flags: flags describing this context (currently unused)
+ * @uc_link: context to resume when this one returns
+ * @uc_stack: stack used by this context
+ * @uc_mcontext: holds basic processor state
+ * @uc_sigmask: set of signals blocked while this context is active
+ * @uc_extcontext: holds extended processor state
+ */
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ sigset_t uc_sigmask;
+ /* There's some padding here to allow sigset_t to be expanded in the
+ * future. Though this is unlikely, other architectures put uc_sigmask
+ * at the end of this structure and explicitly state it can be
+ * expanded, so we didn't want to box ourselves in here. */
+ __u8 __unused[1024 / 8 - sizeof(sigset_t)];
+ /* We can't put uc_sigmask at the end of this structure because we need
+ * to be able to expand sigcontext in the future. For example, the
+ * vector ISA extension will almost certainly add ISA state. We want
+ * to ensure all user-visible ISA state can be saved and restored via a
+ * ucontext, so we're putting this at the end in order to allow for
+ * infinite extensibility. Since we know this will be extended and we
+ * assume sigset_t won't be extended an extreme amount, we're
+ * prioritizing this. */
+ struct sigcontext uc_mcontext;
+};
+
+#endif /* __LOONGARCH_UAPI_ASM_UCONTEXT_H */
diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..fcb668984f03
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/unistd.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
+
+#include <asm-generic/unistd.h>