Diffstat (limited to 'arch/loongarch/kernel')
-rw-r--r--  arch/loongarch/kernel/Makefile.syscalls |   1
-rw-r--r--  arch/loongarch/kernel/cpu-probe.c       |  13
-rw-r--r--  arch/loongarch/kernel/efi-header.S      |   4
-rw-r--r--  arch/loongarch/kernel/efi.c             |   4
-rw-r--r--  arch/loongarch/kernel/entry.S           |  22
-rw-r--r--  arch/loongarch/kernel/env.c             |   5
-rw-r--r--  arch/loongarch/kernel/fpu.S             | 111
-rw-r--r--  arch/loongarch/kernel/head.S            |  39
-rw-r--r--  arch/loongarch/kernel/module-sections.c |   1
-rw-r--r--  arch/loongarch/kernel/module.c          | 204
-rw-r--r--  arch/loongarch/kernel/proc.c            |  10
-rw-r--r--  arch/loongarch/kernel/process.c         |  11
-rw-r--r--  arch/loongarch/kernel/ptrace.c          |   5
-rw-r--r--  arch/loongarch/kernel/relocate.c        |  13
-rw-r--r--  arch/loongarch/kernel/setup.c           |   8
-rw-r--r--  arch/loongarch/kernel/switch.S          |  28
-rw-r--r--  arch/loongarch/kernel/syscall.c         |  15
-rw-r--r--  arch/loongarch/kernel/time.c            |  31
-rw-r--r--  arch/loongarch/kernel/traps.c           |  15
-rw-r--r--  arch/loongarch/kernel/unaligned.c       |  30
20 files changed, 432 insertions, 138 deletions
diff --git a/arch/loongarch/kernel/Makefile.syscalls b/arch/loongarch/kernel/Makefile.syscalls index ab7d9baa2915..cd46c2b69c7f 100644 --- a/arch/loongarch/kernel/Makefile.syscalls +++ b/arch/loongarch/kernel/Makefile.syscalls @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 # No special ABIs on loongarch so far +syscall_abis_32 += syscall_abis_64 += diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index a2060a24b39f..08a227034042 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -106,7 +106,11 @@ EXPORT_SYMBOL(vm_map_base); static void cpu_probe_addrbits(struct cpuinfo_loongarch *c) { -#ifdef __NEED_ADDRBITS_PROBE +#ifdef CONFIG_32BIT + c->pabits = cpu_pabits; + c->vabits = cpu_vabits; + vm_map_base = KVRANGE; +#else c->pabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_PABITS) >> 4; c->vabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_VABITS) >> 12; vm_map_base = 0UL - (1UL << c->vabits); @@ -298,8 +302,15 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int return; } +#ifdef CONFIG_64BIT *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR); *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME); +#else + *vendor = iocsr_read32(LOONGARCH_IOCSR_VENDOR) | + (u64)iocsr_read32(LOONGARCH_IOCSR_VENDOR + 4) << 32; + *cpuname = iocsr_read32(LOONGARCH_IOCSR_CPUNAME) | + (u64)iocsr_read32(LOONGARCH_IOCSR_CPUNAME + 4) << 32; +#endif if (!__cpu_full_name[cpu]) { if (((char *)vendor)[0] == 0) diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S index ba0bdbf86aa8..6df56241cb95 100644 --- a/arch/loongarch/kernel/efi-header.S +++ b/arch/loongarch/kernel/efi-header.S @@ -9,7 +9,11 @@ .macro __EFI_PE_HEADER .long IMAGE_NT_SIGNATURE .Lcoff_header: +#ifdef CONFIG_32BIT + .short IMAGE_FILE_MACHINE_LOONGARCH32 /* Machine */ +#else .short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */ +#endif .short .Lsection_count /* NumberOfSections */ .long 0 /* TimeDateStamp */ .long 0 /* PointerToSymbolTable */ diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index 860a3bc030e0..52c21c895318 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -115,7 +115,9 @@ void __init efi_init(void) efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor); - set_bit(EFI_64BIT, &efi.flags); + if (IS_ENABLED(CONFIG_64BIT)) + set_bit(EFI_64BIT, &efi.flags); + efi_nr_tables = efi_systab->nr_tables; efi_config_table = (unsigned long)efi_systab->tables; diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S index 47e1db9a1ce4..b53d333a7c42 100644 --- a/arch/loongarch/kernel/entry.S +++ b/arch/loongarch/kernel/entry.S @@ -23,24 +23,24 @@ SYM_CODE_START(handle_syscall) UNWIND_HINT_UNDEFINED csrrd t0, PERCPU_BASE_KS la.pcrel t1, kernelsp - add.d t1, t1, t0 + PTR_ADD t1, t1, t0 move t2, sp - ld.d sp, t1, 0 + PTR_L sp, t1, 0 - addi.d sp, sp, -PT_SIZE + PTR_ADDI sp, sp, -PT_SIZE cfi_st t2, PT_R3 cfi_rel_offset sp, PT_R3 - st.d zero, sp, PT_R0 + LONG_S zero, sp, PT_R0 csrrd t2, LOONGARCH_CSR_PRMD - st.d t2, sp, PT_PRMD + LONG_S t2, sp, PT_PRMD csrrd t2, LOONGARCH_CSR_CRMD - st.d t2, sp, PT_CRMD + LONG_S t2, sp, PT_CRMD csrrd t2, LOONGARCH_CSR_EUEN - st.d t2, sp, PT_EUEN + LONG_S t2, sp, PT_EUEN csrrd t2, LOONGARCH_CSR_ECFG - st.d t2, sp, PT_ECFG + LONG_S t2, sp, PT_ECFG csrrd t2, LOONGARCH_CSR_ESTAT - st.d t2, sp, PT_ESTAT + LONG_S t2, sp, PT_ESTAT cfi_st ra, PT_R1 cfi_st a0, PT_R4 cfi_st a1, PT_R5 @@ -51,7 +51,7 @@ SYM_CODE_START(handle_syscall) 
cfi_st a6, PT_R10 cfi_st a7, PT_R11 csrrd ra, LOONGARCH_CSR_ERA - st.d ra, sp, PT_ERA + LONG_S ra, sp, PT_ERA cfi_rel_offset ra, PT_ERA cfi_st tp, PT_R2 @@ -67,7 +67,7 @@ SYM_CODE_START(handle_syscall) #endif move u0, t0 - li.d tp, ~_THREAD_MASK + LONG_LI tp, ~_THREAD_MASK and tp, tp, sp move a0, sp diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 23bd5ae2212c..841206fde3ab 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -72,9 +72,12 @@ static int __init fdt_cpu_clk_init(void) clk = of_clk_get(np, 0); of_node_put(np); + cpu_clock_freq = 200 * 1000 * 1000; - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + pr_warn("No valid CPU clock freq, assume 200MHz.\n"); return -ENODEV; + } cpu_clock_freq = clk_get_rate(clk); clk_put(clk); diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index 28caf416ae36..f225dcc5b530 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -96,6 +96,49 @@ EX fld.d $f31, \base, (31 * FPU_REG_WIDTH) .endm +#ifdef CONFIG_32BIT + .macro sc_save_fcc thread tmp0 tmp1 + movcf2gr \tmp0, $fcc0 + move \tmp1, \tmp0 + movcf2gr \tmp0, $fcc1 + bstrins.w \tmp1, \tmp0, 15, 8 + movcf2gr \tmp0, $fcc2 + bstrins.w \tmp1, \tmp0, 23, 16 + movcf2gr \tmp0, $fcc3 + bstrins.w \tmp1, \tmp0, 31, 24 + EX st.w \tmp1, \thread, THREAD_FCC + movcf2gr \tmp0, $fcc4 + move \tmp1, \tmp0 + movcf2gr \tmp0, $fcc5 + bstrins.w \tmp1, \tmp0, 15, 8 + movcf2gr \tmp0, $fcc6 + bstrins.w \tmp1, \tmp0, 23, 16 + movcf2gr \tmp0, $fcc7 + bstrins.w \tmp1, \tmp0, 31, 24 + EX st.w \tmp1, \thread, (THREAD_FCC + 4) + .endm + + .macro sc_restore_fcc thread tmp0 tmp1 + EX ld.w \tmp0, \thread, THREAD_FCC + bstrpick.w \tmp1, \tmp0, 7, 0 + movgr2cf $fcc0, \tmp1 + bstrpick.w \tmp1, \tmp0, 15, 8 + movgr2cf $fcc1, \tmp1 + bstrpick.w \tmp1, \tmp0, 23, 16 + movgr2cf $fcc2, \tmp1 + bstrpick.w \tmp1, \tmp0, 31, 24 + movgr2cf $fcc3, \tmp1 + EX ld.w \tmp0, \thread, (THREAD_FCC + 4) + bstrpick.w \tmp1, \tmp0, 7, 0 + movgr2cf $fcc4, \tmp1 + bstrpick.w \tmp1, \tmp0, 15, 8 + movgr2cf $fcc5, \tmp1 + bstrpick.w \tmp1, \tmp0, 23, 16 + movgr2cf $fcc6, \tmp1 + bstrpick.w \tmp1, \tmp0, 31, 24 + movgr2cf $fcc7, \tmp1 + .endm +#else .macro sc_save_fcc base, tmp0, tmp1 movcf2gr \tmp0, $fcc0 move \tmp1, \tmp0 @@ -135,6 +178,7 @@ bstrpick.d \tmp1, \tmp0, 63, 56 movgr2cf $fcc7, \tmp1 .endm +#endif .macro sc_save_fcsr base, tmp0 movfcsr2gr \tmp0, fcsr0 @@ -410,6 +454,72 @@ SYM_FUNC_START(_init_fpu) li.w t1, -1 # SNaN +#ifdef CONFIG_32BIT + movgr2fr.w $f0, t1 + movgr2frh.w $f0, t1 + movgr2fr.w $f1, t1 + movgr2frh.w $f1, t1 + movgr2fr.w $f2, t1 + movgr2frh.w $f2, t1 + movgr2fr.w $f3, t1 + movgr2frh.w $f3, t1 + movgr2fr.w $f4, t1 + movgr2frh.w $f4, t1 + movgr2fr.w $f5, t1 + movgr2frh.w $f5, t1 + movgr2fr.w $f6, t1 + movgr2frh.w $f6, t1 + movgr2fr.w $f7, t1 + movgr2frh.w $f7, t1 + movgr2fr.w $f8, t1 + movgr2frh.w $f8, t1 + movgr2fr.w $f9, t1 + movgr2frh.w $f9, t1 + movgr2fr.w $f10, t1 + movgr2frh.w $f10, t1 + movgr2fr.w $f11, t1 + movgr2frh.w $f11, t1 + movgr2fr.w $f12, t1 + movgr2frh.w $f12, t1 + movgr2fr.w $f13, t1 + movgr2frh.w $f13, t1 + movgr2fr.w $f14, t1 + movgr2frh.w $f14, t1 + movgr2fr.w $f15, t1 + movgr2frh.w $f15, t1 + movgr2fr.w $f16, t1 + movgr2frh.w $f16, t1 + movgr2fr.w $f17, t1 + movgr2frh.w $f17, t1 + movgr2fr.w $f18, t1 + movgr2frh.w $f18, t1 + movgr2fr.w $f19, t1 + movgr2frh.w $f19, t1 + movgr2fr.w $f20, t1 + movgr2frh.w $f20, t1 + movgr2fr.w $f21, t1 + movgr2frh.w $f21, t1 + movgr2fr.w $f22, t1 + movgr2frh.w $f22, t1 + movgr2fr.w $f23, t1 + 
movgr2frh.w $f23, t1 + movgr2fr.w $f24, t1 + movgr2frh.w $f24, t1 + movgr2fr.w $f25, t1 + movgr2frh.w $f25, t1 + movgr2fr.w $f26, t1 + movgr2frh.w $f26, t1 + movgr2fr.w $f27, t1 + movgr2frh.w $f27, t1 + movgr2fr.w $f28, t1 + movgr2frh.w $f28, t1 + movgr2fr.w $f29, t1 + movgr2frh.w $f29, t1 + movgr2fr.w $f30, t1 + movgr2frh.w $f30, t1 + movgr2fr.w $f31, t1 + movgr2frh.w $f31, t1 +#else movgr2fr.d $f0, t1 movgr2fr.d $f1, t1 movgr2fr.d $f2, t1 @@ -442,6 +552,7 @@ SYM_FUNC_START(_init_fpu) movgr2fr.d $f29, t1 movgr2fr.d $f30, t1 movgr2fr.d $f31, t1 +#endif jr ra SYM_FUNC_END(_init_fpu) diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S index e3865e92a917..aba548db2446 100644 --- a/arch/loongarch/kernel/head.S +++ b/arch/loongarch/kernel/head.S @@ -43,36 +43,29 @@ SYM_DATA(kernel_fsize, .long _kernel_fsize); SYM_CODE_START(kernel_entry) # kernel entry point - /* Config direct window and set PG */ - SETUP_DMWINS t0 + SETUP_TWINS + SETUP_MODES t0 JUMP_VIRT_ADDR t0, t1 - - /* Enable PG */ - li.w t0, 0xb0 # PLV=0, IE=0, PG=1 - csrwr t0, LOONGARCH_CSR_CRMD - li.w t0, 0x04 # PLV=0, PIE=1, PWE=0 - csrwr t0, LOONGARCH_CSR_PRMD - li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0 - csrwr t0, LOONGARCH_CSR_EUEN + SETUP_DMWINS t0 la.pcrel t0, __bss_start # clear .bss - st.d zero, t0, 0 + LONG_S zero, t0, 0 la.pcrel t1, __bss_stop - LONGSIZE 1: - addi.d t0, t0, LONGSIZE - st.d zero, t0, 0 + PTR_ADDI t0, t0, LONGSIZE + LONG_S zero, t0, 0 bne t0, t1, 1b la.pcrel t0, fw_arg0 - st.d a0, t0, 0 # firmware arguments + PTR_S a0, t0, 0 # firmware arguments la.pcrel t0, fw_arg1 - st.d a1, t0, 0 + PTR_S a1, t0, 0 la.pcrel t0, fw_arg2 - st.d a2, t0, 0 + PTR_S a2, t0, 0 #ifdef CONFIG_PAGE_SIZE_4KB - li.d t0, 0 - li.d t1, CSR_STFILL + LONG_LI t0, 0 + LONG_LI t1, CSR_STFILL csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1 #endif /* KSave3 used for percpu base, initialized as 0 */ @@ -98,7 +91,7 @@ SYM_CODE_START(kernel_entry) # kernel entry point /* Jump to the new kernel: new_pc = current_pc + random_offset */ pcaddi t0, 0 - add.d t0, t0, a0 + PTR_ADD t0, t0, a0 jirl zero, t0, 0xc #endif /* CONFIG_RANDOMIZE_BASE */ @@ -121,12 +114,14 @@ SYM_CODE_END(kernel_entry) */ SYM_CODE_START(smpboot_entry) - SETUP_DMWINS t0 + SETUP_TWINS + SETUP_MODES t0 JUMP_VIRT_ADDR t0, t1 + SETUP_DMWINS t0 #ifdef CONFIG_PAGE_SIZE_4KB - li.d t0, 0 - li.d t1, CSR_STFILL + LONG_LI t0, 0 + LONG_LI t1, CSR_STFILL csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1 #endif /* Enable PG */ diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c index a43ba7f9f987..9fa1c9814fcc 100644 --- a/arch/loongarch/kernel/module-sections.c +++ b/arch/loongarch/kernel/module-sections.c @@ -93,6 +93,7 @@ static void count_max_entries(Elf_Rela *relas, int num, (*plts)++; break; case R_LARCH_GOT_PC_HI20: + case R_LARCH_GOT_PCADD_HI20: (*gots)++; break; default: diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c index 36d6d9eeb7c7..7d4d571ee55e 100644 --- a/arch/loongarch/kernel/module.c +++ b/arch/loongarch/kernel/module.c @@ -22,72 +22,89 @@ #include <asm/inst.h> #include <asm/unwind.h> -static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top) +/* + * reloc_rela_handler() - Apply a particular relocation to a module + * @mod: the module to apply the reloc to + * @location: the address at which the reloc is to be applied + * @v: the value of the reloc, with addend for RELA-style + * @rela_stack: the stack used for store relocation info, LOCAL to THIS module + * @rela_stac_top: where 
the stack operation(pop/push) applies to + * + * Return: 0 upon success, else -ERRNO + */ +typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v, + long *rela_stack, size_t *rela_stack_top, unsigned int type); + +static int rela_stack_push(long stack_value, long *rela_stack, size_t *rela_stack_top) { if (*rela_stack_top >= RELA_STACK_DEPTH) return -ENOEXEC; rela_stack[(*rela_stack_top)++] = stack_value; - pr_debug("%s stack_value = 0x%llx\n", __func__, stack_value); + pr_debug("%s stack_value = 0x%lx\n", __func__, stack_value); return 0; } -static int rela_stack_pop(s64 *stack_value, s64 *rela_stack, size_t *rela_stack_top) +static int rela_stack_pop(long *stack_value, long *rela_stack, size_t *rela_stack_top) { if (*rela_stack_top == 0) return -ENOEXEC; *stack_value = rela_stack[--(*rela_stack_top)]; - pr_debug("%s stack_value = 0x%llx\n", __func__, *stack_value); + pr_debug("%s stack_value = 0x%lx\n", __func__, *stack_value); return 0; } static int apply_r_larch_none(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { return 0; } static int apply_r_larch_error(struct module *me, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { pr_err("%s: Unsupport relocation type %u, please add its support.\n", me->name, type); return -EINVAL; } static int apply_r_larch_32(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { *location = v; return 0; } +#ifdef CONFIG_32BIT +#define apply_r_larch_64 apply_r_larch_error +#else static int apply_r_larch_64(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { *(Elf_Addr *)location = v; return 0; } +#endif static int apply_r_larch_sop_push_pcrel(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { - return rela_stack_push(v - (u64)location, rela_stack, rela_stack_top); + return rela_stack_push(v - (unsigned long)location, rela_stack, rela_stack_top); } static int apply_r_larch_sop_push_absolute(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { return rela_stack_push(v, rela_stack, rela_stack_top); } static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { int err = 0; - s64 opr1; + long opr1; err = rela_stack_pop(&opr1, rela_stack, rela_stack_top); if (err) @@ -104,7 +121,7 @@ static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Add static int apply_r_larch_sop_push_plt_pcrel(struct module *mod, Elf_Shdr *sechdrs, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { ptrdiff_t offset = (void *)v - (void *)location; @@ -118,10 +135,10 @@ static int apply_r_larch_sop_push_plt_pcrel(struct module *mod, } static int apply_r_larch_sop(struct module *mod, u32 
*location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { int err = 0; - s64 opr1, opr2, opr3; + long opr1, opr2, opr3; if (type == R_LARCH_SOP_IF_ELSE) { err = rela_stack_pop(&opr3, rela_stack, rela_stack_top); @@ -164,10 +181,10 @@ static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v, } static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { int err = 0; - s64 opr1; + long opr1; union loongarch_instruction *insn = (union loongarch_instruction *)location; err = rela_stack_pop(&opr1, rela_stack, rela_stack_top); @@ -244,31 +261,33 @@ static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Ad } overflow: - pr_err("module %s: opr1 = 0x%llx overflow! dangerous %s (%u) relocation\n", + pr_err("module %s: opr1 = 0x%lx overflow! dangerous %s (%u) relocation\n", mod->name, opr1, __func__, type); return -ENOEXEC; unaligned: - pr_err("module %s: opr1 = 0x%llx unaligned! dangerous %s (%u) relocation\n", + pr_err("module %s: opr1 = 0x%lx unaligned! dangerous %s (%u) relocation\n", mod->name, opr1, __func__, type); return -ENOEXEC; } static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { switch (type) { case R_LARCH_ADD32: *(s32 *)location += v; return 0; - case R_LARCH_ADD64: - *(s64 *)location += v; - return 0; case R_LARCH_SUB32: *(s32 *)location -= v; return 0; +#ifdef CONFIG_64BIT + case R_LARCH_ADD64: + *(s64 *)location += v; + return 0; case R_LARCH_SUB64: *(s64 *)location -= v; +#endif return 0; default: pr_err("%s: Unsupport relocation type %u\n", mod->name, type); @@ -278,7 +297,7 @@ static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v, static int apply_r_larch_b26(struct module *mod, Elf_Shdr *sechdrs, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { ptrdiff_t offset = (void *)v - (void *)location; union loongarch_instruction *insn = (union loongarch_instruction *)location; @@ -310,15 +329,40 @@ static int apply_r_larch_b26(struct module *mod, return 0; } +static int apply_r_larch_pcadd(struct module *mod, u32 *location, Elf_Addr v, + long *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + union loongarch_instruction *insn = (union loongarch_instruction *)location; + /* Use s32 for a sign-extension deliberately. */ + s32 offset_hi20 = (void *)((v + 0x800)) - (void *)((Elf_Addr)location); + + switch (type) { + case R_LARCH_PCADD_LO12: + insn->reg2i12_format.immediate = v & 0xfff; + break; + case R_LARCH_PCADD_HI20: + v = offset_hi20 >> 12; + insn->reg1i20_format.immediate = v & 0xfffff; + break; + default: + pr_err("%s: Unsupport relocation type %u\n", mod->name, type); + return -EINVAL; + } + + return 0; +} + static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { union loongarch_instruction *insn = (union loongarch_instruction *)location; /* Use s32 for a sign-extension deliberately. 
*/ s32 offset_hi20 = (void *)((v + 0x800) & ~0xfff) - (void *)((Elf_Addr)location & ~0xfff); +#ifdef CONFIG_64BIT Elf_Addr anchor = (((Elf_Addr)location) & ~0xfff) + offset_hi20; ptrdiff_t offset_rem = (void *)v - (void *)anchor; +#endif switch (type) { case R_LARCH_PCALA_LO12: @@ -328,6 +372,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v, v = offset_hi20 >> 12; insn->reg1i20_format.immediate = v & 0xfffff; break; +#ifdef CONFIG_64BIT case R_LARCH_PCALA64_LO20: v = offset_rem >> 32; insn->reg1i20_format.immediate = v & 0xfffff; @@ -336,6 +381,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v, v = offset_rem >> 52; insn->reg2i12_format.immediate = v & 0xfff; break; +#endif default: pr_err("%s: Unsupport relocation type %u\n", mod->name, type); return -EINVAL; @@ -346,30 +392,43 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v, static int apply_r_larch_got_pc(struct module *mod, Elf_Shdr *sechdrs, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { - Elf_Addr got = module_emit_got_entry(mod, sechdrs, v); + reloc_rela_handler got_handler; - if (!got) - return -EINVAL; + if (type != R_LARCH_GOT_PCADD_LO12) { + v = module_emit_got_entry(mod, sechdrs, v); + if (!v) + return -EINVAL; + } switch (type) { case R_LARCH_GOT_PC_LO12: type = R_LARCH_PCALA_LO12; + got_handler = apply_r_larch_pcala; break; case R_LARCH_GOT_PC_HI20: type = R_LARCH_PCALA_HI20; + got_handler = apply_r_larch_pcala; + break; + case R_LARCH_GOT_PCADD_LO12: + type = R_LARCH_PCADD_LO12; + got_handler = apply_r_larch_pcadd; + break; + case R_LARCH_GOT_PCADD_HI20: + type = R_LARCH_PCADD_HI20; + got_handler = apply_r_larch_pcadd; break; default: pr_err("%s: Unsupport relocation type %u\n", mod->name, type); return -EINVAL; } - return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type); + return got_handler(mod, location, v, rela_stack, rela_stack_top, type); } static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { ptrdiff_t offset = (void *)v - (void *)location; @@ -377,31 +436,22 @@ static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v, return 0; } +#ifdef CONFIG_32BIT +#define apply_r_larch_64_pcrel apply_r_larch_error +#else static int apply_r_larch_64_pcrel(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { ptrdiff_t offset = (void *)v - (void *)location; *(u64 *)location = offset; return 0; } - -/* - * reloc_handlers_rela() - Apply a particular relocation to a module - * @mod: the module to apply the reloc to - * @location: the address at which the reloc is to be applied - * @v: the value of the reloc, with addend for RELA-style - * @rela_stack: the stack used for store relocation info, LOCAL to THIS module - * @rela_stac_top: where the stack operation(pop/push) applies to - * - * Return: 0 upon success, else -ERRNO - */ -typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type); +#endif /* The handlers for known reloc types */ static reloc_rela_handler reloc_rela_handlers[] = { - [R_LARCH_NONE ... 
R_LARCH_64_PCREL] = apply_r_larch_error, + [R_LARCH_NONE ... R_LARCH_TLS_DESC_PCADD_LO12] = apply_r_larch_error, [R_LARCH_NONE] = apply_r_larch_none, [R_LARCH_32] = apply_r_larch_32, @@ -414,7 +464,8 @@ static reloc_rela_handler reloc_rela_handlers[] = { [R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop, [R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field, [R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub, - [R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala, + [R_LARCH_PCADD_HI20 ... R_LARCH_PCADD_LO12] = apply_r_larch_pcadd, + [R_LARCH_PCALA_HI20 ... R_LARCH_PCALA64_HI12] = apply_r_larch_pcala, [R_LARCH_32_PCREL] = apply_r_larch_32_pcrel, [R_LARCH_64_PCREL] = apply_r_larch_64_pcrel, }; @@ -423,9 +474,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *mod) { - int i, err; - unsigned int type; - s64 rela_stack[RELA_STACK_DEPTH]; + int err; + unsigned int i, idx, type; + unsigned int num_relocations; + long rela_stack[RELA_STACK_DEPTH]; size_t rela_stack_top = 0; reloc_rela_handler handler; void *location; @@ -436,8 +488,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, pr_debug("%s: Applying relocate section %u to %u\n", __func__, relsec, sechdrs[relsec].sh_info); + idx = 0; rela_stack_top = 0; - for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + num_relocations = sechdrs[relsec].sh_size / sizeof(*rel); + for (i = 0; i < num_relocations; i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; /* This is the symbol it is referring to */ @@ -462,17 +516,59 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, return -EINVAL; } - pr_debug("type %d st_value %llx r_addend %llx loc %llx\n", + pr_debug("type %d st_value %lx r_addend %lx loc %lx\n", (int)ELF_R_TYPE(rel[i].r_info), - sym->st_value, rel[i].r_addend, (u64)location); + (unsigned long)sym->st_value, (unsigned long)rel[i].r_addend, (unsigned long)location); v = sym->st_value + rel[i].r_addend; + + if (type == R_LARCH_PCADD_LO12 || type == R_LARCH_GOT_PCADD_LO12) { + bool found = false; + unsigned int j = idx; + + do { + u32 hi20_type = ELF_R_TYPE(rel[j].r_info); + unsigned long hi20_location = + sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[j].r_offset; + + /* Find the corresponding HI20 relocation entry */ + if ((hi20_location == sym->st_value) && (hi20_type == type - 1)) { + s32 hi20, lo12; + Elf_Sym *hi20_sym = + (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_R_SYM(rel[j].r_info); + unsigned long hi20_sym_val = hi20_sym->st_value + rel[j].r_addend; + + /* Calculate LO12 offset */ + size_t offset = hi20_sym_val - hi20_location; + if (hi20_type == R_LARCH_GOT_PCADD_HI20) { + offset = module_emit_got_entry(mod, sechdrs, hi20_sym_val); + offset = offset - hi20_location; + } + hi20 = (offset + 0x800) & 0xfffff000; + v = lo12 = offset - hi20; + found = true; + break; + } + + j = (j + 1) % num_relocations; + + } while (idx != j); + + if (!found) { + pr_err("%s: Can not find HI20 relocation information\n", mod->name); + return -EINVAL; + } + + idx = j; /* Record the previous j-loop end index */ + } + switch (type) { case R_LARCH_B26: err = apply_r_larch_b26(mod, sechdrs, location, v, rela_stack, &rela_stack_top, type); break; case R_LARCH_GOT_PC_HI20...R_LARCH_GOT_PC_LO12: + case R_LARCH_GOT_PCADD_HI20...R_LARCH_GOT_PCADD_LO12: err = apply_r_larch_got_pc(mod, sechdrs, location, v, rela_stack, 
&rela_stack_top, type); break; diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c index 63d2b7e7e844..a8800d20e11b 100644 --- a/arch/loongarch/kernel/proc.c +++ b/arch/loongarch/kernel/proc.c @@ -20,11 +20,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) unsigned int prid = cpu_data[n].processor_id; unsigned int version = cpu_data[n].processor_id & 0xff; unsigned int fp_version = cpu_data[n].fpu_vers; + u64 freq = cpu_clock_freq, bogomips = lpj_fine * cpu_clock_freq; #ifdef CONFIG_SMP if (!cpu_online(n)) return 0; #endif + do_div(freq, 10000); + do_div(bogomips, const_clock_freq * (5000/HZ)); /* * For the first processor also print the system type @@ -41,11 +44,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "PRID\t\t\t: %s (%08x)\n", id_to_core_name(prid), prid); seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version); seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version); - seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n", - cpu_clock_freq / 1000000, (cpu_clock_freq / 10000) % 100); - seq_printf(m, "BogoMIPS\t\t: %llu.%02llu\n", - (lpj_fine * cpu_clock_freq / const_clock_freq) / (500000/HZ), - ((lpj_fine * cpu_clock_freq / const_clock_freq) / (5000/HZ)) % 100); + seq_printf(m, "CPU MHz\t\t\t: %u.%02u\n", (u32)freq / 100, (u32)freq % 100); + seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", (u32)bogomips / 100, (u32)bogomips % 100); seq_printf(m, "TLB Entries\t\t: %d\n", cpu_data[n].tlbsize); seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n", cpu_pabits + 1, cpu_vabits + 1); diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index efd9edf65603..4ac1c3086152 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -130,6 +130,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) preempt_enable(); + if (IS_ENABLED(CONFIG_RANDSTRUCT)) { + memcpy(dst, src, sizeof(struct task_struct)); + return 0; + } + if (!used_math()) memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr)); else @@ -377,8 +382,11 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu) nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace); } -#ifdef CONFIG_64BIT +#ifdef CONFIG_32BIT +void loongarch_dump_regs32(u32 *uregs, const struct pt_regs *regs) +#else void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs) +#endif { unsigned int i; @@ -395,4 +403,3 @@ void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs) uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg; uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat; } -#endif /* CONFIG_64BIT */ diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c index 8edd0954e55a..be38430f7e28 100644 --- a/arch/loongarch/kernel/ptrace.c +++ b/arch/loongarch/kernel/ptrace.c @@ -650,8 +650,13 @@ static int ptrace_hbp_set_addr(unsigned int note_type, struct perf_event_attr attr; /* Kernel-space address cannot be monitored by user-space */ +#ifdef CONFIG_32BIT + if ((unsigned long)addr >= KPRANGE0) + return -EINVAL; +#else if ((unsigned long)addr >= XKPRANGE) return -EINVAL; +#endif bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); if (IS_ERR(bp)) diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c index b5e2312a2fca..82aa3f035927 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -68,18 +68,25 @@ static inline void __init relocate_absolute(long random_offset) for (p = begin; (void *)p < end; p++) { 
long v = p->symvalue; - uint32_t lu12iw, ori, lu32id, lu52id; + uint32_t lu12iw, ori; +#ifdef CONFIG_64BIT + uint32_t lu32id, lu52id; +#endif union loongarch_instruction *insn = (void *)p->pc; lu12iw = (v >> 12) & 0xfffff; ori = v & 0xfff; +#ifdef CONFIG_64BIT lu32id = (v >> 32) & 0xfffff; lu52id = v >> 52; +#endif insn[0].reg1i20_format.immediate = lu12iw; insn[1].reg2i12_format.immediate = ori; +#ifdef CONFIG_64BIT insn[2].reg1i20_format.immediate = lu32id; insn[3].reg2i12_format.immediate = lu52id; +#endif } } @@ -183,7 +190,7 @@ static inline void __init *determine_relocation_address(void) if (kaslr_disabled()) return destination; - kernel_length = (long)_end - (long)_text; + kernel_length = (unsigned long)_end - (unsigned long)_text; random_offset = get_random_boot() << 16; random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1); @@ -232,7 +239,7 @@ unsigned long __init relocate_kernel(void) early_memunmap(cmdline, COMMAND_LINE_SIZE); if (random_offset) { - kernel_length = (long)(_end) - (long)(_text); + kernel_length = (unsigned long)(_end) - (unsigned long)(_text); /* Copy the kernel to it's new location */ memcpy(location_new, _text, kernel_length); diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 25a87378e48e..20cb6f306456 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -56,6 +56,7 @@ #define SMBIOS_FREQLOW_MASK 0xFF #define SMBIOS_CORE_PACKAGE_OFFSET 0x23 #define SMBIOS_THREAD_PACKAGE_OFFSET 0x25 +#define SMBIOS_THREAD_PACKAGE_2_OFFSET 0x2E #define LOONGSON_EFI_ENABLE (1 << 3) unsigned long fw_arg0, fw_arg1, fw_arg2; @@ -126,7 +127,12 @@ static void __init parse_cpu_table(const struct dmi_header *dm) cpu_clock_freq = freq_temp * 1000000; loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]); - loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET); + loongson_sysconf.cores_per_package = *(u8 *)(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET); + if (dm->length >= 0x30 && loongson_sysconf.cores_per_package == 0xff) { + /* SMBIOS 3.0+ has ThreadCount2 for more than 255 threads */ + loongson_sysconf.cores_per_package = + *(u16 *)(dmi_data + SMBIOS_THREAD_PACKAGE_2_OFFSET); + } pr_info("CpuClock = %llu\n", cpu_clock_freq); } diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S index 9c23cb7e432f..f377d8f5c51a 100644 --- a/arch/loongarch/kernel/switch.S +++ b/arch/loongarch/kernel/switch.S @@ -16,18 +16,23 @@ */ .align 5 SYM_FUNC_START(__switch_to) - csrrd t1, LOONGARCH_CSR_PRMD - stptr.d t1, a0, THREAD_CSRPRMD +#ifdef CONFIG_32BIT + PTR_ADDI a0, a0, TASK_STRUCT_OFFSET + PTR_ADDI a1, a1, TASK_STRUCT_OFFSET +#endif + csrrd t1, LOONGARCH_CSR_PRMD + LONG_SPTR t1, a0, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET) cpu_save_nonscratch a0 - stptr.d ra, a0, THREAD_REG01 - stptr.d a3, a0, THREAD_SCHED_RA - stptr.d a4, a0, THREAD_SCHED_CFA + LONG_SPTR a3, a0, (THREAD_SCHED_RA - TASK_STRUCT_OFFSET) + LONG_SPTR a4, a0, (THREAD_SCHED_CFA - TASK_STRUCT_OFFSET) + #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) - la t7, __stack_chk_guard - LONG_L t8, a1, TASK_STACK_CANARY - LONG_S t8, t7, 0 + la t7, __stack_chk_guard + LONG_LPTR t8, a1, (TASK_STACK_CANARY - TASK_STRUCT_OFFSET) + LONG_SPTR t8, t7, 0 #endif + move tp, a2 cpu_restore_nonscratch a1 @@ -35,8 +40,11 @@ SYM_FUNC_START(__switch_to) PTR_ADD t0, t0, tp set_saved_sp t0, t1, t2 - ldptr.d t1, a1, THREAD_CSRPRMD - csrwr t1, LOONGARCH_CSR_PRMD + LONG_LPTR t1, a1, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET) + csrwr t1, 
LOONGARCH_CSR_PRMD +#ifdef CONFIG_32BIT + PTR_ADDI a0, a0, -TASK_STRUCT_OFFSET +#endif jr ra SYM_FUNC_END(__switch_to) diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c index 168bd97540f8..1249d82c1cd0 100644 --- a/arch/loongarch/kernel/syscall.c +++ b/arch/loongarch/kernel/syscall.c @@ -34,9 +34,22 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); } +SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, unsigned long, + prot, unsigned long, flags, unsigned long, fd, unsigned long, offset) +{ + if (offset & (~PAGE_MASK >> 12)) + return -EINVAL; + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> (PAGE_SHIFT - 12)); +} + void *sys_call_table[__NR_syscalls] = { [0 ... __NR_syscalls - 1] = sys_ni_syscall, +#ifdef CONFIG_32BIT +#include <asm/syscall_table_32.h> +#else #include <asm/syscall_table_64.h> +#endif }; typedef long (*sys_call_fn)(unsigned long, unsigned long, @@ -75,7 +88,7 @@ void noinstr __no_stack_protector do_syscall(struct pt_regs *regs) * * The resulting 6 bits of entropy is seen in SP[9:4]. */ - choose_random_kstack_offset(drdtime()); + choose_random_kstack_offset(get_cycles()); syscall_exit_to_user_mode(regs); } diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index 6fb92cc1a4c9..dbaaabcaf6f0 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -18,6 +18,7 @@ #include <asm/loongarch.h> #include <asm/paravirt.h> #include <asm/time.h> +#include <asm/timex.h> u64 cpu_clock_freq; EXPORT_SYMBOL(cpu_clock_freq); @@ -50,10 +51,10 @@ static int constant_set_state_oneshot(struct clock_event_device *evt) raw_spin_lock(&state_lock); - timer_config = csr_read64(LOONGARCH_CSR_TCFG); + timer_config = csr_read(LOONGARCH_CSR_TCFG); timer_config |= CSR_TCFG_EN; timer_config &= ~CSR_TCFG_PERIOD; - csr_write64(timer_config, LOONGARCH_CSR_TCFG); + csr_write(timer_config, LOONGARCH_CSR_TCFG); raw_spin_unlock(&state_lock); @@ -62,15 +63,15 @@ static int constant_set_state_oneshot(struct clock_event_device *evt) static int constant_set_state_periodic(struct clock_event_device *evt) { - unsigned long period; unsigned long timer_config; + u64 period = const_clock_freq; raw_spin_lock(&state_lock); - period = const_clock_freq / HZ; + do_div(period, HZ); timer_config = period & CSR_TCFG_VAL; timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN); - csr_write64(timer_config, LOONGARCH_CSR_TCFG); + csr_write(timer_config, LOONGARCH_CSR_TCFG); raw_spin_unlock(&state_lock); @@ -83,9 +84,9 @@ static int constant_set_state_shutdown(struct clock_event_device *evt) raw_spin_lock(&state_lock); - timer_config = csr_read64(LOONGARCH_CSR_TCFG); + timer_config = csr_read(LOONGARCH_CSR_TCFG); timer_config &= ~CSR_TCFG_EN; - csr_write64(timer_config, LOONGARCH_CSR_TCFG); + csr_write(timer_config, LOONGARCH_CSR_TCFG); raw_spin_unlock(&state_lock); @@ -98,7 +99,7 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev delta &= CSR_TCFG_VAL; timer_config = delta | CSR_TCFG_EN; - csr_write64(timer_config, LOONGARCH_CSR_TCFG); + csr_write(timer_config, LOONGARCH_CSR_TCFG); return 0; } @@ -120,7 +121,7 @@ static int arch_timer_dying(unsigned int cpu) static unsigned long get_loops_per_jiffy(void) { - unsigned long lpj = (unsigned long)const_clock_freq; + u64 lpj = const_clock_freq; do_div(lpj, HZ); @@ -131,13 +132,13 @@ static long init_offset; void save_counter(void) { - init_offset = drdtime(); + 
init_offset = get_cycles(); } void sync_counter(void) { /* Ensure counter begin at 0 */ - csr_write64(init_offset, LOONGARCH_CSR_CNTC); + csr_write(init_offset, LOONGARCH_CSR_CNTC); } int constant_clockevent_init(void) @@ -197,12 +198,12 @@ int constant_clockevent_init(void) static u64 read_const_counter(struct clocksource *clk) { - return drdtime(); + return get_cycles64(); } static noinstr u64 sched_clock_read(void) { - return drdtime(); + return get_cycles64(); } static struct clocksource clocksource_const = { @@ -211,7 +212,9 @@ static struct clocksource clocksource_const = { .read = read_const_counter, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, +#ifdef CONFIG_GENERIC_GETTIMEOFDAY .vdso_clock_mode = VDSO_CLOCKMODE_CPU, +#endif }; int __init constant_clocksource_init(void) @@ -235,7 +238,7 @@ void __init time_init(void) else const_clock_freq = calc_const_freq(); - init_offset = -(drdtime() - csr_read64(LOONGARCH_CSR_CNTC)); + init_offset = -(get_cycles() - csr_read(LOONGARCH_CSR_CNTC)); constant_clockevent_init(); constant_clocksource_init(); diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index da5926fead4a..004b8ebf0051 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -625,7 +625,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs) bool user = user_mode(regs); bool pie = regs_irqs_disabled(regs); unsigned long era = exception_era(regs); - u64 badv = 0, lower = 0, upper = ULONG_MAX; + unsigned long badv = 0, lower = 0, upper = ULONG_MAX; union loongarch_instruction insn; irqentry_state_t state = irqentry_enter(regs); @@ -1070,10 +1070,13 @@ asmlinkage void noinstr do_reserved(struct pt_regs *regs) asmlinkage void cache_parity_error(void) { + u32 merrctl = csr_read32(LOONGARCH_CSR_MERRCTL); + unsigned long merrera = csr_read(LOONGARCH_CSR_MERRERA); + /* For the moment, report the problem and hang. 
*/ pr_err("Cache error exception:\n"); - pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL)); - pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA)); + pr_err("csr_merrctl == %08x\n", merrctl); + pr_err("csr_merrera == %016lx\n", merrera); panic("Can't handle the cache error!"); } @@ -1130,9 +1133,9 @@ static void configure_exception_vector(void) eentry = (unsigned long)exception_handlers; tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE; - csr_write64(eentry, LOONGARCH_CSR_EENTRY); - csr_write64(__pa(eentry), LOONGARCH_CSR_MERRENTRY); - csr_write64(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY); + csr_write(eentry, LOONGARCH_CSR_EENTRY); + csr_write(__pa(eentry), LOONGARCH_CSR_MERRENTRY); + csr_write(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY); } void per_cpu_trap_init(int cpu) diff --git a/arch/loongarch/kernel/unaligned.c b/arch/loongarch/kernel/unaligned.c index 487be604b96a..cc929c9fe7e9 100644 --- a/arch/loongarch/kernel/unaligned.c +++ b/arch/loongarch/kernel/unaligned.c @@ -27,12 +27,21 @@ static u32 unaligned_instructions_user; static u32 unaligned_instructions_kernel; #endif -static inline unsigned long read_fpr(unsigned int idx) +static inline u64 read_fpr(unsigned int idx) { +#ifdef CONFIG_64BIT #define READ_FPR(idx, __value) \ __asm__ __volatile__("movfr2gr.d %0, $f"#idx"\n\t" : "=r"(__value)); - - unsigned long __value; +#else +#define READ_FPR(idx, __value) \ +{ \ + u32 __value_lo, __value_hi; \ + __asm__ __volatile__("movfr2gr.s %0, $f"#idx"\n\t" : "=r"(__value_lo)); \ + __asm__ __volatile__("movfrh2gr.s %0, $f"#idx"\n\t" : "=r"(__value_hi)); \ + __value = (__value_lo | ((u64)__value_hi << 32)); \ +} +#endif + u64 __value; switch (idx) { case 0: @@ -138,11 +147,20 @@ static inline unsigned long read_fpr(unsigned int idx) return __value; } -static inline void write_fpr(unsigned int idx, unsigned long value) +static inline void write_fpr(unsigned int idx, u64 value) { +#ifdef CONFIG_64BIT #define WRITE_FPR(idx, value) \ __asm__ __volatile__("movgr2fr.d $f"#idx", %0\n\t" :: "r"(value)); - +#else +#define WRITE_FPR(idx, value) \ +{ \ + u32 value_lo = value; \ + u32 value_hi = value >> 32; \ + __asm__ __volatile__("movgr2fr.w $f"#idx", %0\n\t" :: "r"(value_lo)); \ + __asm__ __volatile__("movgr2frh.w $f"#idx", %0\n\t" :: "r"(value_hi)); \ +} +#endif switch (idx) { case 0: WRITE_FPR(0, value); @@ -252,7 +270,7 @@ void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned i bool sign, write; bool user = user_mode(regs); unsigned int res, size = 0; - unsigned long value = 0; + u64 value = 0; union loongarch_instruction insn; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); |

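A note on the R_LARCH_PCADD_HI20 / R_LARCH_PCADD_LO12 handling added to module.c: the loader rounds the PC-relative offset to the nearest 4 KiB multiple (hi20 = (offset + 0x800) & 0xfffff000) so that the remaining low part always fits the sign-extended 12-bit immediate, and hi20 + lo12 reproduces the original offset exactly. A standalone C sketch of that rounding rule follows (illustrative only, not part of the patch; the function name is made up).

#include <assert.h>
#include <stdint.h>

/* Split a 32-bit PC-relative offset into the immediates consumed by a
 * pcaddu12i + addi.w pair, using the same +0x800 bias as the patch. */
static void split_pcadd(int32_t offset, int32_t *hi20, int32_t *lo12)
{
	*hi20 = (offset + 0x800) & ~0xfff;	/* same mask as 0xfffff000 */
	*lo12 = offset - *hi20;			/* always in [-0x800, 0x7ff] */
}

int main(void)
{
	int32_t hi20, lo12;

	split_pcadd(0x12345, &hi20, &lo12);
	assert(hi20 == 0x12000 && lo12 == 0x345);

	split_pcadd(-0x1801, &hi20, &lo12);	/* negative offsets work too */
	assert(hi20 + lo12 == -0x1801);
	assert(lo12 >= -0x800 && lo12 <= 0x7ff);
	return 0;
}

The same +0x800 bias appears in apply_r_larch_pcala() for R_LARCH_PCALA_HI20; the difference is that the PCADD variants anchor on the exact address of the pcaddu12i instruction rather than on its page-aligned base, which is why the LO12 fixup has to locate its matching HI20 entry in the relocation loop.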