Diffstat (limited to 'arch/riscv/kernel/head.S')
-rw-r--r-- | arch/riscv/kernel/head.S | 205
1 file changed, 118 insertions, 87 deletions
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index fce5184b22c3..bdf3352acf4c 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -13,21 +13,12 @@
 #include <asm/csr.h>
 #include <asm/hwcap.h>
 #include <asm/image.h>
+#include <asm/scs.h>
+#include <asm/xip_fixup.h>
 #include "efi-header.S"

-#ifdef CONFIG_XIP_KERNEL
-.macro XIP_FIXUP_OFFSET reg
-	REG_L t0, _xip_fixup
-	add \reg, \reg, t0
-.endm
-_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
-#else
-.macro XIP_FIXUP_OFFSET reg
-.endm
-#endif /* CONFIG_XIP_KERNEL */
-
 __HEAD
-ENTRY(_start)
+SYM_CODE_START(_start)
 	/*
 	 * Image header expected by Linux boot-loaders. The image header data
 	 * structure is described in asm/image.h.
@@ -79,7 +70,8 @@ pe_head_start:

 	.align 2
 #ifdef CONFIG_MMU
-relocate:
+	.global relocate_enable_mmu
+relocate_enable_mmu:
 	/* Relocate return address */
 	la a1, kernel_map
 	XIP_FIXUP_OFFSET a1
@@ -95,7 +87,9 @@ relocate:

 	/* Compute satp for kernel page tables, but don't load it yet */
 	srl a2, a0, PAGE_SHIFT
-	li a1, SATP_MODE
+	la a1, satp_mode
+	XIP_FIXUP_OFFSET a1
+	REG_L a1, 0(a1)
 	or a2, a2, a1

 	/*
@@ -117,15 +111,12 @@ relocate:
 	csrw CSR_TVEC, a0

 	/* Reload the global pointer */
-.option push
-.option norelax
-	la gp, __global_pointer$
-.option pop
+	load_global_pointer

 	/*
 	 * Switch to kernel page tables. A full fence is necessary in order to
 	 * avoid using the trampoline translations, which are only correct for
-	 * the first superpage. Fetching the fence is guarnteed to work
+	 * the first superpage. Fetching the fence is guaranteed to work
 	 * because that first superpage is translated the same way.
 	 */
 	csrw CSR_SATP, a2
@@ -140,48 +131,62 @@ secondary_start_sbi:
 	csrw CSR_IE, zero
 	csrw CSR_IP, zero

+#ifndef CONFIG_RISCV_M_MODE
+	/* Enable time CSR */
+	li t0, 0x2
+	csrw CSR_SCOUNTEREN, t0
+#endif
+
 	/* Load the global pointer */
-	.option push
-	.option norelax
-	la gp, __global_pointer$
-	.option pop
+	load_global_pointer

 	/*
-	 * Disable FPU to detect illegal usage of
-	 * floating point in kernel space
+	 * Disable FPU & VECTOR to detect illegal usage of
+	 * floating point or vector in kernel space
 	 */
-	li t0, SR_FS
+	li t0, SR_FS_VS
 	csrc CSR_STATUS, t0

 	/* Set trap vector to spin forever to help debug */
 	la a3, .Lsecondary_park
 	csrw CSR_TVEC, a3

-	slli a3, a0, LGREG
-	la a4, __cpu_up_stack_pointer
-	XIP_FIXUP_OFFSET a4
-	la a5, __cpu_up_task_pointer
-	XIP_FIXUP_OFFSET a5
-	add a4, a3, a4
-	add a5, a3, a5
-	REG_L sp, (a4)
-	REG_L tp, (a5)
-
-	.global secondary_start_common
-secondary_start_common:
+	/* a0 contains the hartid & a1 contains boot data */
+	li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
+	XIP_FIXUP_OFFSET a2
+	add a2, a2, a1
+	REG_L tp, (a2)
+	li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
+	XIP_FIXUP_OFFSET a3
+	add a3, a3, a1
+	REG_L sp, (a3)
+
+.Lsecondary_start_common:

 #ifdef CONFIG_MMU
 	/* Enable virtual memory and relocate to virtual address */
 	la a0, swapper_pg_dir
 	XIP_FIXUP_OFFSET a0
-	call relocate
+	call relocate_enable_mmu
 #endif
-	call setup_trap_vector
-	tail smp_callin
+	call .Lsetup_trap_vector
+	scs_load_current
+	call smp_callin
 #endif /* CONFIG_SMP */

 .align 2
-setup_trap_vector:
+.Lsecondary_park:
+	/*
+	 * Park this hart if we:
+	 * - have too many harts on CONFIG_RISCV_BOOT_SPINWAIT
+	 * - receive an early trap, before setup_trap_vector finished
+	 * - fail in smp_callin(), as a successful one wouldn't return
+	 */
+	wfi
+	j .Lsecondary_park
+
+.align 2
+.Lsetup_trap_vector:
 	/* Set trap vector to exception handler */
 	la a0, handle_exception
 	csrw CSR_TVEC, a0
@@ -193,14 +198,9 @@ setup_trap_vector:
 	csrw CSR_SCRATCH, zero
 	ret

-.Lsecondary_park:
-	/* We lack SMP support or have too many harts, so park this hart */
-	wfi
-	j .Lsecondary_park
+SYM_CODE_END(_start)

-END(_start)
-
-ENTRY(_start_kernel)
+SYM_CODE_START(_start_kernel)
 	/* Mask all interrupts */
 	csrw CSR_IE, zero
 	csrw CSR_IP, zero
@@ -217,7 +217,7 @@ ENTRY(_start_kernel)
 	 * not implement PMPs, so we set up a quick trap handler to just skip
 	 * touching the PMPs on any trap.
 	 */
-	la a0, pmp_done
+	la a0, .Lpmp_done
 	csrw CSR_TVEC, a0

 	li a0, -1
@@ -225,35 +225,36 @@ ENTRY(_start_kernel)
 	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
 	csrw CSR_PMPCFG0, a0
 .align 2
-pmp_done:
+.Lpmp_done:

 	/*
 	 * The hartid in a0 is expected later on, and we have no firmware
 	 * to hand it to us.
 	 */
 	csrr a0, CSR_MHARTID
+#else
+	/* Enable time CSR */
+	li t0, 0x2
+	csrw CSR_SCOUNTEREN, t0
 #endif /* CONFIG_RISCV_M_MODE */

 	/* Load the global pointer */
-.option push
-.option norelax
-	la gp, __global_pointer$
-.option pop
+	load_global_pointer

 	/*
-	 * Disable FPU to detect illegal usage of
-	 * floating point in kernel space
+	 * Disable FPU & VECTOR to detect illegal usage of
+	 * floating point or vector in kernel space
 	 */
-	li t0, SR_FS
+	li t0, SR_FS_VS
 	csrc CSR_STATUS, t0

-#ifdef CONFIG_SMP
+#ifdef CONFIG_RISCV_BOOT_SPINWAIT
 	li t0, CONFIG_NR_CPUS
 	blt a0, t0, .Lgood_cores
 	tail .Lsecondary_park
 .Lgood_cores:
-#endif

+	/* The lottery system is only required for spinwait booting method */
 #ifndef CONFIG_XIP_KERNEL
 	/* Pick one hart to run the main boot sequence */
 	la a3, hart_lottery
@@ -266,59 +267,70 @@ pmp_done:
 	la a3, hart_lottery
 	mv a2, a3
 	XIP_FIXUP_OFFSET a2
+	XIP_FIXUP_FLASH_OFFSET a3
 	lw t1, (a3)
 	amoswap.w t0, t1, (a2)
 	/* first time here if hart_lottery in RAM is not set */
 	beq t0, t1, .Lsecondary_start

+#endif /* CONFIG_XIP */
+#endif /* CONFIG_RISCV_BOOT_SPINWAIT */
+
+#ifdef CONFIG_XIP_KERNEL
 	la sp, _end + THREAD_SIZE
 	XIP_FIXUP_OFFSET sp
 	mv s0, a0
+	mv s1, a1
 	call __copy_data

-	/* Restore a0 copy */
+	/* Restore a0 & a1 copy */
 	mv a0, s0
+	mv a1, s1
 #endif

 #ifndef CONFIG_XIP_KERNEL
 	/* Clear BSS for flat non-ELF images */
 	la a3, __bss_start
 	la a4, __bss_stop
-	ble a4, a3, clear_bss_done
-clear_bss:
+	ble a4, a3, .Lclear_bss_done
+.Lclear_bss:
 	REG_S zero, (a3)
 	add a3, a3, RISCV_SZPTR
-	blt a3, a4, clear_bss
-clear_bss_done:
+	blt a3, a4, .Lclear_bss
+.Lclear_bss_done:
 #endif
-	/* Save hart ID and DTB physical address */
-	mv s0, a0
-	mv s1, a1
-
 	la a2, boot_cpu_hartid
 	XIP_FIXUP_OFFSET a2
 	REG_S a0, (a2)

 	/* Initialize page tables and relocate to virtual addresses */
+	la tp, init_task
 	la sp, init_thread_union + THREAD_SIZE
 	XIP_FIXUP_OFFSET sp
+	addi sp, sp, -PT_SIZE_ON_STACK
+	scs_load_init_stack
 #ifdef CONFIG_BUILTIN_DTB
 	la a0, __dtb_start
+	XIP_FIXUP_OFFSET a0
 #else
-	mv a0, s1
+	mv a0, a1
 #endif /* CONFIG_BUILTIN_DTB */
+	/* Set trap vector to spin forever to help debug */
+	la a3, .Lsecondary_park
+	csrw CSR_TVEC, a3
 	call setup_vm
 #ifdef CONFIG_MMU
 	la a0, early_pg_dir
 	XIP_FIXUP_OFFSET a0
-	call relocate
+	call relocate_enable_mmu
 #endif /* CONFIG_MMU */

-	call setup_trap_vector
+	call .Lsetup_trap_vector
 	/* Restore C environment */
 	la tp, init_task
-	sw zero, TASK_TI_CPU(tp)
 	la sp, init_thread_union + THREAD_SIZE
+	addi sp, sp, -PT_SIZE_ON_STACK
+	scs_load_current

 #ifdef CONFIG_KASAN
 	call kasan_early_init
@@ -327,16 +339,16 @@ clear_bss_done:
 	call soc_early_init
 	tail start_kernel

+#ifdef CONFIG_RISCV_BOOT_SPINWAIT
 .Lsecondary_start:
-#ifdef CONFIG_SMP
 	/* Set trap vector to spin forever to help debug */
 	la a3, .Lsecondary_park
 	csrw CSR_TVEC, a3

 	slli a3, a0, LGREG
-	la a1, __cpu_up_stack_pointer
+	la a1, __cpu_spinwait_stack_pointer
 	XIP_FIXUP_OFFSET a1
-	la a2, __cpu_up_task_pointer
+	la a2, __cpu_spinwait_task_pointer
 	XIP_FIXUP_OFFSET a2
 	add a1, a3, a1
 	add a2, a3, a2
@@ -353,13 +365,13 @@ clear_bss_done:
 	beqz tp, .Lwait_for_cpu_up
 	fence

-	tail secondary_start_common
-#endif
+	tail .Lsecondary_start_common
+#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

-END(_start_kernel)
+SYM_CODE_END(_start_kernel)

 #ifdef CONFIG_RISCV_M_MODE
-ENTRY(reset_regs)
+SYM_CODE_START_LOCAL(reset_regs)
 	li sp, 0
 	li gp, 0
 	li tp, 0
@@ -393,7 +405,7 @@ ENTRY(reset_regs)
 #ifdef CONFIG_FPU
 	csrr t0, CSR_MISA
 	andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
-	beqz t0, .Lreset_regs_done
+	beqz t0, .Lreset_regs_done_fpu

 	li t1, SR_FS
 	csrs CSR_STATUS, t1
@@ -431,12 +443,31 @@ ENTRY(reset_regs)
 	fmv.s.x f31, zero
 	csrw fcsr, 0
 	/* note that the caller must clear SR_FS */
+.Lreset_regs_done_fpu:
 #endif /* CONFIG_FPU */
-.Lreset_regs_done:
+
+#ifdef CONFIG_RISCV_ISA_V
+	csrr t0, CSR_MISA
+	li t1, COMPAT_HWCAP_ISA_V
+	and t0, t0, t1
+	beqz t0, .Lreset_regs_done_vector
+
+	/*
+	 * Clear vector registers and reset vcsr
+	 * VLMAX has a defined value, VLEN is a constant,
+	 * and this form of vsetvli is defined to set vl to VLMAX.
+	 */
+	li t1, SR_VS
+	csrs CSR_STATUS, t1
+	csrs CSR_VCSR, x0
+	vsetvli t1, x0, e8, m8, ta, ma
+	vmv.v.i v0, 0
+	vmv.v.i v8, 0
+	vmv.v.i v16, 0
+	vmv.v.i v24, 0
+	/* note that the caller must clear SR_VS */
+.Lreset_regs_done_vector:
+#endif /* CONFIG_RISCV_ISA_V */
 	ret
-END(reset_regs)
+SYM_CODE_END(reset_regs)
 #endif /* CONFIG_RISCV_M_MODE */
-
-__PAGE_ALIGNED_BSS
-	/* Empty zero page */
-	.balign PAGE_SIZE
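Note: the load_global_pointer macro used throughout this diff stands in for the open-coded gp reload sequence that the patch removes. A minimal sketch of an equivalent macro, based only on the removed sequence above (the upstream helper lives in arch/riscv/include/asm/asm.h; treating that location as a given of this note, not of the diff itself):

/*
 * Sketch: reload gp from the linker-provided __global_pointer$ symbol.
 * Linker relaxation must be disabled around the "la" so it is not
 * itself relaxed against the gp register being initialized.
 */
.macro load_global_pointer
.option push
.option norelax
	la gp, __global_pointer$
.option pop
.endm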
