Diffstat (limited to 'arch/powerpc/kernel/head_64.S')
-rw-r--r--   arch/powerpc/kernel/head_64.S   251
1 file changed, 146 insertions, 105 deletions
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 5c5181e8d5f1..63432a33ec49 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -18,6 +18,7 @@
  *  variants.
  */
 
+#include <linux/linkage.h>
 #include <linux/threads.h>
 #include <linux/init.h>
 #include <asm/reg.h>
@@ -39,7 +40,6 @@
 #include <asm/hw_irq.h>
 #include <asm/cputhreads.h>
 #include <asm/ppc-opcode.h>
-#include <asm/export.h>
 #include <asm/feature-fixups.h>
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/exception-64s.h>
@@ -75,6 +75,13 @@
  * 2. The kernel is entered at __start
  */
 
+/*
+ * boot_from_prom and prom_init run at the physical address. Everything
+ * after prom and kexec entry run at the virtual address (PAGE_OFFSET).
+ * Secondaries run at the virtual address from generic_secondary_common_init
+ * onward.
+ */
+
 OPEN_FIXED_SECTION(first_256B, 0x0, 0x100)
 USE_FIXED_SECTION(first_256B)
 /*
@@ -111,7 +118,7 @@ __secondary_hold_acknowledge:
 #ifdef CONFIG_RELOCATABLE
 	/* This flag is set to 1 by a loader if the kernel should run
 	 * at the loaded address instead of the linked address. This
-	 * is used by kexec-tools to keep the the kdump kernel in the
+	 * is used by kexec-tools to keep the kdump kernel in the
 	 * crash_kernel region. The loader is responsible for
 	 * observing the alignment requirement.
 	 */
@@ -143,7 +150,7 @@ DEFINE_FIXED_SYMBOL(__run_at_load, first_256B)
 	.globl	__secondary_hold
 __secondary_hold:
 	FIXUP_ENDIAN
-#ifndef CONFIG_PPC_BOOK3E
+#ifndef CONFIG_PPC_BOOK3E_64
 	mfmsr	r24
 	ori	r24,r24,MSR_RI
 	mtmsrd	r24			/* RI on */
@@ -159,17 +166,13 @@ __secondary_hold:
 	std	r24,(ABS_ADDR(__secondary_hold_acknowledge, first_256B))(0)
 	sync
 
-	li	r26,0
-#ifdef CONFIG_PPC_BOOK3E
-	tovirt(r26,r26)
-#endif
 	/* All secondary cpus wait here until told to start. */
-100:	ld	r12,(ABS_ADDR(__secondary_hold_spinloop, first_256B))(r26)
+100:	ld	r12,(ABS_ADDR(__secondary_hold_spinloop, first_256B))(0)
 	cmpdi	0,r12,0
 	beq	100b
 
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
 	tovirt(r12,r12)
 #endif
 	mtctr	r12
@@ -178,7 +181,7 @@ __secondary_hold:
 	 * it may be the case that other platforms have r4 right to
 	 * begin with, this gives us some safety in case it is not
 	 */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
 	mr	r4,r25
 #else
 	li	r4,0
@@ -192,13 +195,6 @@ __secondary_hold:
 #endif
 CLOSE_FIXED_SECTION(first_256B)
 
-/* This value is used to mark exception frames on the stack. */
-	.section ".toc","aw"
-/* This value is used to mark exception frames on the stack. */
-exception_marker:
-	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
-	.previous
-
 /*
  * On server, we include the exception vectors code here as it
  * relies on absolute addressing which is only possible within
@@ -214,7 +210,7 @@ USE_TEXT_SECTION()
 
 #include "interrupt_64.S"
 
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
 /*
  * The booting_thread_hwid holds the thread id we want to boot in cpu
  * hotplug case. It is set by cpu hotplug code, and is invalid by default.
@@ -313,16 +309,14 @@ _GLOBAL(fsl_secondary_thread_init)
 	/* turn on 64-bit mode */
 	bl	enable_64b_mode
 
-	/* get a valid TOC pointer, wherever we're mapped at */
-	bl	relative_toc
-	tovirt(r2,r2)
-
 	/* Book3E initialization */
 	mr	r3,r24
 	bl	book3e_secondary_thread_init
+	bl	relative_toc
+
 	b	generic_secondary_common_init
 
-#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_BOOK3E_64 */
 
 /*
  * On pSeries and most other platforms, secondary processors spin
@@ -335,22 +329,24 @@ _GLOBAL(fsl_secondary_thread_init)
  */
 _GLOBAL(generic_secondary_smp_init)
 	FIXUP_ENDIAN
+
+	li	r13,0
+
+	/* Poison TOC */
+	li	r2,-1
+
 	mr	r24,r3
 	mr	r25,r4
 
 	/* turn on 64-bit mode */
 	bl	enable_64b_mode
 
-	/* get a valid TOC pointer, wherever we're mapped at */
-	bl	relative_toc
-	tovirt(r2,r2)
-
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
 	/* Book3E initialization */
 	mr	r3,r24
 	mr	r4,r25
 	bl	book3e_secondary_core_init
-
+	/* Now NIA and r2 are relocated to PAGE_OFFSET if not already */
 	/*
 	 * After common core init has finished, check if the current thread is the
 	 * one we wanted to boot. If not, start the specified thread and stop the
@@ -378,8 +374,7 @@ _GLOBAL(generic_secondary_smp_init)
 	beq	20f
 
 	/* start the specified thread */
-	LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
-	ld	r4, 0(r5)
+	LOAD_REG_ADDR(r5, DOTSYM(fsl_secondary_thread_init))
 	bl	book3e_start_thread
 
 	/* stop the current thread */
@@ -388,6 +383,16 @@ _GLOBAL(generic_secondary_smp_init)
 10:
 	b	10b
 20:
+#else
+	/* Now the MMU is off, can branch to our PAGE_OFFSET address */
+	bcl	20,31,$+4
+1:	mflr	r11
+	addi	r11,r11,(2f - 1b)
+	tovirt(r11, r11)
+	mtctr	r11
+	bctr
+2:
+	bl	relative_toc
 #endif
 
 generic_secondary_common_init:
@@ -400,8 +405,12 @@ generic_secondary_common_init:
 #else
 	LOAD_REG_ADDR(r8, paca_ptrs)	/* Load paca_ptrs pointe	 */
 	ld	r8,0(r8)		/* Get base vaddr of array	 */
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+	LOAD_REG_IMMEDIATE(r7, NR_CPUS)
+#else
 	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address	 */
 	lwz	r7,0(r7)		/* also the max paca allocated	 */
+#endif
 	li	r5,0			/* logical cpu id		 */
 1:	sldi	r9,r5,3			/* get paca_ptrs[] index from cpu id */
@@ -417,7 +426,7 @@ generic_secondary_common_init:
 	b	kexec_wait		/* next kernel might do better	 */
 
 2:	SET_PACA(r13)
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
 	addi	r12,r13,PACA_EXTLB	/* and TLB exc frame in another	 */
 	mtspr	SPRN_SPRG_TLB_EXFRAME,r12
 #endif
@@ -427,7 +436,7 @@ generic_secondary_common_init:
 
 	/* Create a temp kernel stack for use before relocation is on. */
 	ld	r1,PACAEMERGSP(r13)
-	subi	r1,r1,STACK_FRAME_OVERHEAD
+	subi	r1,r1,STACK_FRAME_MIN_SIZE
 
 	/* See if we need to call a cpu state restore handler */
 	LOAD_REG_ADDR(r23, cur_cpu_spec)
@@ -435,7 +444,7 @@ generic_secondary_common_init:
 	ld	r12,CPU_SPEC_RESTORE(r23)
 	cmpdi	0,r12,0
 	beq	3f
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
 	ld	r12,0(r12)
 #endif
 	mtctr	r12
@@ -465,7 +474,7 @@ generic_secondary_common_init:
  * Assumes we're mapped EA == RA if the MMU is on.
  */
 #ifdef CONFIG_PPC_BOOK3S
-__mmu_off:
+SYM_FUNC_START_LOCAL(__mmu_off)
 	mfmsr	r3
 	andi.	r0,r3,MSR_IR|MSR_DR
 	beqlr
@@ -476,8 +485,34 @@ __mmu_off:
 	sync
 	rfid
 	b	.	/* prevent speculative execution */
-#endif
+SYM_FUNC_END(__mmu_off)
+
+SYM_FUNC_START_LOCAL(start_initialization_book3s)
+	mflr	r25
+
+	/* Setup some critical 970 SPRs before switching MMU off */
+	mfspr	r0,SPRN_PVR
+	srwi	r0,r0,16
+	cmpwi	r0,0x39		/* 970 */
+	beq	1f
+	cmpwi	r0,0x3c		/* 970FX */
+	beq	1f
+	cmpwi	r0,0x44		/* 970MP */
+	beq	1f
+	cmpwi	r0,0x45		/* 970GX */
+	bne	2f
+1:	bl	__cpu_preinit_ppc970
+2:
+
+	/* Switch off MMU if not already off */
+	bl	__mmu_off
+
+	/* Now the MMU is off, can return to our PAGE_OFFSET address */
+	tovirt(r25,r25)
+	mtlr	r25
+	blr
+SYM_FUNC_END(start_initialization_book3s)
+#endif
 
 /*
  * Here is our main kernel entry point. We support currently 2 kind of entries
@@ -494,14 +529,11 @@ __start_initialization_multiplatform:
 	/* Make sure we are running in 64 bits mode */
 	bl	enable_64b_mode
 
-	/* Get TOC pointer (current runtime address) */
-	bl	relative_toc
+	/* Zero r13 (paca) so early program check / mce don't use it */
+	li	r13,0
 
-	/* find out where we are now */
-	bcl	20,31,$+4
-0:	mflr	r26			/* r26 = runtime addr here */
-	addis	r26,r26,(_stext - 0b)@ha
-	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */
+	/* Poison TOC */
+	li	r2,-1
 
 	/*
 	 * Are we booted from a PROM Of-type client-interface ?
@@ -519,32 +551,41 @@ __start_initialization_multiplatform:
 	mr	r29,r9
 #endif
 
-#ifdef CONFIG_PPC_BOOK3E
+	/* Get TOC pointer (current runtime address) */
+	bl	relative_toc
+
+	/* These functions return to the virtual (PAGE_OFFSET) address */
+#ifdef CONFIG_PPC_BOOK3E_64
 	bl	start_initialization_book3e
-	b	__after_prom_start
 #else
-	/* Setup some critical 970 SPRs before switching MMU off */
-	mfspr	r0,SPRN_PVR
-	srwi	r0,r0,16
-	cmpwi	r0,0x39		/* 970 */
-	beq	1f
-	cmpwi	r0,0x3c		/* 970FX */
-	beq	1f
-	cmpwi	r0,0x44		/* 970MP */
-	beq	1f
-	cmpwi	r0,0x45		/* 970GX */
-	bne	2f
-1:	bl	__cpu_preinit_ppc970
-2:
+	bl	start_initialization_book3s
+#endif /* CONFIG_PPC_BOOK3E_64 */
+
+	/* Get TOC pointer, virtual */
+	bl	relative_toc
+
+	/* find out where we are now */
+
+	/* OPAL doesn't pass base address in r4, have to derive it. */
+	bcl	20,31,$+4
+0:	mflr	r26			/* r26 = runtime addr here */
+	addis	r26,r26,(_stext - 0b)@ha
+	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */
 
-	/* Switch off MMU if not already off */
-	bl	__mmu_off
 	b	__after_prom_start
-#endif /* CONFIG_PPC_BOOK3E */
 
 __REF
 __boot_from_prom:
 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
+	/* Get TOC pointer, non-virtual */
+	bl	relative_toc
+
+	/* find out where we are now */
+	bcl	20,31,$+4
+0:	mflr	r26			/* r26 = runtime addr here */
+	addis	r26,r26,(_stext - 0b)@ha
+	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */
+
 	/* Save parameters */
 	mr	r31,r3
 	mr	r30,r4
@@ -574,7 +615,7 @@ __boot_from_prom:
 
 	/* Do all of the interaction with OF client interface */
 	mr	r8,r26
-	bl	prom_init
+	bl	CFUNC(prom_init)
 #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */
 
 	/* We never return. We also hit that trap if trying to boot
@@ -585,21 +626,14 @@
 __after_prom_start:
 #ifdef CONFIG_RELOCATABLE
 	/* process relocations for the final address of the kernel */
-	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
-	sldi	r25,r25,32
-#if defined(CONFIG_PPC_BOOK3E)
-	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
-#endif
 	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
-#if defined(CONFIG_PPC_BOOK3E)
-	tophys(r26,r26)
-#endif
 	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
-	bne	1f
-	add	r25,r25,r26
+	mr	r25,r26		/* then use current kernel base */
+	beq	1f
+	LOAD_REG_IMMEDIATE(r25, PAGE_OFFSET)	/* else use static kernel base */
 1:	mr	r3,r25
 	bl	relocate
-#if defined(CONFIG_PPC_BOOK3E)
+#if defined(CONFIG_PPC_BOOK3E_64)
 	/* IVPR needs to be set after relocation. */
 	bl	init_core_book3e
 #endif
@@ -612,15 +646,10 @@ __after_prom_start:
  *
  * Note: This process overwrites the OF exception vectors.
  */
-	li	r3,0			/* target addr */
-#ifdef CONFIG_PPC_BOOK3E
-	tovirt(r3,r3)		/* on booke, we already run at PAGE_OFFSET */
-#endif
-	mr.	r4,r26			/* In some cases the loader may  */
-#if defined(CONFIG_PPC_BOOK3E)
-	tovirt(r4,r4)
-#endif
-	beq	9f			/* have already put us at zero */
+	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET)
+	mr	r4,r26			/* Load the virtual source address into r4 */
+	cmpld	r3,r4			/* Check if source == dest */
+	beq	9f			/* If so skip the copy */
 	li	r6,0x100		/* Start offset, the first 0x100 */
 					/* bytes were copied earlier.	 */
 
@@ -630,14 +659,11 @@ __after_prom_start:
  * variable __run_at_load, if it is set the kernel is treated as relocatable
  * kernel, otherwise it will be moved to PHYSICAL_START
  */
-#if defined(CONFIG_PPC_BOOK3E)
-	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
-#endif
 	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
 	cmplwi	cr0,r7,1
 	bne	3f
 
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
 	LOAD_REG_ADDR(r5, __end_interrupts)
 	LOAD_REG_ADDR(r11, _stext)
 	sub	r5,r5,r11
@@ -751,9 +777,15 @@ _GLOBAL(pmac_secondary_start)
 	sync
 	slbia
 
-	/* get TOC pointer (real address) */
+	/* Branch to our PAGE_OFFSET address */
+	bcl	20,31,$+4
+1:	mflr	r11
+	addi	r11,r11,(2f - 1b)
+	tovirt(r11, r11)
+	mtctr	r11
+	bctr
+2:
 	bl	relative_toc
-	tovirt(r2,r2)
 
 	/* Copy some CPU settings from CPU 0 */
 	bl	__restore_cpu_ppc970
@@ -780,7 +812,7 @@ _GLOBAL(pmac_secondary_start)
 
 	/* Create a temp kernel stack for use before relocation is on. */
 	ld	r1,PACAEMERGSP(r13)
-	subi	r1,r1,STACK_FRAME_OVERHEAD
+	subi	r1,r1,STACK_FRAME_MIN_SIZE
 
 	b	__secondary_start
 
@@ -812,7 +844,7 @@ __secondary_start:
 	 * can turn it on below. This is a call to C, which is OK, we're still
 	 * running on the emergency stack.
 	 */
-	bl	early_setup_secondary
+	bl	CFUNC(early_setup_secondary)
 
 	/*
 	 * The primary has initialized our kernel stack for us in the paca, grab
@@ -848,10 +880,10 @@ __secondary_start:
  * before going into C code.
 */
 start_secondary_prolog:
-	ld	r2,PACATOC(r13)
+	LOAD_PACA_TOC()
 	li	r3,0
 	std	r3,0(r1)		/* Zero the stack frame pointer	*/
-	bl	start_secondary
+	bl	CFUNC(start_secondary)
 	b	.
 /*
  * Reset stack pointer and call start_secondary
@@ -862,25 +894,26 @@ _GLOBAL(start_secondary_resume)
 	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
 	li	r3,0
 	std	r3,0(r1)		/* Zero the stack frame pointer	*/
-	bl	start_secondary
+	bl	CFUNC(start_secondary)
 	b	.
 #endif
 
 /*
  * This subroutine clobbers r11 and r12
  */
-enable_64b_mode:
+SYM_FUNC_START_LOCAL(enable_64b_mode)
 	mfmsr	r11			/* grab the current MSR */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
 	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
 	mtmsr	r11
-#else /* CONFIG_PPC_BOOK3E */
+#else /* CONFIG_PPC_BOOK3E_64 */
 	LOAD_REG_IMMEDIATE(r12, MSR_64BIT)
 	or	r11,r11,r12
 	mtmsrd	r11
 	isync
 #endif
 	blr
+SYM_FUNC_END(enable_64b_mode)
 
 /*
  * This puts the TOC pointer into r2, offset by 0x8000 (as expected
@@ -891,10 +924,15 @@ enable_64b_mode:
  * TOC in -mcmodel=medium mode. After we relocate to 0 but before
 * the MMU is on we need our TOC to be a virtual address otherwise
 * these pointers will be real addresses which may get stored and
- * accessed later with the MMU on. We use tovirt() at the call
- * sites to handle this.
+ * accessed later with the MMU on. We branch to the virtual address
+ * while still in real mode then call relative_toc again to handle
+ * this.
  */
 _GLOBAL(relative_toc)
+#ifdef CONFIG_PPC_KERNEL_PCREL
+	tdnei	r2,-1
+	blr
+#else
 	mflr	r0
 	bcl	20,31,$+4
 0:	mflr	r11
@@ -905,15 +943,15 @@ _GLOBAL(relative_toc)
 
 	.balign	8
 p_toc:	.8byte	.TOC. - 0b
+#endif
 
 /*
  * This is where the main kernel code starts.
  */
 __REF
 start_here_multiplatform:
-	/* set up the TOC */
-	bl	relative_toc
-	tovirt(r2,r2)
+	/* Adjust TOC for moved kernel. Could adjust when moving it instead. */
+	bl	relative_toc
 
 	/* Clear out the BSS. It may have been done in prom_init,
 	 * already but that's irrelevant since prom_init will soon
@@ -940,7 +978,7 @@ start_here_multiplatform:
 	std	r29,8(r11);
 #endif
 
-#ifndef CONFIG_PPC_BOOK3E
+#ifndef CONFIG_PPC_BOOK3E_64
 	mfmsr	r6
 	ori	r6,r6,MSR_RI
 	mtmsrd	r6			/* RI on */
@@ -958,13 +996,16 @@ start_here_multiplatform:
 	LOAD_REG_IMMEDIATE(r1,THREAD_SIZE)
 	add	r1,r3,r1
 	li	r0,0
-	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
+	stdu	r0,-STACK_FRAME_MIN_SIZE(r1)
 
 	/*
 	 * Do very early kernel initializations, including initial hash table
 	 * and SLB setup before we turn on relocation.
 	 */
 
+#ifdef CONFIG_KASAN
+	bl	CFUNC(kasan_early_init)
+#endif
 	/* Restore parameters passed from prom_init/kexec */
 	mr	r3,r31
 	LOAD_REG_ADDR(r12, DOTSYM(early_setup))
@@ -985,7 +1026,7 @@ start_here_common:
 	std	r1,PACAKSAVE(r13)
 
 	/* Load the TOC (virtual address) */
-	ld	r2,PACATOC(r13)
+	LOAD_PACA_TOC()
 
 	/* Mark interrupts soft and hard disabled (they might be enabled
 	 * in the PACA when doing hotplug)
@@ -996,7 +1037,7 @@ start_here_common:
 	stb	r0,PACAIRQHAPPENED(r13)
 
 	/* Generic kernel entry */
-	bl	start_kernel
+	bl	CFUNC(start_kernel)
 
 	/* Not reached */
 0:	trap
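Note on the 970 check in the new start_initialization_book3s above: it gates __cpu_preinit_ppc970 on the processor version field, i.e. the top 16 bits of SPRN_PVR, matching 970 (0x39), 970FX (0x3c), 970MP (0x44) and 970GX (0x45). A minimal C sketch of the same test follows; is_ppc970_family() is a hypothetical helper written only for illustration (it is not a kernel function), and the sample PVR value is made up since user space cannot execute mfspr directly.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Same decision the assembly makes with:
 *   mfspr r0,SPRN_PVR; srwi r0,r0,16; cmpwi against each 970 variant.
 */
static bool is_ppc970_family(uint32_t pvr)
{
	uint32_t version = pvr >> 16;	/* upper half = processor version */

	switch (version) {
	case 0x39:	/* 970   */
	case 0x3c:	/* 970FX */
	case 0x44:	/* 970MP */
	case 0x45:	/* 970GX */
		return true;
	default:
		return false;
	}
}

int main(void)
{
	uint32_t sample_pvr = 0x003c0300;	/* invented 970FX-style value */

	printf("PVR 0x%08x -> %s\n", (unsigned int)sample_pvr,
	       is_ppc970_family(sample_pvr) ?
			"970 family: would run __cpu_preinit_ppc970" :
			"not a 970: skip");
	return 0;
}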
