path: root/arch/x86/kernel/head_32.S
author	Linus Torvalds <torvalds@linux-foundation.org>	2019-11-26 10:42:40 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-11-26 10:42:40 -0800
commit	1d87200446f1d10dfe9672ca8edb027a82612f8c (patch)
tree	45cc71ff8e4d1bcde9b07ce8203277f2b8982941 /arch/x86/kernel/head_32.S
parent	5c4a1c090d8676d8b84e2ac40671602be44afdfc (diff)
parent	f01ec4fca8207e31b59a010c3de679c833f3a877 (diff)
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Cross-arch changes to move the linker sections for NOTES and
     EXCEPTION_TABLE into the RO_DATA area, where they belong on most
     architectures. (Kees Cook)

   - Switch the x86 linker fill byte from 0x90 (NOP) to 0xcc (INT3), to
     trap jumps into the middle of those padding areas instead of
     sliding execution. (Kees Cook)

   - A thorough cleanup of symbol definitions within x86 assembler code.
     The rather randomly named macros got streamlined around a
     (hopefully) straightforward naming scheme:

        SYM_START(name, linkage, align...)
        SYM_END(name, sym_type)

        SYM_FUNC_START(name)
        SYM_FUNC_END(name)

        SYM_CODE_START(name)
        SYM_CODE_END(name)

        SYM_DATA_START(name)
        SYM_DATA_END(name)

     etc. - with roughly three times as many variants of these basic
     primitives, covering label, local-symbol and attribute variants,
     expressed via postfixes.

     No change in functionality intended. (Jiri Slaby)

   - Misc other changes, cleanups and smaller fixes"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
  x86/entry/64: Remove pointless jump in paranoid_exit
  x86/entry/32: Remove unused resume_userspace label
  x86/build/vdso: Remove meaningless CFLAGS_REMOVE_*.o
  m68k: Convert missed RODATA to RO_DATA
  x86/vmlinux: Use INT3 instead of NOP for linker fill bytes
  x86/mm: Report actual image regions in /proc/iomem
  x86/mm: Report which part of kernel image is freed
  x86/mm: Remove redundant address-of operators on addresses
  xtensa: Move EXCEPTION_TABLE to RO_DATA segment
  powerpc: Move EXCEPTION_TABLE to RO_DATA segment
  parisc: Move EXCEPTION_TABLE to RO_DATA segment
  microblaze: Move EXCEPTION_TABLE to RO_DATA segment
  ia64: Move EXCEPTION_TABLE to RO_DATA segment
  h8300: Move EXCEPTION_TABLE to RO_DATA segment
  c6x: Move EXCEPTION_TABLE to RO_DATA segment
  arm64: Move EXCEPTION_TABLE to RO_DATA segment
  alpha: Move EXCEPTION_TABLE to RO_DATA segment
  x86/vmlinux: Move EXCEPTION_TABLE to RO_DATA segment
  x86/vmlinux: Actually use _etext for the end of the text segment
  vmlinux.lds.h: Allow EXCEPTION_TABLE to live in RO_DATA
  ...
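The symbol-annotation cleanup summarized above is what drives every hunk in the
diff below: the old one-size-fits-all ENTRY()/ENDPROC()/GLOBAL() annotations
(used for functions, jump targets and data alike) are replaced by type-specific
SYM_* macros. A minimal sketch of the conversion pattern, assuming placeholder
symbol names (my_func, my_entry, my_var are illustrative only, not symbols from
the tree):

	/* Before: ENTRY() regardless of whether the symbol is a function,
	 * a non-callable entry point, or data. */
	ENTRY(my_func)
		ret
	ENDPROC(my_func)

	/* After: the annotation encodes the symbol type and emits size info. */
	SYM_FUNC_START(my_func)			/* C-callable function (call/ret) */
		ret
	SYM_FUNC_END(my_func)

	SYM_CODE_START(my_entry)		/* non-C-callable code, e.g. a jumped-to
						 * entry point with no regular stack frame */
		jmp	.
	SYM_CODE_END(my_entry)

	SYM_DATA(my_var, .long 0)		/* data symbol, open-coded .long before */

In the hunks that follow, startup_32 becomes SYM_CODE_START/SYM_CODE_END (it is
entered by a jump, not a call), start_cpu0 and startup_32_smp become
SYM_FUNC_START/SYM_FUNC_END, and the GDT descriptors and page tables move to
the SYM_DATA* family.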
Diffstat (limited to 'arch/x86/kernel/head_32.S')
-rw-r--r--	arch/x86/kernel/head_32.S	62
1 file changed, 31 insertions(+), 31 deletions(-)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 2e6a0676c1f4..3923ab4630d7 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
* can.
*/
__HEAD
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
movl pa(initial_stack),%ecx
/* test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -156,7 +156,7 @@ ENTRY(startup_32)
jmp *%eax
.Lbad_subarch:
-WEAK(xen_entry)
+SYM_INNER_LABEL_ALIGN(xen_entry, SYM_L_WEAK)
/* Unknown implementation; there's really
nothing we can do at this point. */
ud2a
@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
#else
jmp .Ldefault_entry
#endif /* CONFIG_PARAVIRT */
+SYM_CODE_END(startup_32)
#ifdef CONFIG_HOTPLUG_CPU
/*
@@ -179,12 +180,12 @@ num_subarch_entries = (. - subarch_entries) / 4
* up already except stack. We just set up stack here. Then call
* start_secondary().
*/
-ENTRY(start_cpu0)
+SYM_FUNC_START(start_cpu0)
movl initial_stack, %ecx
movl %ecx, %esp
call *(initial_code)
1: jmp 1b
-ENDPROC(start_cpu0)
+SYM_FUNC_END(start_cpu0)
#endif
/*
@@ -195,7 +196,7 @@ ENDPROC(start_cpu0)
* If cpu hotplug is not supported then this code can go in init section
* which will be freed later
*/
-ENTRY(startup_32_smp)
+SYM_FUNC_START(startup_32_smp)
cld
movl $(__BOOT_DS),%eax
movl %eax,%ds
@@ -362,7 +363,7 @@ ENTRY(startup_32_smp)
call *(initial_code)
1: jmp 1b
-ENDPROC(startup_32_smp)
+SYM_FUNC_END(startup_32_smp)
#include "verify_cpu.S"
@@ -392,7 +393,7 @@ setup_once:
andl $0,setup_once_ref /* Once is enough, thanks */
ret
-ENTRY(early_idt_handler_array)
+SYM_FUNC_START(early_idt_handler_array)
# 36(%esp) %eflags
# 32(%esp) %cs
# 28(%esp) %eip
@@ -407,9 +408,9 @@ ENTRY(early_idt_handler_array)
i = i + 1
.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
.endr
-ENDPROC(early_idt_handler_array)
+SYM_FUNC_END(early_idt_handler_array)
-early_idt_handler_common:
+SYM_CODE_START_LOCAL(early_idt_handler_common)
/*
* The stack is the hardware frame, an error code or zero, and the
* vector number.
@@ -460,10 +461,10 @@ early_idt_handler_common:
decl %ss:early_recursion_flag
addl $4, %esp /* pop pt_regs->orig_ax */
iret
-ENDPROC(early_idt_handler_common)
+SYM_CODE_END(early_idt_handler_common)
/* This is the default interrupt "handler" :-) */
-ENTRY(early_ignore_irq)
+SYM_FUNC_START(early_ignore_irq)
cld
#ifdef CONFIG_PRINTK
pushl %eax
@@ -498,19 +499,16 @@ ENTRY(early_ignore_irq)
hlt_loop:
hlt
jmp hlt_loop
-ENDPROC(early_ignore_irq)
+SYM_FUNC_END(early_ignore_irq)
__INITDATA
.align 4
-GLOBAL(early_recursion_flag)
- .long 0
+SYM_DATA(early_recursion_flag, .long 0)
__REFDATA
.align 4
-ENTRY(initial_code)
- .long i386_start_kernel
-ENTRY(setup_once_ref)
- .long setup_once
+SYM_DATA(initial_code, .long i386_start_kernel)
+SYM_DATA(setup_once_ref, .long setup_once)
#ifdef CONFIG_PAGE_TABLE_ISOLATION
#define PGD_ALIGN (2 * PAGE_SIZE)
@@ -553,7 +551,7 @@ EXPORT_SYMBOL(empty_zero_page)
__PAGE_ALIGNED_DATA
/* Page-aligned for the benefit of paravirt? */
.align PGD_ALIGN
-ENTRY(initial_page_table)
+SYM_DATA_START(initial_page_table)
.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
# if KPMDS == 3
.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0
@@ -581,17 +579,18 @@ ENTRY(initial_page_table)
.fill 1024, 4, 0
#endif
+SYM_DATA_END(initial_page_table)
#endif
.data
.balign 4
-ENTRY(initial_stack)
- /*
- * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
- * unwinder reliably detect the end of the stack.
- */
- .long init_thread_union + THREAD_SIZE - SIZEOF_PTREGS - \
- TOP_OF_KERNEL_STACK_PADDING;
+/*
+ * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
+ * reliably detect the end of the stack.
+ */
+SYM_DATA(initial_stack,
+ .long init_thread_union + THREAD_SIZE -
+ SIZEOF_PTREGS - TOP_OF_KERNEL_STACK_PADDING)
__INITRODATA
int_msg:
@@ -607,27 +606,28 @@ int_msg:
*/
.data
-.globl boot_gdt_descr
-
ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
.word 0 # 32 bit align gdt_desc.address
-boot_gdt_descr:
+SYM_DATA_START_LOCAL(boot_gdt_descr)
.word __BOOT_DS+7
.long boot_gdt - __PAGE_OFFSET
+SYM_DATA_END(boot_gdt_descr)
# boot GDT descriptor (later on used by CPU#0):
.word 0 # 32 bit align gdt_desc.address
-ENTRY(early_gdt_descr)
+SYM_DATA_START(early_gdt_descr)
.word GDT_ENTRIES*8-1
.long gdt_page /* Overwritten for secondary CPUs */
+SYM_DATA_END(early_gdt_descr)
/*
* The boot_gdt must mirror the equivalent in setup.S and is
* used only for booting.
*/
.align L1_CACHE_BYTES
-ENTRY(boot_gdt)
+SYM_DATA_START(boot_gdt)
.fill GDT_ENTRY_BOOT_CS,8,0
.quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
.quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
+SYM_DATA_END(boot_gdt)