Diffstat (limited to 'arch/x86/entry/entry_64.S')
-rw-r--r-- | arch/x86/entry/entry_64.S | 34 |
1 file changed, 14 insertions, 20 deletions
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 044d18ebc43c..a9a8027a6c0e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,7 +36,6 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
-#include <asm/frame.h>
 #include <linux/err.h>
 
 .code64
@@ -212,7 +211,7 @@ entry_SYSCALL_64_fastpath:
	 * If we see that no exit work is required (which we are required
	 * to check with IRQs off), then we can go straight to SYSRET64.
	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movq	PER_CPU_VAR(current_task), %r11
 	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
@@ -233,7 +232,7 @@ entry_SYSCALL_64_fastpath:
	 * raise(3) will trigger this, for example.  IRQs are off.
	 */
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
+	ENABLE_INTERRUPTS(CLBR_ANY)
 	SAVE_EXTRA_REGS
 	movq	%rsp, %rdi
 	call	syscall_return_slowpath	/* returns with IRQs disabled */
@@ -265,12 +264,10 @@ return_from_SYSCALL_64:
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
+	 *
+	 * Change top bits to match most significant bit (47th or 56th bit
+	 * depending on paging mode) in the address.
	 */
-	.ifne __VIRTUAL_MASK_SHIFT - 47
-	.error "virtual address width changed -- SYSRET checks need update"
-	.endif
-
-	/* Change top 16 bits to be the sign-extension of 47th bit */
 	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
 	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
 
@@ -343,7 +340,7 @@ ENTRY(stub_ptregs_64)
	 * Called from fast path -- disable IRQs again, pop return address
	 * and jump to slow path
	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	popq	%rax
 	jmp	entry_SYSCALL64_slow_path
@@ -409,19 +406,17 @@ END(__switch_to_asm)
  * r12: kernel thread arg
  */
 ENTRY(ret_from_fork)
-	FRAME_BEGIN			/* help unwinder find end of stack */
 	movq	%rax, %rdi
-	call	schedule_tail		/* rdi: 'prev' task parameter */
+	call	schedule_tail			/* rdi: 'prev' task parameter */
 
-	testq	%rbx, %rbx		/* from kernel_thread? */
-	jnz	1f			/* kernel threads are uncommon */
+	testq	%rbx, %rbx			/* from kernel_thread? */
+	jnz	1f				/* kernel threads are uncommon */
 
 2:
-	leaq	FRAME_OFFSET(%rsp),%rdi	/* pt_regs pointer */
+	movq	%rsp, %rdi
 	call	syscall_return_slowpath	/* returns with IRQs disabled */
 	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
 	SWAPGS
-	FRAME_END
 	jmp	restore_regs_and_iret
 
 1:
@@ -518,7 +513,7 @@ common_interrupt:
 	interrupt do_IRQ
 	/* 0(%rsp): old RSP */
 ret_from_intr:
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	decl	PER_CPU_VAR(irq_count)
 
@@ -1051,7 +1046,7 @@ END(paranoid_entry)
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
 ENTRY(paranoid_exit)
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF_DEBUG
 	testl	%ebx, %ebx			/* swapgs needed? */
 	jnz	paranoid_exit_no_swapgs
@@ -1156,10 +1151,9 @@ END(error_entry)
  * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
  */
 ENTRY(error_exit)
-	movl	%ebx, %eax
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
-	testl	%eax, %eax
+	testl	%ebx, %ebx
 	jnz	retint_kernel
 	jmp	retint_user
 END(error_exit)
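
The return_from_SYSCALL_64 hunk replaces the hard-coded 47-bit assertion with a shl/sar pair that sign-extends the top valid bit of the return address, so the same check stays correct for both 4-level (bit 47) and 5-level (bit 56) paging. The following standalone C sketch illustrates that trick outside the kernel; VIRT_MASK_SHIFT and canonicalize() are illustrative names standing in for __VIRTUAL_MASK_SHIFT and the shift pair, not kernel symbols.

	/* Sketch of the sign-extension canonical-address check (not kernel code). */
	#include <stdint.h>
	#include <stdio.h>

	#define VIRT_MASK_SHIFT 47	/* 47 for 4-level paging, 56 for 5-level */

	/* Shift left then arithmetic-shift right so bit VIRT_MASK_SHIFT
	 * propagates into all higher bits, like the shl/sar pair above. */
	static uint64_t canonicalize(uint64_t addr)
	{
		int shift = 64 - (VIRT_MASK_SHIFT + 1);

		return (uint64_t)(((int64_t)(addr << shift)) >> shift);
	}

	int main(void)
	{
		uint64_t ok  = 0x00007fffdeadbeefULL;	/* canonical user address */
		uint64_t bad = 0x0000800000000000ULL;	/* first non-canonical address */

		/* An address is canonical iff the shift pair leaves it unchanged. */
		printf("%#llx canonical: %d\n", (unsigned long long)ok,
		       canonicalize(ok) == ok);
		printf("%#llx canonical: %d\n", (unsigned long long)bad,
		       canonicalize(bad) == bad);
		return 0;
	}

In the kernel path the shifted value is compared against the saved return address; if the two differ, the address was not canonical and the slow IRET path is taken instead of SYSRET.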