Diffstat (limited to 'arch/alpha/kernel/entry.S')
-rw-r--r--   arch/alpha/kernel/entry.S   187
1 file changed, 92 insertions, 95 deletions
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index 2e09248f8324..f4d41b4538c2 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -15,10 +15,6 @@
 	.set noat
 	.cfi_sections	.debug_frame
 
-/* Stack offsets.  */
-#define SP_OFF			184
-#define SWITCH_STACK_SIZE	320
-
 .macro	CFI_START_OSF_FRAME	func
 	.align	4
 	.globl	\func
@@ -159,7 +155,6 @@
 	.cfi_rel_offset	$13, 32
 	.cfi_rel_offset	$14, 40
 	.cfi_rel_offset	$15, 48
-	/* We don't really care about the FP registers for debugging. */
 .endm
 
 .macro	UNDO_SWITCH_STACK
@@ -199,8 +194,8 @@ CFI_END_OSF_FRAME entArith
 CFI_START_OSF_FRAME entMM
 	SAVE_ALL
 /* save $9 - $15 so the inline exception code can manipulate them. */
-	subq	$sp, 56, $sp
-	.cfi_adjust_cfa_offset	56
+	subq	$sp, 64, $sp
+	.cfi_adjust_cfa_offset	64
 	stq	$9, 0($sp)
 	stq	$10, 8($sp)
 	stq	$11, 16($sp)
@@ -215,7 +210,7 @@ CFI_START_OSF_FRAME entMM
 	.cfi_rel_offset	$13, 32
 	.cfi_rel_offset	$14, 40
 	.cfi_rel_offset	$15, 48
-	addq	$sp, 56, $19
+	addq	$sp, 64, $19
 /* handle the fault */
 	lda	$8, 0x3fff
 	bic	$sp, $8, $8
@@ -228,7 +223,7 @@ CFI_START_OSF_FRAME entMM
 	ldq	$13, 32($sp)
 	ldq	$14, 40($sp)
 	ldq	$15, 48($sp)
-	addq	$sp, 56, $sp
+	addq	$sp, 64, $sp
 	.cfi_restore	$9
 	.cfi_restore	$10
 	.cfi_restore	$11
@@ -236,7 +231,7 @@ CFI_START_OSF_FRAME entMM
 	.cfi_restore	$13
 	.cfi_restore	$14
 	.cfi_restore	$15
-	.cfi_adjust_cfa_offset	-56
+	.cfi_adjust_cfa_offset	-64
 /* finish up the syscall as normal. */
 	br	ret_from_sys_call
 CFI_END_OSF_FRAME entMM
@@ -383,8 +378,8 @@ entUnaUser:
 	.cfi_restore	$0
 	.cfi_adjust_cfa_offset	-256
 	SAVE_ALL /* setup normal kernel stack */
-	lda	$sp, -56($sp)
-	.cfi_adjust_cfa_offset	56
+	lda	$sp, -64($sp)
+	.cfi_adjust_cfa_offset	64
 	stq	$9, 0($sp)
 	stq	$10, 8($sp)
 	stq	$11, 16($sp)
@@ -400,7 +395,7 @@ entUnaUser:
 	.cfi_rel_offset	$14, 40
 	.cfi_rel_offset	$15, 48
 	lda	$8, 0x3fff
-	addq	$sp, 56, $19
+	addq	$sp, 64, $19
 	bic	$sp, $8, $8
 	jsr	$26, do_entUnaUser
 	ldq	$9, 0($sp)
@@ -410,7 +405,7 @@ entUnaUser:
 	ldq	$13, 32($sp)
 	ldq	$14, 40($sp)
 	ldq	$15, 48($sp)
-	lda	$sp, 56($sp)
+	lda	$sp, 64($sp)
 	.cfi_restore	$9
 	.cfi_restore	$10
 	.cfi_restore	$11
@@ -418,7 +413,7 @@ entUnaUser:
 	.cfi_restore	$13
 	.cfi_restore	$14
 	.cfi_restore	$15
-	.cfi_adjust_cfa_offset	-56
+	.cfi_adjust_cfa_offset	-64
 	br	ret_from_sys_call
 CFI_END_OSF_FRAME entUna
 
@@ -454,7 +449,7 @@ entSys:
 	SAVE_ALL
 	lda	$8, 0x3fff
 	bic	$sp, $8, $8
-	lda	$4, NR_SYSCALLS($31)
+	lda	$4, NR_syscalls($31)
 	stq	$16, SP_OFF+24($sp)
 	lda	$5, sys_call_table
 	lda	$27, sys_ni_syscall
@@ -469,13 +464,16 @@ entSys:
 #ifdef CONFIG_AUDITSYSCALL
 	lda	$6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
 	and	$3, $6, $3
-#endif
 	bne	$3, strace
+#else
+	blbs	$3, strace		/* check for SYSCALL_TRACE in disguise */
+#endif
 	beq	$4, 1f
 	ldq	$27, 0($5)
 1:	jsr	$26, ($27), sys_ni_syscall
 	ldgp	$gp, 0($26)
 	blt	$0, $syscall_error	/* the call failed */
+$ret_success:
 	stq	$0, 0($sp)
 	stq	$31, 72($sp)		/* a3=0 => no error */
 
@@ -495,6 +493,10 @@ ret_to_user:
 	and	$17, _TIF_WORK_MASK, $2
 	bne	$2, work_pending
 restore_all:
+	ldl	$2, TI_STATUS($8)
+	and	$2, TS_SAVED_FP | TS_RESTORE_FP, $3
+	bne	$3, restore_fpu
+restore_other:
 	.cfi_remember_state
 	RESTORE_ALL
 	call_pal PAL_rti
@@ -503,7 +505,7 @@ ret_to_kernel:
 	.cfi_restore_state
 	lda	$16, 7
 	call_pal PAL_swpipl
-	br	restore_all
+	br	restore_other
 
 	.align 3
 $syscall_error:
@@ -525,11 +527,6 @@ $syscall_error:
 	stq	$1, 72($sp)	/* a3 for return */
 	br	ret_from_sys_call
 
-$ret_success:
-	stq	$0, 0($sp)
-	stq	$31, 72($sp)	/* a3=0 => no error */
-	br	ret_from_sys_call
-
 /*
  * Do all cleanup when returning from all interrupts and system calls.
  *
@@ -544,7 +541,7 @@ $ret_success:
 	.align	4
 	.type	work_pending, @function
 work_pending:
-	and	$17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2
+	and	$17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL, $2
 	bne	$2, $work_notifysig
 
 $work_resched:
@@ -572,6 +569,14 @@ $work_notifysig:
 	.type	strace, @function
 strace:
 	/* set up signal stack, call syscall_trace */
+	// NB: if anyone adds preemption, this block will need to be protected
+	ldl	$1, TI_STATUS($8)
+	and	$1, TS_SAVED_FP, $3
+	or	$1, TS_SAVED_FP, $2
+	bne	$3, 1f
+	stl	$2, TI_STATUS($8)
+	bsr	$26, __save_fpu
+1:
 	DO_SWITCH_STACK
 	jsr	$26, syscall_trace_enter /* returns the syscall number */
 	UNDO_SWITCH_STACK
@@ -585,7 +590,7 @@ strace:
 	ldq	$21, 88($sp)
 
 	/* get the system call pointer.. */
-	lda	$1, NR_SYSCALLS($31)
+	lda	$1, NR_syscalls($31)
 	lda	$2, sys_call_table
 	lda	$27, sys_ni_syscall
 	cmpult	$0, $1, $1
@@ -598,8 +603,8 @@ ret_from_straced:
 
 	/* check return.. */
 	blt	$0, $strace_error	/* the call failed */
-	stq	$31, 72($sp)		/* a3=0 => no error */
 $strace_success:
+	stq	$31, 72($sp)		/* a3=0 => no error */
 	stq	$0, 0($sp)		/* save return value */
 
 	DO_SWITCH_STACK
@@ -651,40 +656,6 @@ do_switch_stack:
 	stq	$14, 40($sp)
 	stq	$15, 48($sp)
 	stq	$26, 56($sp)
-	stt	$f0, 64($sp)
-	stt	$f1, 72($sp)
-	stt	$f2, 80($sp)
-	stt	$f3, 88($sp)
-	stt	$f4, 96($sp)
-	stt	$f5, 104($sp)
-	stt	$f6, 112($sp)
-	stt	$f7, 120($sp)
-	stt	$f8, 128($sp)
-	stt	$f9, 136($sp)
-	stt	$f10, 144($sp)
-	stt	$f11, 152($sp)
-	stt	$f12, 160($sp)
-	stt	$f13, 168($sp)
-	stt	$f14, 176($sp)
-	stt	$f15, 184($sp)
-	stt	$f16, 192($sp)
-	stt	$f17, 200($sp)
-	stt	$f18, 208($sp)
-	stt	$f19, 216($sp)
-	stt	$f20, 224($sp)
-	stt	$f21, 232($sp)
-	stt	$f22, 240($sp)
-	stt	$f23, 248($sp)
-	stt	$f24, 256($sp)
-	stt	$f25, 264($sp)
-	stt	$f26, 272($sp)
-	stt	$f27, 280($sp)
-	mf_fpcr	$f0		# get fpcr
-	stt	$f28, 288($sp)
-	stt	$f29, 296($sp)
-	stt	$f30, 304($sp)
-	stt	$f0, 312($sp)	# save fpcr in slot of $f31
-	ldt	$f0, 64($sp)	# dont let "do_switch_stack" change fp state.
 	ret	$31, ($1), 1
 	.cfi_endproc
 	.size do_switch_stack, .-do_switch_stack
@@ -703,54 +674,71 @@ undo_switch_stack:
 	ldq	$14, 40($sp)
 	ldq	$15, 48($sp)
 	ldq	$26, 56($sp)
-	ldt	$f30, 312($sp)	# get saved fpcr
-	ldt	$f0, 64($sp)
-	ldt	$f1, 72($sp)
-	ldt	$f2, 80($sp)
-	ldt	$f3, 88($sp)
-	mt_fpcr	$f30		# install saved fpcr
-	ldt	$f4, 96($sp)
-	ldt	$f5, 104($sp)
-	ldt	$f6, 112($sp)
-	ldt	$f7, 120($sp)
-	ldt	$f8, 128($sp)
-	ldt	$f9, 136($sp)
-	ldt	$f10, 144($sp)
-	ldt	$f11, 152($sp)
-	ldt	$f12, 160($sp)
-	ldt	$f13, 168($sp)
-	ldt	$f14, 176($sp)
-	ldt	$f15, 184($sp)
-	ldt	$f16, 192($sp)
-	ldt	$f17, 200($sp)
-	ldt	$f18, 208($sp)
-	ldt	$f19, 216($sp)
-	ldt	$f20, 224($sp)
-	ldt	$f21, 232($sp)
-	ldt	$f22, 240($sp)
-	ldt	$f23, 248($sp)
-	ldt	$f24, 256($sp)
-	ldt	$f25, 264($sp)
-	ldt	$f26, 272($sp)
-	ldt	$f27, 280($sp)
-	ldt	$f28, 288($sp)
-	ldt	$f29, 296($sp)
-	ldt	$f30, 304($sp)
 	lda	$sp, SWITCH_STACK_SIZE($sp)
 	ret	$31, ($1), 1
 	.cfi_endproc
 	.size undo_switch_stack, .-undo_switch_stack
+
+#define FR(n) n * 8 + TI_FP($8)
+	.align	4
+	.globl	__save_fpu
+	.type	__save_fpu, @function
+__save_fpu:
+#define V(n) stt	$f##n, FR(n)
+	V( 0); V( 1); V( 2); V( 3)
+	V( 4); V( 5); V( 6); V( 7)
+	V( 8); V( 9); V(10); V(11)
+	V(12); V(13); V(14); V(15)
+	V(16); V(17); V(18); V(19)
+	V(20); V(21); V(22); V(23)
+	V(24); V(25); V(26); V(27)
+	mf_fpcr	$f0		# get fpcr
+	V(28); V(29); V(30)
+	stt	$f0, FR(31)	# save fpcr in slot of $f31
+	ldt	$f0, FR(0)	# don't let "__save_fpu" change fp state.
+	ret
+#undef V
+	.size	__save_fpu, .-__save_fpu
+
+	.align	4
+restore_fpu:
+	and	$3, TS_RESTORE_FP, $3
+	bic	$2, TS_SAVED_FP | TS_RESTORE_FP, $2
+	beq	$3, 1f
+#define V(n) ldt	$f##n, FR(n)
+	ldt	$f30, FR(31)	# get saved fpcr
+	V( 0); V( 1); V( 2); V( 3)
+	mt_fpcr	$f30		# install saved fpcr
+	V( 4); V( 5); V( 6); V( 7)
+	V( 8); V( 9); V(10); V(11)
+	V(12); V(13); V(14); V(15)
+	V(16); V(17); V(18); V(19)
+	V(20); V(21); V(22); V(23)
+	V(24); V(25); V(26); V(27)
+	V(28); V(29); V(30)
+1:	stl	$2, TI_STATUS($8)
+	br	restore_other
+#undef V
+
 /*
  * The meat of the context switch code.
  */
-	.align	4
 	.globl alpha_switch_to
 	.type alpha_switch_to, @function
 	.cfi_startproc
 alpha_switch_to:
 	DO_SWITCH_STACK
+	ldl	$1, TI_STATUS($8)
+	and	$1, TS_RESTORE_FP, $3
+	bne	$3, 1f
+	or	$1, TS_RESTORE_FP | TS_SAVED_FP, $2
+	and	$1, TS_SAVED_FP, $3
+	stl	$2, TI_STATUS($8)
+	bne	$3, 1f
+	bsr	$26, __save_fpu
+1:
 	call_pal PAL_swpctx
 	lda	$8, 0x3fff
 	UNDO_SWITCH_STACK
@@ -768,7 +756,7 @@ alpha_switch_to:
 	.align 4
 	.ent ret_from_fork
 ret_from_fork:
-	lda	$26, ret_from_sys_call
+	lda	$26, ret_to_user
 	mov	$17, $16
 	jmp	$31, schedule_tail
 .end ret_from_fork
@@ -801,6 +789,14 @@ ret_from_kernel_thread:
 alpha_\name:
 	.prologue 0
 	bsr	$1, do_switch_stack
+	// NB: if anyone adds preemption, this block will need to be protected
+	ldl	$1, TI_STATUS($8)
+	and	$1, TS_SAVED_FP, $3
+	or	$1, TS_SAVED_FP, $2
+	bne	$3, 1f
+	stl	$2, TI_STATUS($8)
+	bsr	$26, __save_fpu
+1:
 	jsr	$26, sys_\name
 	ldq	$26, 56($sp)
 	lda	$sp, SWITCH_STACK_SIZE($sp)
@@ -811,6 +807,7 @@ alpha_\name:
 fork_like fork
 fork_like vfork
 fork_like clone
+fork_like clone3
 
 .macro sigreturn_like name
 	.align	4
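
For readers following the new TI_STATUS bookkeeping, here is a rough, userspace-compilable C sketch of the protocol the assembly above implements: floating-point state is dumped into thread_info at most once (TS_SAVED_FP), a context switch or syscall trace marks it as needing a reload (TS_RESTORE_FP), and the restore_all path reloads it lazily just before returning to user mode. The flag values, struct and helper names below are illustrative only, not the kernel's actual definitions.

/*
 * Rough C model of the TS_SAVED_FP / TS_RESTORE_FP protocol (not kernel code).
 * Flag values, struct and function names are illustrative.
 */
#include <stdio.h>

#define TS_SAVED_FP	0x1	/* FP regs already dumped into thread_info */
#define TS_RESTORE_FP	0x2	/* FP regs must be reloaded before user mode */

struct ti_model { unsigned int status; };

static void save_fpu(void)   { puts("  __save_fpu: dump $f0..$f30 + fpcr"); }
static void reload_fpu(void) { puts("  restore_fpu: reload $f0..$f30 + fpcr"); }

/* alpha_switch_to path: mark FP state stale; dump it at most once per reload */
static void switch_to_hook(struct ti_model *ti)
{
	unsigned int s = ti->status;

	if (s & TS_RESTORE_FP)
		return;			/* already saved and marked stale */
	ti->status = s | TS_SAVED_FP | TS_RESTORE_FP;
	if (!(s & TS_SAVED_FP))
		save_fpu();		/* first save since the last reload */
}

/* strace / fork_like wrappers: make sure a saved copy exists in thread_info */
static void save_for_switch_stack(struct ti_model *ti)
{
	if (!(ti->status & TS_SAVED_FP)) {
		ti->status |= TS_SAVED_FP;
		save_fpu();
	}
}

/* restore_all -> restore_fpu: reload only if requested, then clear both flags */
static void return_to_user(struct ti_model *ti)
{
	if (ti->status & TS_RESTORE_FP)
		reload_fpu();
	ti->status &= ~(TS_SAVED_FP | TS_RESTORE_FP);
}

int main(void)
{
	struct ti_model ti = { 0 };

	puts("traced syscall, two context switches, return to user:");
	save_for_switch_stack(&ti);	/* saves once */
	switch_to_hook(&ti);		/* no second save, requests reload */
	switch_to_hook(&ti);		/* nothing to do */
	return_to_user(&ti);		/* one reload, flags cleared */
	return 0;
}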