path: root/arch/nds32/kernel/ex-exit.S
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/assembler.h>
#include <asm/nds32.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/current.h>
#include <asm/fpu.h>



#ifdef CONFIG_HWZOL
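/*
 * Restore the hardware zero-overhead-loop user registers
 * ($LB, $LE, $LC) from the values reloaded into $r14-$r16.
 */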
	.macro pop_zol
	mtusr	$r14, $LB
	mtusr	$r15, $LE
	mtusr	$r16, $LC
	.endm
#endif

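/*
 * First half of the user-register restore: disable interrupts,
 * reload $r12 and up from the pt_regs frame, and write the saved
 * values back into the system registers ($SP_USR, $IPC, $PSW,
 * $IPSW, ...).
 */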
	.macro	restore_user_regs_first
	setgie.d
	isb
#if defined(CONFIG_FPU)
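	! With CONFIG_FPU the frame also carries $FUCOP_CTL; restore it
	! only when this CPU actually has an FPU (has_fpu is non-zero).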
	addi    $sp, $sp, OSP_OFFSET
	lmw.adm $r12, [$sp], $r25, #0x0
	sethi   $p0, hi20(has_fpu)
	lbsi 	$p0, [$p0+lo12(has_fpu)]
	beqz	$p0, 2f
	mtsr    $r25, $FUCOP_CTL
2:
#else
	addi	$sp, $sp, FUCOP_CTL_OFFSET
	lmw.adm $r12, [$sp], $r24, #0x0
#endif
	mtsr	$r12, $SP_USR
	mtsr	$r13, $IPC
#ifdef CONFIG_HWZOL
	pop_zol
#endif
	mtsr	$r19, $PSW
	mtsr	$r20, $IPSW
	mtsr    $r21, $P_IPSW
	mtsr	$r22, $P_IPC
	mtsr	$r23, $P_P0
	mtsr	$r24, $P_P1
	lmw.adm $sp, [$sp], $sp, #0xe
	.endm

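/*
 * Final step of the restore: pop the saved $osp and, if it is
 * non-zero, switch $sp to it, then return with iret.
 */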
	.macro	restore_user_regs_last
	pop	$p0
	cmovn	$sp, $p0, $p0

	iret
	nop

	.endm

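/*
 * Full restore ($r0 and up), used on the slow return paths.
 */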
	.macro	restore_user_regs
	restore_user_regs_first
	lmw.adm $r0, [$sp], $r25, #0x0
	addi	$sp, $sp, OSP_OFFSET
	restore_user_regs_last
	.endm

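/*
 * Fast-path restore: skip reloading $r0, which still holds the
 * syscall return value (hence the OSP_OFFSET-4 adjustment).
 */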
	.macro	fast_restore_user_regs
	restore_user_regs_first
	lmw.adm $r1, [$sp], $r25, #0x0
	addi	$sp, $sp, OSP_OFFSET-4
	restore_user_regs_last
	.endm

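/*
 * On a non-preemptible kernel, preempt_stop disables interrupts and
 * resume_kernel collapses into no_work_pending; on a preemptible
 * kernel it is empty and resume_kernel is defined further below.
 */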
#ifdef CONFIG_PREEMPTION
	.macro	preempt_stop
	.endm
#else
	.macro	preempt_stop
	setgie.d
	isb
	.endm
#define	resume_kernel	no_work_pending
#endif

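/*
 * Common exit path.  Exceptions enter at ret_from_exception;
 * interrupts enter at ret_from_intr and skip the preempt_stop.
 */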
ENTRY(ret_from_exception)
	preempt_stop
ENTRY(ret_from_intr)

/*
 * Determine whether we are returning to kernel or user mode.
 */
	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! Check if in nested interrupt
	andi	$p0, $p0, #PSW_mskINTL
	bnez	$p0, resume_kernel		! done with iret
	j	resume_userspace


/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving $r0 back into the SVC
 * stack.
 * Fixed register usage: tsk - $r25, syscall number - $r7,
 * syscall table pointer - $r8.
 */
ENTRY(ret_fast_syscall)
	gie_disable
	lwi	$r1, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $r1, #_TIF_WORK_MASK
	bnez	$p1, fast_work_pending
	fast_restore_user_regs			! iret

/*
 * Ok, we need to do extra processing:
 * enter the slow syscall-return path while work is pending.
 */
fast_work_pending:
	swi	$r0, [$sp+(#R0_OFFSET)]		! save $r0; this is what differs from ret_from_exception
work_pending:
	andi	$p1, $r1, #_TIF_NEED_RESCHED
	bnez	$p1, work_resched

	andi	$p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME
	beqz	$p1, no_work_pending

	move	$r0, $sp			! 'regs'
	gie_enable
	bal	do_notify_resume
	b       ret_slow_syscall
work_resched:
	bal	schedule			! reschedule, then fall through to the slow return path

/*
 * "slow" syscall return path.
 */
ENTRY(resume_userspace)
ENTRY(ret_slow_syscall)
	gie_disable
	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! Check if in nested interrupt
	andi	$p0, $p0, #PSW_mskINTL
	bnez	$p0, no_work_pending		! done with iret
	lwi	$r1, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $r1, #_TIF_WORK_MASK
	bnez	$p1, work_pending		! handle work_resched, sig_pend

no_work_pending:
#ifdef CONFIG_TRACE_IRQFLAGS
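	! Tell lockdep whether IRQs will be on or off after the iret,
	! based on the GIE bit of the saved $IPSW.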
	lwi	$p0, [$sp+(#IPSW_OFFSET)]
	andi	$p0, $p0, #0x1
	la	$r10, __trace_hardirqs_off
	la	$r9, __trace_hardirqs_on
	cmovz	$r9, $p0, $r10
	jral	$r9
#endif
	restore_user_regs			! return to user space with iret


/*
 * Preemptible kernel: reschedule on return to kernel mode when
 * preemption is enabled, a reschedule is pending, and interrupts
 * were enabled in the interrupted context.
 */
#ifdef CONFIG_PREEMPTION
resume_kernel:
	gie_disable
	lwi	$t0, [tsk+#TSK_TI_PREEMPT]
	bnez	$t0, no_work_pending

	lwi	$t0, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $t0, #_TIF_NEED_RESCHED
	beqz	$p1, no_work_pending

	lwi	$t0, [$sp+(#IPSW_OFFSET)]	! Interrupts off?
	andi	$t0, $t0, #1
	beqz	$t0, no_work_pending

	jal	preempt_schedule_irq
	b	no_work_pending
#endif

/*
 * This is how we return from a fork.  $r6 holds the function for a
 * kernel thread (zero for a user process) and $r7 its argument.
 */
ENTRY(ret_from_fork)
	bal	schedule_tail
	beqz	$r6, 1f				! r6 stores fn for kernel thread
	move	$r0, $r7			! prepare kernel thread arg
	jral	$r6
1:
	lwi	$r1, [tsk+#TSK_TI_FLAGS]		! check for syscall tracing
	andi	$p1, $r1, #_TIF_WORK_SYSCALL_LEAVE	! are we tracing syscalls?
	beqz	$p1, ret_slow_syscall
	move    $r0, $sp
	bal	syscall_trace_leave
	b	ret_slow_syscall