Diffstat (limited to 'arch/sparc/include/asm/switch_to_64.h')
-rw-r--r--  arch/sparc/include/asm/switch_to_64.h  |  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
index 10e76332dc99..d93963ff7caa 100644
--- a/arch/sparc/include/asm/switch_to_64.h
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_SWITCH_TO_64_H
#define __SPARC64_SWITCH_TO_64_H
@@ -14,15 +15,13 @@ do { \
* for l0/l1. It will use one for 'next' and the other to hold
* the output value of 'last'. 'next' is not referenced again
* past the invocation of switch_to in the scheduler, so we need
- * not preserve it's value. Hairy, but it lets us remove 2 loads
+ * not preserve its value. Hairy, but it lets us remove 2 loads
* and 2 stores in this critical code path. -DaveM
*/
#define switch_to(prev, next, last) \
do { save_and_clear_fpu(); \
- /* If you are tempted to conditionalize the following */ \
- /* so that ASI is only written if it changes, think again. */ \
__asm__ __volatile__("wr %%g0, %0, %%asi" \
- : : "r" (task_thread_info(next)->current_ds));\
+ : : "r" (ASI_AIUS)); \
trap_block[current_thread_info()->cpu].thread = \
task_thread_info(next); \
__asm__ __volatile__( \
@@ -66,6 +65,7 @@ do { save_and_clear_fpu(); \
} while(0)

void synchronize_user_stack(void);
-void fault_in_user_windows(void);
+struct pt_regs;
+void fault_in_user_windows(struct pt_regs *);

#endif /* __SPARC64_SWITCH_TO_64_H */
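
Note (not part of the commit): a minimal, hypothetical caller sketch illustrating the prototype change above. The function name and includes below are assumptions for illustration only; the diff itself only shows that fault_in_user_windows() now takes a struct pt_regs pointer instead of no arguments, so any caller must pass the user's register frame.

	/* Hypothetical caller sketch, not from this commit: fault_in_user_windows()
	 * now takes the user pt_regs explicitly instead of no arguments. */
	#include <asm/ptrace.h>
	#include <asm/switch_to.h>

	static void example_prepare_return_to_user(struct pt_regs *regs)
	{
		/* Before this patch: fault_in_user_windows(); */
		fault_in_user_windows(regs);
	}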