author    Chris Zankel <chris@zankel.net>  2008-02-12 13:17:07 -0800
committer Chris Zankel <chris@zankel.net>  2008-02-13 17:41:43 -0800
commit    c658eac628aa8df040dfe614556d95e6da3a9ffb (patch)
tree      e2211e1d5c894c29e92d4c744f504b38410efe41 /arch/xtensa
parent    71d28e6c285548106f551fde13ca6d589433d843 (diff)
[XTENSA] Add support for configurable registers and coprocessors
The Xtensa architecture allows implementations to define custom instructions and registers. Registers that are bound to a coprocessor are only accessible if the corresponding enable bit is set, which allows a 'lazy' context switch mechanism to be implemented. Other registers need to be saved and restored at context-switch time or during interrupt handling. This patch adds support for these additional states:
- save and restore registers used by the compiler upon interrupt entry and exit
- context-switch additional registers not bound to any coprocessor
- 'lazy' context switch of registers bound to a coprocessor
- ptrace interface to provide access to the additional registers
- update configuration files in include/asm-xtensa/variant-fsf
Signed-off-by: Chris Zankel <chris@zankel.net>
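To make the 'lazy' switch idea concrete, the following is a minimal user-space C sketch, not the kernel code from this patch: it models per-coprocessor ownership and a per-thread enable mask, so a context switch only swaps the mask and coprocessor registers are saved and reloaded on the first disabled-coprocessor trap. All names here (fake_thread, cp_owner, cp_live, use_coprocessor, NUM_CP) are invented for illustration.

/*
 * Hedged sketch of lazy coprocessor context switching; names are illustrative
 * and do not correspond to the kernel symbols introduced by this patch.
 */
#include <stdio.h>

#define NUM_CP 8

struct fake_thread {
	const char   *name;
	unsigned int  cpenable;          /* which CPs this thread may use */
	int           cp_state[NUM_CP];  /* saved per-CP register contents */
};

/* Per-CP "live" register value and current owner. */
static int                 cp_live[NUM_CP];
static struct fake_thread *cp_owner[NUM_CP];

/* A context switch only swaps the enable mask; no CP registers are copied. */
static void switch_to(struct fake_thread *next)
{
	/* The hardware CPENABLE register would be written here. */
	(void)next->cpenable;
}

/* Models the disabled-coprocessor trap: first use of CP 'cp' by 'thr'. */
static void use_coprocessor(struct fake_thread *thr, int cp, int value)
{
	if (cp_owner[cp] != thr) {               /* CP disabled for 'thr' */
		if (cp_owner[cp]) {              /* save previous owner lazily */
			cp_owner[cp]->cp_state[cp] = cp_live[cp];
			cp_owner[cp]->cpenable &= ~(1u << cp);
		}
		cp_live[cp] = thr->cp_state[cp]; /* reload new owner's state */
		cp_owner[cp] = thr;
		thr->cpenable |= 1u << cp;
	}
	cp_live[cp] = value;                     /* the actual CP instruction */
}

int main(void)
{
	struct fake_thread a = { .name = "A" }, b = { .name = "B" };

	use_coprocessor(&a, 0, 11);   /* A traps once, then owns CP0 */
	switch_to(&b);                /* cheap: no CP state moved */
	use_coprocessor(&b, 0, 22);   /* B traps, A's state saved lazily */
	switch_to(&a);
	use_coprocessor(&a, 0, a.cp_state[0] + 1);

	printf("%s's CP0 value: %d\n", a.name, cp_live[0]);   /* prints 12 */
	return 0;
}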
Diffstat (limited to 'arch/xtensa')
-rw-r--r--  arch/xtensa/kernel/asm-offsets.c  |  16
-rw-r--r--  arch/xtensa/kernel/coprocessor.S  | 443
-rw-r--r--  arch/xtensa/kernel/entry.S        | 295
-rw-r--r--  arch/xtensa/kernel/process.c      | 261
-rw-r--r--  arch/xtensa/kernel/ptrace.c       | 347
-rw-r--r--  arch/xtensa/kernel/signal.c       |  65
-rw-r--r--  arch/xtensa/kernel/traps.c        |  16
7 files changed, 713 insertions(+), 730 deletions(-)
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 5d9ef515ca1e..ef63adadf7f4 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -63,6 +63,8 @@ int main(void)
DEFINE(PT_SIZE, sizeof(struct pt_regs));
DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
+ DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt));
+ DEFINE(XTREGS_OPT_SIZE, sizeof(xtregs_opt_t));
/* struct task_struct */
DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
@@ -76,7 +78,19 @@ int main(void)
/* struct thread_info (offset from start_struct) */
DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
- DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
+ DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
+#if XTENSA_HAVE_COPROCESSORS
+ DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
+#endif
+ DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
+ DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
/* struct mm_struct */
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 01bcb9fcfcbd..2bc1e145c0a4 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -8,193 +8,328 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2003 - 2005 Tensilica Inc.
- *
- * Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ * Copyright (C) 2003 - 2007 Tensilica Inc.
*/
-/*
- * This module contains a table that describes the layout of the various
- * custom registers and states associated with each coprocessor, as well
- * as those not associated with any coprocessor ("extra state").
- * This table is included with core dumps and is available via the ptrace
- * interface, allowing the layout of such register/state information to
- * be modified in the kernel without affecting the debugger. Each
- * register or state is identified using a 32-bit "libdb target number"
- * assigned when the Xtensa processor is generated.
- */
#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
#include <asm/processor.h>
+#include <asm/coprocessor.h>
+#include <asm/thread_info.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/tlbflush.h>
-#if XCHAL_HAVE_CP
+/*
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
-#define CP_LAST ((XCHAL_CP_MAX - 1) * COPROCESSOR_INFO_SIZE)
+/* IO protection is currently unsupported. */
-ENTRY(release_coprocessors)
+ENTRY(fast_io_protect)
+ wsr a0, EXCSAVE_1
+ movi a0, unrecoverable_exception
+ callx0 a0
- entry a1, 16
- # a2: task
- movi a3, 1 << XCHAL_CP_MAX # a3: coprocessor-bit
- movi a4, coprocessor_info+CP_LAST # a4: owner-table
- # a5: tmp
- movi a6, 0 # a6: 0
- rsil a7, LOCKLEVEL # a7: PS
+#if XTENSA_HAVE_COPROCESSORS
-1: /* Check if task is coprocessor owner of coprocessor[i]. */
+/*
+ * Macros for lazy context switch.
+ */
- l32i a5, a4, COPROCESSOR_INFO_OWNER
- srli a3, a3, 1
- beqz a3, 1f
- addi a4, a4, -8
- beq a2, a5, 1b
+#define SAVE_CP_REGS(x) \
+ .align 4; \
+ .Lsave_cp_regs_cp##x: \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ xchal_cp##x##_store a2 a4 a5 a6 a7; \
+ .endif; \
+ jx a0
- /* Found an entry: Clear entry CPENABLE bit to disable CP. */
+#define SAVE_CP_REGS_TAB(x) \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ .long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table; \
+ .else; \
+ .long 0; \
+ .endif; \
+ .long THREAD_XTREGS_CP##x
- rsr a5, CPENABLE
- s32i a6, a4, COPROCESSOR_INFO_OWNER
- xor a5, a3, a5
- wsr a5, CPENABLE
- bnez a3, 1b
+#define LOAD_CP_REGS(x) \
+ .align 4; \
+ .Lload_cp_regs_cp##x: \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ xchal_cp##x##_load a2 a4 a5 a6 a7; \
+ .endif; \
+ jx a0
-1: wsr a7, PS
- rsync
- retw
+#define LOAD_CP_REGS_TAB(x) \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ .long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table; \
+ .else; \
+ .long 0; \
+ .endif; \
+ .long THREAD_XTREGS_CP##x
+ SAVE_CP_REGS(0)
+ SAVE_CP_REGS(1)
+ SAVE_CP_REGS(2)
+ SAVE_CP_REGS(3)
+ SAVE_CP_REGS(4)
+ SAVE_CP_REGS(5)
+ SAVE_CP_REGS(6)
+ SAVE_CP_REGS(7)
-ENTRY(disable_coprocessor)
- entry sp, 16
- rsil a7, LOCKLEVEL
- rsr a3, CPENABLE
- movi a4, 1
- ssl a2
- sll a4, a4
- and a4, a3, a4
- xor a3, a3, a4
- wsr a3, CPENABLE
- wsr a7, PS
- rsync
- retw
+ LOAD_CP_REGS(0)
+ LOAD_CP_REGS(1)
+ LOAD_CP_REGS(2)
+ LOAD_CP_REGS(3)
+ LOAD_CP_REGS(4)
+ LOAD_CP_REGS(5)
+ LOAD_CP_REGS(6)
+ LOAD_CP_REGS(7)
-ENTRY(enable_coprocessor)
- entry sp, 16
- rsil a7, LOCKLEVEL
- rsr a3, CPENABLE
- movi a4, 1
- ssl a2
- sll a4, a4
- or a3, a3, a4
- wsr a3, CPENABLE
- wsr a7, PS
- rsync
- retw
+ .align 4
+.Lsave_cp_regs_jump_table:
+ SAVE_CP_REGS_TAB(0)
+ SAVE_CP_REGS_TAB(1)
+ SAVE_CP_REGS_TAB(2)
+ SAVE_CP_REGS_TAB(3)
+ SAVE_CP_REGS_TAB(4)
+ SAVE_CP_REGS_TAB(5)
+ SAVE_CP_REGS_TAB(6)
+ SAVE_CP_REGS_TAB(7)
+.Lload_cp_regs_jump_table:
+ LOAD_CP_REGS_TAB(0)
+ LOAD_CP_REGS_TAB(1)
+ LOAD_CP_REGS_TAB(2)
+ LOAD_CP_REGS_TAB(3)
+ LOAD_CP_REGS_TAB(4)
+ LOAD_CP_REGS_TAB(5)
+ LOAD_CP_REGS_TAB(6)
+ LOAD_CP_REGS_TAB(7)
-ENTRY(save_coprocessor_extra)
- entry sp, 16
- xchal_extra_store_funcbody
- retw
+/*
+ * coprocessor_save(buffer, index)
+ * a2 a3
+ * coprocessor_load(buffer, index)
+ * a2 a3
+ *
+ * Save or load coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the 'buffer' address.
+ *
+ * Note that these functions don't update the coprocessor_owner information!
+ *
+ */
-ENTRY(restore_coprocessor_extra)
- entry sp, 16
- xchal_extra_load_funcbody
+ENTRY(coprocessor_save)
+ entry a1, 32
+ s32i a0, a1, 0
+ movi a0, .Lsave_cp_regs_jump_table
+ addx8 a3, a3, a0
+ l32i a3, a3, 0
+ beqz a3, 1f
+ add a0, a0, a3
+ callx0 a0
+1: l32i a0, a1, 0
retw
-ENTRY(save_coprocessor_registers)
- entry sp, 16
- xchal_cpi_store_funcbody
+ENTRY(coprocessor_load)
+ entry a1, 32
+ s32i a0, a1, 0
+ movi a0, .Lload_cp_regs_jump_table
+ addx4 a3, a3, a0
+ l32i a3, a3, 0
+ beqz a3, 1f
+ add a0, a0, a3
+ callx0 a0
+1: l32i a0, a1, 0
retw
-ENTRY(restore_coprocessor_registers)
- entry sp, 16
- xchal_cpi_load_funcbody
+/*
+ * coprocessor_flush(struct task_info*, index)
+ * a2 a3
+ * coprocessor_restore(struct task_info*, index)
+ * a2 a3
+ *
+ * Save or load coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the task_info structure.
+ *
+ * Note that these functions don't update the coprocessor_owner information!
+ *
+ */
+
+
+ENTRY(coprocessor_flush)
+ entry a1, 32
+ s32i a0, a1, 0
+ movi a0, .Lsave_cp_regs_jump_table
+ addx8 a3, a3, a0
+ l32i a4, a3, 4
+ l32i a3, a3, 0
+ add a2, a2, a4
+ beqz a3, 1f
+ add a0, a0, a3
+ callx0 a0
+1: l32i a0, a1, 0
retw
+ENTRY(coprocessor_restore)
+ entry a1, 32
+ s32i a0, a1, 0
+ movi a0, .Lload_cp_regs_jump_table
+ addx4 a3, a3, a0
+ l32i a4, a3, 4
+ l32i a3, a3, 0
+ add a2, a2, a4
+ beqz a3, 1f
+ add a0, a0, a3
+ callx0 a0
+1: l32i a0, a1, 0
+ retw
/*
- * The Xtensa compile-time HAL (core.h) XCHAL_*_SA_CONTENTS_LIBDB macros
- * describe the contents of coprocessor & extra save areas in terms of
- * undefined CONTENTS_LIBDB_{SREG,UREG,REGF} macros. We define these
- * latter macros here; they expand into a table of the format we want.
- * The general format is:
+ * Entry condition:
*
- * CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum,
- * bitmask, rsv2, rsv3)
- * CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum,
- * bitmask, rsv2, rsv3)
- * CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index,
- * numentries, contentsize, regname_base,
- * regfile_name, rsv2, rsv3)
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
*
- * For this table, we only care about the <libdbnum>, <offset> and <size>
- * fields.
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*/
-/* Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below: */
-
-#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1,name,sregnum, \
- bitmask, rsv2, rsv3) \
- reg_entry libdbnum, offset, size ;
-#define CONTENTS_LIBDB_UREG(libdbnum,offset,size,align,rsv1,name,uregnum, \
- bitmask, rsv2, rsv3) \
- reg_entry libdbnum, offset, size ;
-#define CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, \
- numentries, contentsize, regname_base, \
- regfile_name, rsv2, rsv3) \
- reg_entry libdbnum, offset, size ;
-
-/* A single table entry: */
- .macro reg_entry libdbnum, offset, size
- .ifne (__last_offset-(__last_group_offset+\offset))
- /* padding entry */
- .word (0xFC000000+__last_offset-(__last_group_offset+\offset))
- .endif
- .word \libdbnum /* actual entry */
- .set __last_offset, __last_group_offset+\offset+\size
- .endm /* reg_entry */
-
-
-/* Table entry that marks the beginning of a group (coprocessor or "extra"): */
- .macro reg_group cpnum, num_entries, align
- .set __last_group_offset, (__last_offset + \align- 1) & -\align
- .ifne \num_entries
- .word 0xFD000000+(\cpnum<<16)+\num_entries
- .endif
- .endm /* reg_group */
+ENTRY(fast_coprocessor_double)
+ wsr a0, EXCSAVE_1
+ movi a0, unrecoverable_exception
+ callx0 a0
-/*
- * Register info tables.
- */
- .section .rodata, "a"
- .globl _xtensa_reginfo_tables
- .globl _xtensa_reginfo_table_size
- .align 4
-_xtensa_reginfo_table_size:
- .word _xtensa_reginfo_table_end - _xtensa_reginfo_tables
-
-_xtensa_reginfo_tables:
- .set __last_offset, 0
- reg_group 0xFF, XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM, XCHAL_EXTRA_SA_ALIGN
- XCHAL_EXTRA_SA_CONTENTS_LIBDB
- reg_group 0, XCHAL_CP0_SA_CONTENTS_LIBDB_NUM, XCHAL_CP0_SA_ALIGN
- XCHAL_CP0_SA_CONTENTS_LIBDB
- reg_group 1, XCHAL_CP1_SA_CONTENTS_LIBDB_NUM, XCHAL_CP1_SA_ALIGN
- XCHAL_CP1_SA_CONTENTS_LIBDB
- reg_group 2, XCHAL_CP2_SA_CONTENTS_LIBDB_NUM, XCHAL_CP2_SA_ALIGN
- XCHAL_CP2_SA_CONTENTS_LIBDB
- reg_group 3, XCHAL_CP3_SA_CONTENTS_LIBDB_NUM, XCHAL_CP3_SA_ALIGN
- XCHAL_CP3_SA_CONTENTS_LIBDB
- reg_group 4, XCHAL_CP4_SA_CONTENTS_LIBDB_NUM, XCHAL_CP4_SA_ALIGN
- XCHAL_CP4_SA_CONTENTS_LIBDB
- reg_group 5, XCHAL_CP5_SA_CONTENTS_LIBDB_NUM, XCHAL_CP5_SA_ALIGN
- XCHAL_CP5_SA_CONTENTS_LIBDB
- reg_group 6, XCHAL_CP6_SA_CONTENTS_LIBDB_NUM, XCHAL_CP6_SA_ALIGN
- XCHAL_CP6_SA_CONTENTS_LIBDB
- reg_group 7, XCHAL_CP7_SA_CONTENTS_LIBDB_NUM, XCHAL_CP7_SA_ALIGN
- XCHAL_CP7_SA_CONTENTS_LIBDB
- .word 0xFC000000 /* invalid register number,marks end of table*/
-_xtensa_reginfo_table_end:
-#endif
+ENTRY(fast_coprocessor)
+
+ /* Save remaining registers a1-a3 and SAR */
+
+ xsr a3, EXCSAVE_1
+ s32i a3, a2, PT_AREG3
+ rsr a3, SAR
+ s32i a1, a2, PT_AREG1
+ s32i a3, a2, PT_SAR
+ mov a1, a2
+ rsr a2, DEPC
+ s32i a2, a1, PT_AREG2
+
+ /*
+ * The hal macros require up to 4 temporary registers. We use a3..a6.
+ */
+
+ s32i a4, a1, PT_AREG4
+ s32i a5, a1, PT_AREG5
+ s32i a6, a1, PT_AREG6
+
+ /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
+
+ rsr a3, EXCCAUSE
+ addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
+
+ /* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/
+
+ ssl a3 # SAR: 32 - coprocessor_number
+ movi a2, 1
+ rsr a0, CPENABLE
+ sll a2, a2
+ or a0, a0, a2
+ wsr a0, CPENABLE
+ rsync
+
+ /* Retrieve previous owner. (a3 still holds CP number) */
+
+ movi a0, coprocessor_owner # list of owners
+ addx4 a0, a3, a0 # entry for CP
+ l32i a4, a0, 0
+
+ beqz a4, 1f # skip 'save' if no previous owner
+
+ /* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
+
+ l32i a5, a4, THREAD_CPENABLE
+ xor a5, a5, a2 # (1 << cp-id) still in a2
+ s32i a5, a4, THREAD_CPENABLE
+
+ /*
+ * Get context save area and 'call' save routine.
+ * (a4 still holds previous owner (thread_info), a3 CP number)
+ */
+
+ movi a5, .Lsave_cp_regs_jump_table
+ movi a0, 2f # a0: 'return' address
+ addx8 a3, a3, a5 # a3: coprocessor number
+ l32i a2, a3, 4 # a2: xtregs offset
+ l32i a3, a3, 0 # a3: jump offset
+ add a2, a2, a4
+ add a4, a3, a5 # a4: address of save routine
+ jx a4
+
+ /* Note that only a0 and a1 were preserved. */
+
+2: rsr a3, EXCCAUSE
+ addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
+ movi a0, coprocessor_owner
+ addx4 a0, a3, a0
+
+ /* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */
+
+1: GET_THREAD_INFO (a4, a1)
+ s32i a4, a0, 0
+
+ /* Get context save area and 'call' load routine. */
+
+ movi a5, .Lload_cp_regs_jump_table
+ movi a0, 1f
+ addx8 a3, a3, a5
+ l32i a2, a3, 4 # a2: xtregs offset
+ l32i a3, a3, 0 # a3: jump offset
+ add a2, a2, a4
+ add a4, a3, a5
+ jx a4
+
+ /* Restore all registers and return from exception handler. */
+
+1: l32i a6, a1, PT_AREG6
+ l32i a5, a1, PT_AREG5
+ l32i a4, a1, PT_AREG4
+
+ l32i a0, a1, PT_SAR
+ l32i a3, a1, PT_AREG3
+ l32i a2, a1, PT_AREG2
+ wsr a0, SAR
+ l32i a0, a1, PT_AREG0
+ l32i a1, a1, PT_AREG1
+
+ rfe
+
+ .data
+ENTRY(coprocessor_owner)
+ .fill XCHAL_CP_MAX, 4, 0
+
+#endif /* XTENSA_HAVE_COPROCESSORS */
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index b51ddb0dcf28..24770b6a5e4c 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -25,6 +25,7 @@
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
+#include <asm/variant/tie-asm.h>
/* Unimplemented features. */
@@ -213,19 +214,7 @@ _user_exception:
/* We are back to the original stack pointer (a1) */
-2:
-#if XCHAL_EXTRA_SA_SIZE
-
- /* For user exceptions, save the extra state into the user's TCB.
- * Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
- */
-
- GET_CURRENT(a2,a1)
- addi a2, a2, THREAD_CP_SAVE
- xchal_extra_store_funcbody
-#endif
-
- /* Now, jump to the common exception handler. */
+2: /* Now, jump to the common exception handler. */
j common_exception
@@ -381,6 +370,10 @@ common_exception:
s32i a2, a1, PT_LBEG
s32i a3, a1, PT_LEND
+ /* Save optional registers. */
+
+ save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+
/* Go to second-level dispatcher. Set up parameters to pass to the
* exception handler and call the exception handler.
*/
@@ -452,22 +445,6 @@ common_exception_return:
4: /* a2 holds GET_CURRENT(a2,a1) */
-#if XCHAL_EXTRA_SA_SIZE
-
- /* For user exceptions, restore the extra state from the user's TCB. */
-
- /* Note: a2 still contains GET_CURRENT(a2,a1) */
- addi a2, a2, THREAD_CP_SAVE
- xchal_extra_load_funcbody
-
- /* We must assume that xchal_extra_store_funcbody destroys
- * registers a2..a15. FIXME, this list can eventually be
- * reduced once real register requirements of the macro are
- * finalized. */
-
-#endif /* XCHAL_EXTRA_SA_SIZE */
-
-
/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
l32i a2, a1, PT_WINDOWBASE
@@ -614,6 +591,12 @@ kernel_exception_exit:
common_exception_exit:
+ /* Restore optional registers. */
+
+ load_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
+
+ /* Restore address registers. */
+
_bbsi.l a2, 1, 1f
l32i a4, a1, PT_AREG4
l32i a5, a1, PT_AREG5
@@ -1146,7 +1129,6 @@ CATCH
* excsave_1: a3
*
* Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
- * Note: We don't need to save a2 in depc (return value)
*/
ENTRY(fast_syscall_spill_registers)
@@ -1162,29 +1144,31 @@ ENTRY(fast_syscall_spill_registers)
rsr a0, SAR
xsr a3, EXCSAVE_1 # restore a3 and excsave_1
- s32i a0, a2, PT_AREG4 # store SAR to PT_AREG4
s32i a3, a2, PT_AREG3
+ s32i a4, a2, PT_AREG4
+ s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5
/* The spill routine might clobber a7, a11, and a15. */
- s32i a7, a2, PT_AREG5
- s32i a11, a2, PT_AREG6
- s32i a15, a2, PT_AREG7
+ s32i a7, a2, PT_AREG7
+ s32i a11, a2, PT_AREG11
+ s32i a15, a2, PT_AREG15
- call0 _spill_registers # destroys a3, DEPC, and SAR
+ call0 _spill_registers # destroys a3, a4, and SAR
/* Advance PC, restore registers and SAR, and return from exception. */
- l32i a3, a2, PT_AREG4
+ l32i a3, a2, PT_AREG5
+ l32i a4, a2, PT_AREG4
l32i a0, a2, PT_AREG0
wsr a3, SAR
l32i a3, a2, PT_AREG3
/* Restore clobbered registers. */
- l32i a7, a2, PT_AREG5
- l32i a11, a2, PT_AREG6
- l32i a15, a2, PT_AREG7
+ l32i a7, a2, PT_AREG7
+ l32i a11, a2, PT_AREG11
+ l32i a15, a2, PT_AREG15
movi a2, 0
rfe
@@ -1257,9 +1241,9 @@ fast_syscall_spill_registers_fixup:
movi a3, exc_table
rsr a0, EXCCAUSE
- addx4 a0, a0, a3 # find entry in table
- l32i a0, a0, EXC_TABLE_FAST_USER # load handler
- jx a0
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ jx a0
fast_syscall_spill_registers_fixup_return:
@@ -1297,7 +1281,7 @@ fast_syscall_spill_registers_fixup_return:
* This is not a real function. The following conditions must be met:
*
* - must be called with call0.
- * - uses DEPC, a3 and SAR.
+ * - uses a3, a4 and SAR.
* - the last 'valid' register of each frame are clobbered.
* - the caller must have registered a fixup handler
* (or be inside a critical section)
@@ -1309,41 +1293,39 @@ ENTRY(_spill_registers)
/*
* Rotate ws so that the current windowbase is at bit 0.
* Assume ws = xxxwww1yy (www1 current window frame).
- * Rotate ws right so that a2 = yyxxxwww1.
+ * Rotate ws right so that a4 = yyxxxwww1.
*/
- wsr a2, DEPC # preserve a2
- rsr a2, WINDOWBASE
+ rsr a4, WINDOWBASE
rsr a3, WINDOWSTART # a3 = xxxwww1yy
- ssr a2 # holds WB
- slli a2, a3, WSBITS
- or a3, a3, a2 # a3 = xxxwww1yyxxxwww1yy
+ ssr a4 # holds WB
+ slli a4, a3, WSBITS
+ or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy
srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
/* We are done if there are no more than the current register frame. */
extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
- movi a2, (1 << (WSBITS-1))
+ movi a4, (1 << (WSBITS-1))
_beqz a3, .Lnospill # only one active frame? jump
/* We want 1 at the top, so that we return to the current windowbase */
- or a3, a3, a2 # 1yyxxxwww
+ or a3, a3, a4 # 1yyxxxwww
/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
wsr a3, WINDOWSTART # save shifted windowstart
- neg a2, a3
- and a3, a2, a3 # first bit set from right: 000010000
+ neg a4, a3
+ and a3, a4, a3 # first bit set from right: 000010000
- ffs_ws a2, a3 # a2: shifts to skip empty frames
+ ffs_ws a4, a3 # a4: shifts to skip empty frames
movi a3, WSBITS
- sub a2, a3, a2 # WSBITS-a2:number of 0-bits from right
- ssr a2 # save in SAR for later.
+ sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right
+ ssr a4 # save in SAR for later.
rsr a3, WINDOWBASE
- add a3, a3, a2
- rsr a2, DEPC # restore a2
+ add a3, a3, a4
wsr a3, WINDOWBASE
rsync
@@ -1373,7 +1355,6 @@ ENTRY(_spill_registers)
j .Lc12c
.Lnospill:
- rsr a2, DEPC
ret
.Lloop: _bbsi.l a3, 1, .Lc4
@@ -1810,154 +1791,6 @@ ENTRY(fast_store_prohibited)
1: j _user_exception
-#if XCHAL_EXTRA_SA_SIZE
-
-#warning fast_coprocessor untested
-
-/*
- * Entry condition:
- *
- * a0: trashed, original value saved on stack (PT_AREG0)
- * a1: a1
- * a2: new stack pointer, original in DEPC
- * a3: dispatch table
- * depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
- *
- * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
- * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
- */
-
-ENTRY(fast_coprocessor_double)
- wsr a0, EXCSAVE_1
- movi a0, unrecoverable_exception
- callx0 a0
-
-ENTRY(fast_coprocessor)
-
- /* Fatal if we are in a double exception. */
-
- l32i a0, a2, PT_DEPC
- _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double
-
- /* Save some registers a1, a3, a4, SAR */
-
- xsr a3, EXCSAVE_1
- s32i a3, a2, PT_AREG3
- rsr a3, SAR
- s32i a4, a2, PT_AREG4
- s32i a1, a2, PT_AREG1
- s32i a5, a1, PT_AREG5
- s32i a3, a2, PT_SAR
- mov a1, a2
-
- /* Currently, the HAL macros only guarantee saving a0 and a1.
- * These can and will be refined in the future, but for now,
- * just save the remaining registers of a2...a15.
- */
- s32i a6, a1, PT_AREG6
- s32i a7, a1, PT_AREG7
- s32i a8, a1, PT_AREG8
- s32i a9, a1, PT_AREG9
- s32i a10, a1, PT_AREG10
- s32i a11, a1, PT_AREG11
- s32i a12, a1, PT_AREG12
- s32i a13, a1, PT_AREG13
- s32i a14, a1, PT_AREG14
- s32i a15, a1, PT_AREG15
-
- /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
-
- rsr a0, EXCCAUSE
- addi a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED
-
- /* Set corresponding CPENABLE bit */
-
- movi a4, 1
- ssl a3 # SAR: 32 - coprocessor_number
- rsr a5, CPENABLE
- sll a4, a4
- or a4, a5, a4
- wsr a4, CPENABLE
- rsync
- movi a5, coprocessor_info # list of owner and offset into cp_save
- addx8 a0, a4, a5 # entry for CP
-
- bne a4, a5, .Lload # bit wasn't set before, cp not in use
-
- /* Now compare the current task with the owner of the coprocessor.
- * If they are the same, there is no reason to save or restore any
- * coprocessor state. Having already enabled the coprocessor,
- * branch ahead to return.
- */
- GET_CURRENT(a5,a1)
- l32i a4, a0, COPROCESSOR_INFO_OWNER # a4: current owner for this CP
- beq a4, a5, .Ldone
-
- /* Find location to dump current coprocessor state:
- * task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
- *
- * Note: a0 pointer to the entry in the coprocessor owner table,
- * a3 coprocessor number,
- * a4 current owner of coprocessor.
- */
- l32i a5, a0, COPROCESSOR_INFO_OFFSET
- addi a2, a4, THREAD_CP_SAVE
- add a2, a2, a5
-
- /* Store current coprocessor states. (a5 still has CP number) */
-
- xchal_cpi_store_funcbody
-
- /* The macro might have destroyed a3 (coprocessor number), but
- * SAR still has 32 - coprocessor_number!
- */
- movi a3, 32
- rsr a4, SAR
- sub a3, a3, a4
-
-.Lload: /* A new task now owns the corpocessors. Save its TCB pointer into
- * the coprocessor owner table.
- *
- * Note: a0 pointer to the entry in the coprocessor owner table,
- * a3 coprocessor number.
- */
- GET_CURRENT(a4,a1)
- s32i a4, a0, 0
-
- /* Find location from where to restore the current coprocessor state.*/
-
- l32i a5, a0, COPROCESSOR_INFO_OFFSET
- addi a2, a4, THREAD_CP_SAVE
- add a2, a2, a4
-
- xchal_cpi_load_funcbody
-
- /* We must assume that the xchal_cpi_store_funcbody macro destroyed
- * registers a2..a15.
- */
-
-.Ldone: l32i a15, a1, PT_AREG15
- l32i a14, a1, PT_AREG14
- l32i a13, a1, PT_AREG13
- l32i a12, a1, PT_AREG12
- l32i a11, a1, PT_AREG11
- l32i a10, a1, PT_AREG10
- l32i a9, a1, PT_AREG9
- l32i a8, a1, PT_AREG8
- l32i a7, a1, PT_AREG7
- l32i a6, a1, PT_AREG6
- l32i a5, a1, PT_AREG5
- l32i a4, a1, PT_AREG4
- l32i a3, a1, PT_AREG3
- l32i a2, a1, PT_AREG2
- l32i a0, a1, PT_AREG0
- l32i a1, a1, PT_AREG1
-
- rfe
-
-#endif /* XCHAL_EXTRA_SA_SIZE */
-
/*
* System Calls.
*
@@ -2066,20 +1899,36 @@ ENTRY(_switch_to)
entry a1, 16
- mov a4, a3 # preserve a3
+ mov a12, a2 # preserve 'prev' (a2)
+ mov a13, a3 # and 'next' (a3)
- s32i a0, a2, THREAD_RA # save return address
- s32i a1, a2, THREAD_SP # save stack pointer
+ l32i a4, a2, TASK_THREAD_INFO
+ l32i a5, a3, TASK_THREAD_INFO
- /* Disable ints while we manipulate the stack pointer; spill regs. */
+ save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
- movi a5, (1 << PS_EXCM_BIT) | LOCKLEVEL
- xsr a5, PS
+ s32i a0, a12, THREAD_RA # save return address
+ s32i a1, a12, THREAD_SP # save stack pointer
+
+ /* Disable ints while we manipulate the stack pointer. */
+
+ movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
+ xsr a14, PS
rsr a3, EXCSAVE_1
rsync
s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
- call0 _spill_registers
+ /* Switch CPENABLE */
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+ l32i a3, a5, THREAD_CPENABLE
+ xsr a3, CPENABLE
+ s32i a3, a4, THREAD_CPENABLE
+#endif
+
+ /* Flush register file. */
+
+ call0 _spill_registers # destroys a3, a4, and SAR
/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
@@ -2087,19 +1936,21 @@ ENTRY(_switch_to)
* we return from kernel space.
*/
- l32i a0, a4, TASK_THREAD_INFO
rsr a3, EXCSAVE_1 # exc_table
- movi a1, 0
- addi a0, a0, PT_REGS_OFFSET
- s32i a1, a3, EXC_TABLE_FIXUP
- s32i a0, a3, EXC_TABLE_KSTK
+ movi a6, 0
+ addi a7, a5, PT_REGS_OFFSET
+ s32i a6, a3, EXC_TABLE_FIXUP
+ s32i a7, a3, EXC_TABLE_KSTK
/* restore context of the task that 'next' addresses */
- l32i a0, a4, THREAD_RA /* restore return address */
- l32i a1, a4, THREAD_SP /* restore stack pointer */
+ l32i a0, a13, THREAD_RA # restore return address
+ l32i a1, a13, THREAD_SP # restore stack pointer
+
+ load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
- wsr a5, PS
+ wsr a14, PS
+ mov a2, a12 # return 'prev'
rsync
retw
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 026138d641a4..9185597eb6a0 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -52,6 +52,55 @@ void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
+#if XTENSA_HAVE_COPROCESSORS
+
+void coprocessor_release_all(struct thread_info *ti)
+{
+ unsigned long cpenable;
+ int i;
+
+ /* Make sure we don't switch tasks during this operation. */
+
+ preempt_disable();
+
+ /* Walk through all cp owners and release it for the requested one. */
+
+ cpenable = ti->cpenable;
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ if (coprocessor_owner[i] == ti) {
+ coprocessor_owner[i] = 0;
+ cpenable &= ~(1 << i);
+ }
+ }
+
+ ti->cpenable = cpenable;
+ coprocessor_clear_cpenable();
+
+ preempt_enable();
+}
+
+void coprocessor_flush_all(struct thread_info *ti)
+{
+ unsigned long cpenable;
+ int i;
+
+ preempt_disable();
+
+ cpenable = ti->cpenable;
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
+ coprocessor_flush(ti, i);
+ cpenable >>= 1;
+ }
+
+ preempt_enable();
+}
+
+#endif
+
+
/*
* Powermanagement idle function, if any is provided by the platform.
*/
@@ -71,15 +120,36 @@ void cpu_idle(void)
}
/*
- * Free current thread data structures etc..
+ * This is called when the thread calls exit().
*/
-
void exit_thread(void)
{
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_release_all(current_thread_info());
+#endif
}
+/*
+ * Flush thread state. This is called when a thread does an execve()
+ * Note that we flush coprocessor registers for the case execve fails.
+ */
void flush_thread(void)
{
+#if XTENSA_HAVE_COPROCESSORS
+ struct thread_info *ti = current_thread_info();
+ coprocessor_flush_all(ti);
+ coprocessor_release_all(ti);
+#endif
+}
+
+/*
+ * This is called before the thread is copied.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_flush_all(task_thread_info(tsk));
+#endif
}
/*
@@ -107,6 +177,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
struct task_struct * p, struct pt_regs * regs)
{
struct pt_regs *childregs;
+ struct thread_info *ti;
unsigned long tos;
int user_mode = user_mode(regs);
@@ -128,13 +199,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
p->set_child_tid = p->clear_child_tid = NULL;
p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
p->thread.sp = (unsigned long)childregs;
+
if (user_mode(regs)) {
int len = childregs->wmask & ~0xf;
childregs->areg[1] = usp;
memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
&regs->areg[XCHAL_NUM_AREGS - len/4], len);
-
+// FIXME: we need to set THREADPTR in thread_info...
if (clone_flags & CLONE_SETTLS)
childregs->areg[2] = childregs->areg[6];
@@ -142,6 +214,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
/* In kernel space, we start a new thread with a new stack. */
childregs->wmask = 1;
}
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+ ti = task_thread_info(p);
+ ti->cpenable = 0;
+#endif
+
return 0;
}
@@ -179,10 +257,6 @@ unsigned long get_wchan(struct task_struct *p)
}
/*
- * do_copy_regs() gathers information from 'struct pt_regs' and
- * 'current->thread.areg[]' to fill in the xtensa_gregset_t
- * structure.
- *
* xtensa_gregset_t and 'struct pt_regs' are vastly different formats
* of processor registers. Besides different ordering,
* xtensa_gregset_t contains non-live register information that
@@ -191,9 +265,20 @@ unsigned long get_wchan(struct task_struct *p)
*
*/
-void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
- struct task_struct *tsk)
+void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
{
+ unsigned long wb, ws, wm;
+ int live, last;
+
+ wb = regs->windowbase;
+ ws = regs->windowstart;
+ wm = regs->wmask;
+ ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);
+
+ /* Don't leak any random bits. */
+
+ memset(elfregs, 0, sizeof (elfregs));
+
/* Note: PS.EXCM is not set while user task is running; its
* being set in regs->ps is for exception handling convenience.
*/
@@ -204,159 +289,18 @@ void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
elfregs->lend = regs->lend;
elfregs->lcount = regs->lcount;
elfregs->sar = regs->sar;
+ elfregs->windowstart = ws;
- memcpy (elfregs->a, regs->areg, sizeof(elfregs->a));
-}
-
-void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
-{
- do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
-}
-
-
-/* The inverse of do_copy_regs(). No error or sanity checking. */
-
-void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
- struct task_struct *tsk)
-{
- const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
- unsigned long ps;
-
- /* Note: PS.EXCM is not set while user task is running; it
- * needs to be set in regs->ps is for exception handling convenience.
- */
-
- ps = (regs->ps & ~ps_mask) | (elfregs->ps & ps_mask) | (1<<PS_EXCM_BIT);
- regs->ps = ps;
- regs->pc = elfregs->pc;
- regs->lbeg = elfregs->lbeg;
- regs->lend = elfregs->lend;
- regs->lcount = elfregs->lcount;
- regs->sar = elfregs->sar;
-
- memcpy (regs->areg, elfregs->a, sizeof(regs->areg));
-}
-
-/*
- * do_save_fpregs() gathers information from 'struct pt_regs' and
- * 'current->thread' to fill in the elf_fpregset_t structure.
- *
- * Core files and ptrace use elf_fpregset_t.
- */
-
-void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
- struct task_struct *tsk)
-{
-#if XCHAL_HAVE_CP
-
- extern unsigned char _xtensa_reginfo_tables[];
- extern unsigned _xtensa_reginfo_table_size;
- int i;
- unsigned long flags;
-
- /* Before dumping coprocessor state from memory,
- * ensure any live coprocessor contents for this
- * task are first saved to memory:
- */
- local_irq_save(flags);
-
- for (i = 0; i < XCHAL_CP_MAX; i++) {
- if (tsk == coprocessor_info[i].owner) {
- enable_coprocessor(i);
- save_coprocessor_registers(
- tsk->thread.cp_save+coprocessor_info[i].offset,i);
- disable_coprocessor(i);
- }
- }
-
- local_irq_restore(flags);
-
- /* Now dump coprocessor & extra state: */
- memcpy((unsigned char*)fpregs,
- _xtensa_reginfo_tables, _xtensa_reginfo_table_size);
- memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
- tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
-#endif
+ live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
+ last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
+ memcpy(elfregs->a, regs->areg, live * 4);
+ memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
}
-/*
- * The inverse of do_save_fpregs().
- * Copies coprocessor and extra state from fpregs into regs and tsk->thread.
- * Returns 0 on success, non-zero if layout doesn't match.
- */
-
-int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
- struct task_struct *tsk)
+int dump_fpu(void)
{
-#if XCHAL_HAVE_CP
-
- extern unsigned char _xtensa_reginfo_tables[];
- extern unsigned _xtensa_reginfo_table_size;
- int i;
- unsigned long flags;
-
- /* Make sure save area layouts match.
- * FIXME: in the future we could allow restoring from
- * a different layout of the same registers, by comparing
- * fpregs' table with _xtensa_reginfo_tables and matching
- * entries and copying registers one at a time.
- * Not too sure yet whether that's very useful.
- */
-
- if( memcmp((unsigned char*)fpregs,
- _xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
- return -1;
- }
-
- /* Before restoring coprocessor state from memory,
- * ensure any live coprocessor contents for this
- * task are first invalidated.
- */
-
- local_irq_save(flags);
-
- for (i = 0; i < XCHAL_CP_MAX; i++) {
- if (tsk == coprocessor_info[i].owner) {
- enable_coprocessor(i);
- save_coprocessor_registers(
- tsk->thread.cp_save+coprocessor_info[i].offset,i);
- coprocessor_info[i].owner = 0;
- disable_coprocessor(i);
- }
- }
-
- local_irq_restore(flags);
-
- /* Now restore coprocessor & extra state: */
-
- memcpy(tsk->thread.cp_save,
- (unsigned char*)fpregs + _xtensa_reginfo_table_size,
- XTENSA_CP_EXTRA_SIZE);
-#endif
return 0;
}
-/*
- * Fill in the CP structure for a core dump for a particular task.
- */
-
-int
-dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
-{
- return 0; /* no coprocessors active on this processor */
-}
-
-/*
- * Fill in the CP structure for a core dump.
- * This includes any FPU coprocessor.
- * Here, we dump all coprocessors, and other ("extra") custom state.
- *
- * This function is called by elf_core_dump() in fs/binfmt_elf.c
- * (in which case 'regs' comes from calls to do_coredump, see signals.c).
- */
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
-{
- return dump_task_fpu(regs, current, r);
-}
asmlinkage
long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
@@ -370,8 +314,8 @@ long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
}
/*
- * * xtensa_execve() executes a new program.
- * */
+ * xtensa_execve() executes a new program.
+ */
asmlinkage
long xtensa_execve(char __user *name, char __user * __user *argv,
@@ -386,7 +330,6 @@ long xtensa_execve(char __user *name, char __user * __user *argv,
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
- // FIXME: release coprocessor??
error = do_execve(filename, argv, envp, regs);
if (error == 0) {
task_lock(current);
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 5533c7850d53..f6669d605125 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
*
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* Chris Zankel <chris@zankel.net>
@@ -28,14 +28,10 @@
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/elf.h>
-
-#define TEST_KERNEL // verify kernel operations FIXME: remove
-
+#include <asm/coprocessor.h>
/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
+ * Called by kernel/ptrace.c when detaching to disable single stepping.
*/
void ptrace_disable(struct task_struct *child)
@@ -43,136 +39,233 @@ void ptrace_disable(struct task_struct *child)
/* Nothing to do.. */
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+int ptrace_getregs(struct task_struct *child, void __user *uregs)
{
- int ret = -EPERM;
+ struct pt_regs *regs = task_pt_regs(child);
+ xtensa_gregset_t __user *gregset = uregs;
+ unsigned long wb = regs->windowbase;
+ unsigned long ws = regs->windowstart;
+ unsigned long wm = regs->wmask;
+ int ret = 0;
+ int live, last;
+
+ if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
+ return -EIO;
+
+ /* Norm windowstart to a windowbase of 0. */
+
+ ws = ((ws>>wb) | (ws<<(WSBITS-wb))) & ((1<<WSBITS)-1);
+
+ ret |= __put_user(regs->pc, &gregset->pc);
+ ret |= __put_user(regs->ps & ~(1 << PS_EXCM_BIT), &gregset->ps);
+ ret |= __put_user(regs->lbeg, &gregset->lbeg);
+ ret |= __put_user(regs->lend, &gregset->lend);
+ ret |= __put_user(regs->lcount, &gregset->lcount);
+ ret |= __put_user(ws, &gregset->windowstart);
+
+ live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
+ last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
+ ret |= __copy_to_user(gregset->a, regs->areg, live * 4);
+ ret |= __copy_to_user(gregset->a + last, regs->areg + last, (wm>>4)*16);
+
+ return ret ? -EFAULT : 0;
+}
- switch (request) {
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA:
- ret = generic_ptrace_peekdata(child, addr, data);
- goto out;
+int ptrace_setregs(struct task_struct *child, void __user *uregs)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+ xtensa_gregset_t *gregset = uregs;
+ const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
+ unsigned long wm = regs->wmask;
+ unsigned long ps;
+ int ret = 0;
+ int live, last;
+
+ if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
+ return -EIO;
+
+ ret |= __get_user(regs->pc, &gregset->pc);
+ ret |= __get_user(ps, &gregset->ps);
+ ret |= __get_user(regs->lbeg, &gregset->lbeg);
+ ret |= __get_user(regs->lend, &gregset->lend);
+ ret |= __get_user(regs->lcount, &gregset->lcount);
+
+ regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT);
+
+ live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
+ last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
+ ret |= __copy_from_user(regs->areg, gregset->a, live * 4);
+ ret |= __copy_from_user(regs->areg+last, gregset->a+last, (wm>>4)*16);
+
+ return ret ? -EFAULT : 0;
+}
- /* Read the word at location addr in the USER area. */
- case PTRACE_PEEKUSR:
- {
- struct pt_regs *regs;
- unsigned long tmp;
+int ptrace_getxregs(struct task_struct *child, void __user *uregs)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+ struct thread_info *ti = task_thread_info(child);
+ elf_xtregs_t __user *xtregs = uregs;
+ int ret = 0;
+
+ if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
+ return -EIO;
+
+#if XTENSA_HAVE_COPROCESSORS
+ /* Flush all coprocessor registers to memory. */
+ coprocessor_flush_all(ti);
+ ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp,
+ sizeof(xtregs_coprocessor_t));
+#endif
+ ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
+ sizeof(xtregs->opt));
+ ret |= __copy_to_user(&xtregs->user,&ti->xtregs_user,
+ sizeof(xtregs->user));
- regs = task_pt_regs(child);
- tmp = 0; /* Default return value. */
+ return ret ? -EFAULT : 0;
+}
+
+int ptrace_setxregs(struct task_struct *child, void __user *uregs)
+{
+ struct thread_info *ti = task_thread_info(child);
+ struct pt_regs *regs = task_pt_regs(child);
+ elf_xtregs_t *xtregs = uregs;
+ int ret = 0;
+
+#if XTENSA_HAVE_COPROCESSORS
+ /* Flush all coprocessors before we overwrite them. */
+ coprocessor_flush_all(ti);
+ coprocessor_release_all(ti);
+
+ ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
+ sizeof(xtregs_coprocessor_t));
+#endif
+ ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
+ sizeof(xtregs->opt));
+ ret |= __copy_from_user(&ti->xtregs_user, &xtregs->user,
+ sizeof(xtregs->user));
+
+ return ret ? -EFAULT : 0;
+}
+
+int ptrace_peekusr(struct task_struct *child, long regno, long __user *ret)
+{
+ struct pt_regs *regs;
+ unsigned long tmp;
+
+ regs = task_pt_regs(child);
+ tmp = 0; /* Default return value. */
- switch(addr) {
+ switch(regno) {
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
- {
- int ar = addr - REG_AR_BASE - regs->windowbase * 4;
- ar &= (XCHAL_NUM_AREGS - 1);
- if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
- tmp = regs->areg[ar];
- else
- ret = -EIO;
+ tmp = regs->areg[regno - REG_AR_BASE];
break;
- }
+
case REG_A_BASE ... REG_A_BASE + 15:
- tmp = regs->areg[addr - REG_A_BASE];
+ tmp = regs->areg[regno - REG_A_BASE];
break;
+
case REG_PC:
tmp = regs->pc;
break;
+
case REG_PS:
/* Note: PS.EXCM is not set while user task is running;
* its being set in regs is for exception handling
* convenience. */
tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
break;
+
case REG_WB:
- tmp = regs->windowbase;
- break;
+ break; /* tmp = 0 */
+
case REG_WS:
- tmp = regs->windowstart;
+ {
+ unsigned long wb = regs->windowbase;
+ unsigned long ws = regs->windowstart;
+ tmp = ((ws>>wb) | (ws<<(WSBITS-wb))) & ((1<<WSBITS)-1);
break;
+ }
case REG_LBEG:
tmp = regs->lbeg;
break;
+
case REG_LEND:
tmp = regs->lend;
break;
+
case REG_LCOUNT:
tmp = regs->lcount;
break;
+
case REG_SAR:
tmp = regs->sar;
break;
- case REG_DEPC:
- tmp = regs->depc;
- break;
- case REG_EXCCAUSE:
- tmp = regs->exccause;
- break;
- case REG_EXCVADDR:
- tmp = regs->excvaddr;
- break;
+
case SYSCALL_NR:
tmp = regs->syscall;
break;
- default:
- tmp = 0;
- ret = -EIO;
- goto out;
- }
- ret = put_user(tmp, (unsigned long *) data);
- goto out;
- }
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA:
- ret = generic_ptrace_pokedata(child, addr, data);
- goto out;
+ default:
+ return -EIO;
+ }
+ return put_user(tmp, ret);
+}
- case PTRACE_POKEUSR:
- {
- struct pt_regs *regs;
- regs = task_pt_regs(child);
+int ptrace_pokeusr(struct task_struct *child, long regno, long val)
+{
+ struct pt_regs *regs;
+ regs = task_pt_regs(child);
- switch (addr) {
+ switch (regno) {
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
- {
- int ar = addr - REG_AR_BASE - regs->windowbase * 4;
- if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
- regs->areg[ar & (XCHAL_NUM_AREGS - 1)] = data;
- else
- ret = -EIO;
+ regs->areg[regno - REG_AR_BASE] = val;
break;
- }
+
case REG_A_BASE ... REG_A_BASE + 15:
- regs->areg[addr - REG_A_BASE] = data;
+ regs->areg[regno - REG_A_BASE] = val;
break;
+
case REG_PC:
- regs->pc = data;
+ regs->pc = val;
break;
+
case SYSCALL_NR:
- regs->syscall = data;
- break;
-#ifdef TEST_KERNEL
- case REG_WB:
- regs->windowbase = data;
+ regs->syscall = val;
break;
- case REG_WS:
- regs->windowstart = data;
- break;
-#endif
default:
- /* The rest are not allowed. */
- ret = -EIO;
- break;
- }
+ return -EIO;
+ }
+ return 0;
+}
+
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+{
+ int ret = -EPERM;
+
+ switch (request) {
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
+ break;
+
+ case PTRACE_PEEKUSR: /* read register specified by addr. */
+ ret = ptrace_peekusr(child, addr, (void __user *) data);
+ break;
+
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = generic_ptrace_pokedata(child, addr, data);
+ break;
+
+ case PTRACE_POKEUSR: /* write register specified by addr. */
+ ret = ptrace_pokeusr(child, addr, data);
break;
- }
/* continue and stop at next (return from) syscall */
+
case PTRACE_SYSCALL:
case PTRACE_CONT: /* restart after signal. */
{
@@ -217,98 +310,26 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS:
- {
- /* 'data' points to user memory in which to write.
- * Mainly due to the non-live register values, we
- * reformat the register values into something more
- * standard. For convenience, we use the handy
- * elf_gregset_t format. */
-
- xtensa_gregset_t format;
- struct pt_regs *regs = task_pt_regs(child);
-
- do_copy_regs (&format, regs, child);
-
- /* Now, copy to user space nice and easy... */
- ret = 0;
- if (copy_to_user((void *)data, &format, sizeof(elf_gregset_t)))
- ret = -EFAULT;
+ ret = ptrace_getregs(child, (void __user *) data);
break;
- }
case PTRACE_SETREGS:
- {
- /* 'data' points to user memory that contains the new
- * values in the elf_gregset_t format. */
-
- xtensa_gregset_t format;
- struct pt_regs *regs = task_pt_regs(child);
-
- if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
- ret = -EFAULT;
- break;
- }
-
- /* FIXME: Perhaps we want some sanity checks on
- * these user-space values? See ARM version. Are
- * debuggers a security concern? */
-
- do_restore_regs (&format, regs, child);
-
- ret = 0;
- break;
- }
-
- case PTRACE_GETFPREGS:
- {
- /* 'data' points to user memory in which to write.
- * For convenience, we use the handy
- * elf_fpregset_t format. */
-
- elf_fpregset_t fpregs;
- struct pt_regs *regs = task_pt_regs(child);
-
- do_save_fpregs (&fpregs, regs, child);
-
- /* Now, copy to user space nice and easy... */
- ret = 0;
- if (copy_to_user((void *)data, &fpregs, sizeof(elf_fpregset_t)))
- ret = -EFAULT;
-
+ ret = ptrace_setregs(child, (void __user *) data);
break;
- }
-
- case PTRACE_SETFPREGS:
- {
- /* 'data' points to user memory that contains the new
- * values in the elf_fpregset_t format.
- */
- elf_fpregset_t fpregs;
- struct pt_regs *regs = task_pt_regs(child);
- ret = 0;
- if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) {
- ret = -EFAULT;
- break;
- }
-
- if (do_restore_fpregs (&fpregs, regs, child))
- ret = -EIO;
+ case PTRACE_GETXTREGS:
+ ret = ptrace_getxregs(child, (void __user *) data);
break;
- }
- case PTRACE_GETFPREGSIZE:
- /* 'data' points to 'unsigned long' set to the size
- * of elf_fpregset_t
- */
- ret = put_user(sizeof(elf_fpregset_t), (unsigned long *) data);
+ case PTRACE_SETXTREGS:
+ ret = ptrace_setxregs(child, (void __user *) data);
break;
default:
ret = ptrace_request(child, request, addr, data);
- goto out;
+ break;
}
- out:
+
return ret;
}
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 42d9fd8a4225..299be42d116b 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -35,13 +35,17 @@ asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
extern struct task_struct *coproc_owners[];
-extern void release_all_cp (struct task_struct *);
-
struct rt_sigframe
{
struct siginfo info;
struct ucontext uc;
- cp_state_t cpstate;
+ struct {
+ xtregs_opt_t opt;
+ xtregs_user_t user;
+#if XTENSA_HAVE_COPROCESSORS
+ xtregs_coprocessor_t cp;
+#endif
+ } xtregs;
unsigned char retcode[6];
unsigned int window[4];
};
@@ -132,9 +136,10 @@ errout:
*/
static int
-setup_sigcontext(struct sigcontext __user *sc, cp_state_t *cpstate,
- struct pt_regs *regs)
+setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
{
+ struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+ struct thread_info *ti = current_thread_info();
int err = 0;
#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
@@ -148,21 +153,32 @@ setup_sigcontext(struct sigcontext __user *sc, cp_state_t *cpstate,
err |= flush_window_regs_user(regs);
err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4);
+ err |= __put_user(0, &sc->sc_xtregs);
- // err |= __copy_to_user (sc->sc_a, regs->areg, XCHAL_NUM_AREGS * 4)
+ if (err)
+ return err;
-#if XCHAL_HAVE_CP
-# error Coprocessors unsupported
- err |= save_cpextra(cpstate);
- err |= __put_user(err ? NULL : cpstate, &sc->sc_cpstate);
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_flush_all(ti);
+ coprocessor_release_all(ti);
+ err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
+ sizeof (frame->xtregs.cp));
#endif
+ err |= __copy_to_user(&frame->xtregs.opt, &regs->xtregs_opt,
+ sizeof (xtregs_opt_t));
+ err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
+ sizeof (xtregs_user_t));
+
+ err |= __put_user(err ? NULL : &frame->xtregs, &sc->sc_xtregs);
return err;
}
static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
{
+ struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+ struct thread_info *ti = current_thread_info();
unsigned int err = 0;
unsigned long ps;
@@ -180,6 +196,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
regs->windowbase = 0;
regs->windowstart = 1;
+ regs->syscall = -1; /* disable syscall checks */
+
/* For PS, restore only PS.CALLINC.
* Assume that all other bits are either the same as for the signal
* handler, or the user mode value doesn't matter (e.g. PS.OWB).
@@ -195,8 +213,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4);
-#if XCHAL_HAVE_CP
-# error Coprocessors unsupported
+ if (err)
+ return err;
+
/* The signal handler may have used coprocessors in which
* case they are still enabled. We disable them to force a
* reloading of the original task's CP state by the lazy
@@ -204,20 +223,20 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
* Also, we essentially discard any coprocessor state that the
* signal handler created. */
- if (!err) {
- struct task_struct *tsk = current;
- release_all_cp(tsk);
- err |= __copy_from_user(tsk->thread.cpextra, sc->sc_cpstate,
- XTENSA_CP_EXTRA_SIZE);
- }
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_release_all(ti);
+ err |= __copy_from_user(&ti->xtregs_cp, &frame->xtregs.cp,
+ sizeof (frame->xtregs.cp));
#endif
+ err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user,
+ sizeof (xtregs_user_t));
+ err |= __copy_from_user(&regs->xtregs_opt, &frame->xtregs.opt,
+ sizeof (xtregs_opt_t));
- regs->syscall = -1; /* disable syscall checks */
return err;
}
-
/*
* Do a signal return; undo the signal stack.
*/
@@ -246,7 +265,7 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+ if (restore_sigcontext(regs, frame))
goto badframe;
ret = regs->areg[2];
@@ -359,7 +378,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
err |= __put_user(sas_ss_flags(regs->areg[1]),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
- err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->cpstate, regs);
+ err |= setup_sigcontext(frame, regs);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
/* Create sys_rt_sigreturn syscall in stack frame */
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 6f722f91ba92..c7a021d9f696 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -118,28 +118,28 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
-#if (XCHAL_CP_MASK & 1)
+#if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0),
#endif
-#if (XCHAL_CP_MASK & 2)
+#if XTENSA_HAVE_COPROCESSOR(1)
COPROCESSOR(1),
#endif
-#if (XCHAL_CP_MASK & 4)
+#if XTENSA_HAVE_COPROCESSOR(2)
COPROCESSOR(2),
#endif
-#if (XCHAL_CP_MASK & 8)
+#if XTENSA_HAVE_COPROCESSOR(3)
COPROCESSOR(3),
#endif
-#if (XCHAL_CP_MASK & 16)
+#if XTENSA_HAVE_COPROCESSOR(4)
COPROCESSOR(4),
#endif
-#if (XCHAL_CP_MASK & 32)
+#if XTENSA_HAVE_COPROCESSOR(5)
COPROCESSOR(5),
#endif
-#if (XCHAL_CP_MASK & 64)
+#if XTENSA_HAVE_COPROCESSOR(6)
COPROCESSOR(6),
#endif
-#if (XCHAL_CP_MASK & 128)
+#if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7),
#endif
{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug },