author	Matt Fleming <matt@console-pimps.org>	2009-08-16 21:54:48 +0100
committer	Matt Fleming <matt@console-pimps.org>	2009-08-21 13:02:44 +0100
commit	b344e24a8e8ceda83d1285d22e3e5baf4f5e42d3 (patch)
tree	4b9500264a797736b48b59c3f0977277ace53386 /arch/sh/kernel/unwinder.c
parent	97efbbd5886e27b61c19c77d41f6491f5d96fbd0 (diff)
sh: unwinder: Introduce UNWINDER_BUG() and UNWINDER_BUG_ON()
We can't assume that, just because the unwinder code is executing while the unwinder is already running, the unwinder has faulted. Two kernel threads can legitimately invoke the unwinder at the same time and run simultaneously.

The previous approach used BUG() and BUG_ON() in the unwinder code both to detect that the current unwinder was incapable of unwinding the stack and to signal that the next available unwinder should be used instead.

A better approach is to explicitly invoke a trap handler to switch unwinders when the current unwinder cannot continue.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
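The UNWINDER_BUG()/UNWINDER_BUG_ON() macros named in the subject line are defined outside this file, so the snippet below is only a hedged sketch of how an unwinder implementation might use them: UNWINDER_BUG_ON() raises the unwinder trap instead of calling BUG_ON(), and the trap handler added in this diff then arranges for unwind_stack() to fall back to the next-best unwinder. The dump() callback shape follows the curr_unwinder->dump() call in this file; the example_* names, the rating value, and the struct unwinder field names are assumptions for illustration only.

#include <linux/kernel.h>
#include <asm/unwinder.h>

/* Hypothetical unwinder back end; not part of this commit. */
static void example_dump(struct task_struct *task, struct pt_regs *regs,
			 unsigned long *sp, const struct stacktrace_ops *ops,
			 void *data)
{
	/*
	 * Assumed usage: raise the unwinder trap rather than BUG() so
	 * that the trap handler sets unwinder_faulted and the next
	 * unwind_stack() call downgrades to a lower-rated unwinder.
	 */
	UNWINDER_BUG_ON(sp == NULL);

	/* ... walk the stack frames and report them through ops ... */
}

static struct unwinder example_unwinder = {
	.name	= "example",	/* hypothetical */
	.rating	= 50,		/* hypothetical rating */
	.dump	= example_dump,
};

Such an unwinder would be registered with unwinder_register(), the function touched by the second hunk below.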
Diffstat (limited to 'arch/sh/kernel/unwinder.c')
-rw-r--r--	arch/sh/kernel/unwinder.c	33
1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c
index 2b30fa28b440..b9c122abe251 100644
--- a/arch/sh/kernel/unwinder.c
+++ b/arch/sh/kernel/unwinder.c
@@ -53,8 +53,6 @@ static struct list_head unwinder_list = {
 
 static DEFINE_SPINLOCK(unwinder_lock);
 
-static atomic_t unwinder_running = ATOMIC_INIT(0);
-
 /**
  * select_unwinder - Select the best registered stack unwinder.
  *
@@ -122,6 +120,8 @@ int unwinder_register(struct unwinder *u)
 	return ret;
 }
 
+int unwinder_faulted = 0;
+
 /*
  * Unwind the call stack and pass information to the stacktrace_ops
  * functions. Also handle the case where we need to switch to a new
@@ -144,19 +144,40 @@ void unwind_stack(struct task_struct *task, struct pt_regs *regs,
 	 * Hopefully this will give us a semi-reliable stacktrace so we
 	 * can diagnose why curr_unwinder->dump() faulted.
 	 */
-	if (atomic_inc_return(&unwinder_running) != 1) {
+	if (unwinder_faulted) {
 		spin_lock_irqsave(&unwinder_lock, flags);
 
-		if (!list_is_singular(&unwinder_list)) {
+		/* Make sure no one beat us to changing the unwinder */
+		if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
 			list_del(&curr_unwinder->list);
 			curr_unwinder = select_unwinder();
+
+			unwinder_faulted = 0;
 		}
 
 		spin_unlock_irqrestore(&unwinder_lock, flags);
-		atomic_dec(&unwinder_running);
 	}
 
 	curr_unwinder->dump(task, regs, sp, ops, data);
+}
+
+/*
+ * Trap handler for UNWINDER_BUG() statements. We must switch to the
+ * unwinder with the next highest rating.
+ */
+BUILD_TRAP_HANDLER(unwinder)
+{
+	insn_size_t insn;
+	TRAP_HANDLER_DECL;
+
+	/* Rewind */
+	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+	insn = *(insn_size_t *)instruction_pointer(regs);
+
+	/* Switch unwinders when unwind_stack() is called */
+	unwinder_faulted = 1;
 
-	atomic_dec(&unwinder_running);
+#ifdef CONFIG_BUG
+	handle_BUG(regs);
+#endif
 }