author    Grant Likely <grant.likely@secretlab.ca>	2010-05-22 00:36:56 -0600
committer Grant Likely <grant.likely@secretlab.ca>	2010-05-22 00:36:56 -0600
commit    cf9b59e9d3e008591d1f54830f570982bb307a0d (patch)
tree      113478ce8fd8c832ba726ffdf59b82cb46356476 /arch/sparc
parent    44504b2bebf8b5823c59484e73096a7d6574471d (diff)
parent    f4b87dee923342505e1ddba8d34ce9de33e75050 (diff)
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts
and build failures in vio.c after merge.

Conflicts:
	drivers/i2c/busses/i2c-cpm.c
	drivers/i2c/busses/i2c-mpc.c
	drivers/net/gianfar.c

Also fixed up one line in arch/powerpc/kernel/vio.c to use the correct
node pointer.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig                      |  2
-rw-r--r--  arch/sparc/include/asm/atomic_32.h      |  2
-rw-r--r--  arch/sparc/include/asm/atomic_64.h      |  4
-rw-r--r--  arch/sparc/include/asm/bitops_64.h      | 11
-rw-r--r--  arch/sparc/include/asm/cache.h          |  2
-rw-r--r--  arch/sparc/include/asm/thread_info_32.h |  2
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h |  4
-rw-r--r--  arch/sparc/kernel/irq_64.c              | 20
-rw-r--r--  arch/sparc/kernel/kgdb_32.c             |  6
-rw-r--r--  arch/sparc/kernel/kgdb_64.c             |  6
-rw-r--r--  arch/sparc/kernel/kstack.h              | 19
-rw-r--r--  arch/sparc/kernel/nmi.c                 |  7
-rw-r--r--  arch/sparc/kernel/perf_event.c          | 14
-rw-r--r--  arch/sparc/kernel/process_64.c          |  1
-rw-r--r--  arch/sparc/kernel/rtrap_64.S            | 12
-rw-r--r--  arch/sparc/kernel/stacktrace.c          | 23
-rw-r--r--  arch/sparc/kernel/time_32.c             | 18
-rw-r--r--  arch/sparc/kernel/traps_64.c            | 14
-rw-r--r--  arch/sparc/kernel/unaligned_64.c        |  6
-rw-r--r--  arch/sparc/lib/mcount.S                 |  8
20 files changed, 129 insertions(+), 52 deletions(-)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 9908d477ccd9..d6781ce687e2 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -75,7 +75,7 @@ config ARCH_USES_GETTIMEOFFSET
config GENERIC_CMOS_UPDATE
bool
- default y if SPARC64
+ default y
config GENERIC_CLOCKEVENTS
bool
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index f0d343c3b956..7ae128b19d3f 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -25,7 +25,7 @@ extern int atomic_cmpxchg(atomic_t *, int, int);
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index f2e48009989e..2050ca02c423 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -13,8 +13,8 @@
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
-#define atomic64_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
#define atomic64_set(v, i) (((v)->counter) = i)
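Both atomic_read() hunks above replace a plain dereference with a load through a volatile-qualified pointer, which forces the compiler to emit a fresh memory read at every call site instead of reusing a value cached in a register. A minimal sketch of the failure mode this prevents (illustrative only, assuming the usual kernel headers):

/* With the old plain-dereference atomic_read(), the compiler was
 * free to hoist the load out of the loop and spin on a stale
 * register copy forever; the volatile cast forces a reload on
 * every iteration. */
static void wait_for_flag(atomic_t *v)
{
	while (atomic_read(v) == 0)
		cpu_relax();
}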
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index e72ac9cdfb98..766121a67a24 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -44,7 +44,7 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
#ifdef ULTRA_HAS_POPULATION_COUNT
-static inline unsigned int hweight64(unsigned long w)
+static inline unsigned int __arch_hweight64(unsigned long w)
{
unsigned int res;
@@ -52,7 +52,7 @@ static inline unsigned int hweight64(unsigned long w)
return res;
}
-static inline unsigned int hweight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
@@ -60,7 +60,7 @@ static inline unsigned int hweight32(unsigned int w)
return res;
}
-static inline unsigned int hweight16(unsigned int w)
+static inline unsigned int __arch_hweight16(unsigned int w)
{
unsigned int res;
@@ -68,7 +68,7 @@ static inline unsigned int hweight16(unsigned int w)
return res;
}
-static inline unsigned int hweight8(unsigned int w)
+static inline unsigned int __arch_hweight8(unsigned int w)
{
unsigned int res;
@@ -78,9 +78,10 @@ static inline unsigned int hweight8(unsigned int w)
#else
-#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/arch_hweight.h>
#endif
+#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
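Renaming the POPC-based routines to __arch_hweight*() frees the generic hweight*() names for <asm-generic/bitops/const_hweight.h>, which folds constant arguments at compile time and only sends runtime values to the architecture routines. Roughly, the generic header's dispatch has this shape (a sketch, not its exact text):

/* Constant inputs fold at compile time; runtime inputs fall
 * through to the architecture's popcount implementation. */
#define hweight32(w)				\
	(__builtin_constant_p(w) ?		\
	 __const_hweight32(w) : __arch_hweight32(w))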
diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
index 41f85ae4bd4a..78b07009f60a 100644
--- a/arch/sparc/include/asm/cache.h
+++ b/arch/sparc/include/asm/cache.h
@@ -7,6 +7,8 @@
#ifndef _SPARC_CACHE_H
#define _SPARC_CACHE_H
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES 32
#define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
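Setting ARCH_SLAB_MINALIGN to __alignof__(unsigned long long) guarantees that slab allocations are aligned for the widest scalar type, so a kmalloc()ed object containing a u64 is safe even on 32-bit sparc. An illustrative example (hypothetical struct, not from the patch):

struct sample {
	unsigned long long ticks;	/* demands 8-byte alignment */
};

/* The returned pointer is now guaranteed to satisfy the
 * alignment of the widest member: */
static struct sample *alloc_sample(void)
{
	return kmalloc(sizeof(struct sample), GFP_KERNEL);
}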
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 844d73a0340c..9dd0318d3ddf 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -132,7 +132,7 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
* this quantum (SMP) */
#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
* TIF_NEED_RESCHED */
-#define TIF_MEMDIE 10
+#define TIF_MEMDIE 10 /* is terminating due to OOM killer */
#define TIF_FREEZE 11 /* is freezing for suspend */
/* as above, but as bit values */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 9e2d9447f2ad..fb2ea7705a46 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -111,7 +111,7 @@ struct thread_info {
#define THREAD_SHIFT PAGE_SHIFT
#endif /* PAGE_SHIFT == 13 */
-#define PREEMPT_ACTIVE 0x4000000
+#define PREEMPT_ACTIVE 0x10000000
/*
* macros/functions for gaining access to the thread information structure
@@ -223,7 +223,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
* an immediate value in instructions such as andcc.
*/
/* flag bit 12 is available */
-#define TIF_MEMDIE 13
+#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG 14
#define TIF_FREEZE 15 /* is freezing for suspend */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 454ce3a25273..830d70a3e20b 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
+#include <linux/kmemleak.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
@@ -46,6 +47,7 @@
#include "entry.h"
#include "cpumap.h"
+#include "kstack.h"
#define NUM_IVECS (IMAP_INR + 1)
@@ -712,24 +714,6 @@ void ack_bad_irq(unsigned int virt_irq)
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
- void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
- __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
- if (orig_sp < sp ||
- orig_sp > (sp + THREAD_SIZE)) {
- sp += THREAD_SIZE - 192 - STACK_BIAS;
- __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
- }
-
- return orig_sp;
-}
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
- __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}
-
void __irq_entry handler_irq(int irq, struct pt_regs *regs)
{
unsigned long pstate, bucket_pa;
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index 04df4edc0073..539243b236fa 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -158,6 +158,12 @@ void kgdb_arch_exit(void)
{
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->pc = ip;
+ regs->npc = regs->pc + 4;
+}
+
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x7d */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 0a2bd0f99fc1..768290a6c028 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -181,6 +181,12 @@ void kgdb_arch_exit(void)
{
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->tpc = ip;
+ regs->tnpc = regs->tpc + 4;
+}
+
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x72 */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 5247283d1c03..53dfb92e09fb 100644
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ -61,4 +61,23 @@ check_magic:
}
+static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+ void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+ __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+ if (orig_sp < sp ||
+ orig_sp > (sp + THREAD_SIZE)) {
+ sp += THREAD_SIZE - 192 - STACK_BIAS;
+ __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+ }
+
+ return orig_sp;
+}
+
+static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+ __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
#endif /* _KSTACK_H */
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 75a3d1a25356..a4bd7ba74c89 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -23,6 +23,8 @@
#include <asm/ptrace.h>
#include <asm/pcr.h>
+#include "kstack.h"
+
/* We don't have a real NMI on sparc64, but we can fake one
* up using profiling counter overflow interrupts and interrupt
* levels.
@@ -92,6 +94,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
unsigned int sum, touched = 0;
+ void *orig_sp;
clear_softint(1 << irq);
@@ -99,6 +102,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
nmi_enter();
+ orig_sp = set_hardirq_stack();
+
if (notify_die(DIE_NMI, "nmi", regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
touched = 1;
@@ -124,6 +129,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
pcr_ops->write(pcr_enable);
}
+ restore_hardirq_stack(orig_sp);
+
nmi_exit();
}
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e2771939341d..34ce49f80eac 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -14,6 +14,7 @@
#include <linux/perf_event.h>
#include <linux/kprobes.h>
+#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
@@ -1276,6 +1277,9 @@ static void perf_callchain_kernel(struct pt_regs *regs,
struct perf_callchain_entry *entry)
{
unsigned long ksp, fp;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ int graph = 0;
+#endif
callchain_store(entry, PERF_CONTEXT_KERNEL);
callchain_store(entry, regs->tpc);
@@ -1303,6 +1307,16 @@ static void perf_callchain_kernel(struct pt_regs *regs,
fp = (unsigned long)sf->fp + STACK_BIAS;
}
callchain_store(entry, pc);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+ int index = current->curr_ret_stack;
+ if (current->ret_stack && index >= graph) {
+ pc = current->ret_stack[index - graph].ret;
+ callchain_store(entry, pc);
+ graph++;
+ }
+ }
+#endif
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index a5cf3864b31f..dbe81a368b45 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -202,6 +202,7 @@ void show_regs(struct pt_regs *regs)
regs->u_regs[15]);
printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
show_regwindow(regs);
+ show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
}
struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 83f1873c6c13..090b9e9ad5e3 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -130,7 +130,17 @@ rtrap_xcall:
nop
call trace_hardirqs_on
nop
- wrpr %l4, %pil
+ /* Do not actually set the %pil here. We will do that
+ * below after we clear PSTATE_IE in the %pstate register.
+ * If we re-enable interrupts here, we can recurse down
+ * the hardirq stack potentially endlessly, causing a
+ * stack overflow.
+ *
+ * It is tempting to put this test and trace_hardirqs_on
+ * call at the 'rt_continue' label, but that will not work
+ * as that path hits unconditionally and we do not want to
+ * execute this in NMI return paths, for example.
+ */
#endif
rtrap_no_irq_enable:
andcc %l1, TSTATE_PRIV, %l3
diff --git a/arch/sparc/kernel/stacktrace.c b/arch/sparc/kernel/stacktrace.c
index acb12f673757..3e0815349630 100644
--- a/arch/sparc/kernel/stacktrace.c
+++ b/arch/sparc/kernel/stacktrace.c
@@ -1,6 +1,7 @@
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
+#include <linux/ftrace.h>
#include <linux/module.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
@@ -12,6 +13,10 @@ static void __save_stack_trace(struct thread_info *tp,
bool skip_sched)
{
unsigned long ksp, fp;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ struct task_struct *t;
+ int graph = 0;
+#endif
if (tp == current_thread_info()) {
stack_trace_flush();
@@ -21,6 +26,9 @@ static void __save_stack_trace(struct thread_info *tp,
}
fp = ksp + STACK_BIAS;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ t = tp->task;
+#endif
do {
struct sparc_stackf *sf;
struct pt_regs *regs;
@@ -44,8 +52,21 @@ static void __save_stack_trace(struct thread_info *tp,
if (trace->skip > 0)
trace->skip--;
- else if (!skip_sched || !in_sched_functions(pc))
+ else if (!skip_sched || !in_sched_functions(pc)) {
trace->entries[trace->nr_entries++] = pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+ int index = t->curr_ret_stack;
+ if (t->ret_stack && index >= graph) {
+ pc = t->ret_stack[index - graph].ret;
+ if (trace->nr_entries <
+ trace->max_entries)
+ trace->entries[trace->nr_entries++] = pc;
+ graph++;
+ }
+ }
+#endif
+ }
} while (trace->nr_entries < trace->max_entries);
}
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 217ba275cabf..e404b063be2c 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -78,6 +78,11 @@ __volatile__ unsigned int *master_l10_counter;
u32 (*do_arch_gettimeoffset)(void);
+int update_persistent_clock(struct timespec now)
+{
+ return set_rtc_mmss(now.tv_sec);
+}
+
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -87,9 +92,6 @@ u32 (*do_arch_gettimeoffset)(void);
static irqreturn_t timer_interrupt(int dummy, void *dev_id)
{
- /* last time the cmos clock got updated */
- static long last_rtc_update;
-
#ifndef CONFIG_SMP
profile_tick(CPU_PROFILING);
#endif
@@ -101,16 +103,6 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id)
do_timer(1);
- /* Determine when to update the Mostek clock. */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
- }
write_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
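Together with the Kconfig hunk that makes GENERIC_CMOS_UPDATE unconditional, the new update_persistent_clock() hook lets the generic NTP core keep the Mostek clock current, replacing the deleted open-coded 660-second test in timer_interrupt(). A simplified sketch of the generic caller's condition (from memory of the era's kernel/time/ntp.c, so treat as approximate):

/* Fire only near the half-second boundary, because the RTC
 * latches the new value on the following second tick: */
if (ntp_synced() &&
    abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
	update_persistent_clock(now);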
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 9da57f032983..42ad2ba85010 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -17,6 +17,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kdebug.h>
+#include <linux/ftrace.h>
#include <linux/gfp.h>
#include <asm/smp.h>
@@ -2154,6 +2155,9 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
unsigned long fp, thread_base, ksp;
struct thread_info *tp;
int count = 0;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ int graph = 0;
+#endif
ksp = (unsigned long) _ksp;
if (!tsk)
@@ -2193,6 +2197,16 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
}
printk(" [%016lx] %pS\n", pc, (void *) pc);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+ int index = tsk->curr_ret_stack;
+ if (tsk->ret_stack && index >= graph) {
+ pc = tsk->ret_stack[index - graph].ret;
+ printk(" [%016lx] %pS\n", pc, (void *) pc);
+ graph++;
+ }
+ }
+#endif
} while (++count < 16);
}
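The perf_event.c, stacktrace.c, and traps_64.c hunks above all apply the same fix: when a saved return address points eight bytes before return_to_handler, the real caller was diverted by the function graph tracer and must be recovered from the task's ret_stack, consuming one entry per occurrence as the walk goes deeper. A hypothetical helper capturing the logic the three call sites open-code (not part of the patch):

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Map a tracer trampoline address back to the real return
 * address; 'graph' counts ret_stack entries already consumed. */
static unsigned long graph_ret_addr(struct task_struct *t, int *graph,
				    unsigned long pc)
{
	int index = t->curr_ret_stack;

	if ((pc + 8UL) == (unsigned long) &return_to_handler &&
	    t->ret_stack && index >= *graph)
		return t->ret_stack[index - (*graph)++].ret;
	return pc;
}
#endif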
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index ebce43018c49..c752c4c479bd 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -50,7 +50,7 @@ static inline enum direction decode_direction(unsigned int insn)
}
/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
-static inline int decode_access_size(unsigned int insn)
+static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
unsigned int tmp;
@@ -66,7 +66,7 @@ static inline int decode_access_size(unsigned int insn)
return 2;
else {
printk("Impossible unaligned trap. insn=%08x\n", insn);
- die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
+ die_if_kernel("Byte sized unaligned access?!?!", regs);
/* GCC should never warn that control reaches the end
* of this function without returning a value because
@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir = decode_direction(insn);
- int size = decode_access_size(insn);
+ int size = decode_access_size(regs, insn);
int orig_asi, asi;
current_thread_info()->kern_una_regs = regs;
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 3753e3c6e176..3ad6cbdc2163 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -34,7 +34,7 @@ mcount:
cmp %g1, %g2
be,pn %icc, 1f
mov %i7, %g3
- save %sp, -128, %sp
+ save %sp, -176, %sp
mov %g3, %o1
jmpl %g1, %o7
mov %i7, %o0
@@ -56,7 +56,7 @@ mcount:
nop
5: mov %i7, %g2
mov %fp, %g3
- save %sp, -128, %sp
+ save %sp, -176, %sp
mov %g2, %l0
ba,pt %xcc, ftrace_graph_caller
mov %g3, %l1
@@ -85,7 +85,7 @@ ftrace_caller:
lduw [%g1 + %lo(function_trace_stop)], %g1
brnz,pn %g1, ftrace_stub
mov %fp, %g3
- save %sp, -128, %sp
+ save %sp, -176, %sp
mov %g2, %o1
mov %g2, %l0
mov %g3, %l1
@@ -120,7 +120,7 @@ ENTRY(ftrace_graph_caller)
END(ftrace_graph_caller)
ENTRY(return_to_handler)
- save %sp, -128, %sp
+ save %sp, -176, %sp
call ftrace_return_to_handler
mov %fp, %o0
jmpl %o0 + 8, %g0
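All four save instructions grow their frames from 128 to 176 bytes because 128 is only the 32-bit minimum; the figures below assume the standard SPARC V9 frame layout:

	16 window-save slots (%l0-%l7, %i0-%i7) * 8 bytes = 128
	 6 outgoing-argument slots              * 8 bytes =  48
	                                    minimum frame = 176

With only -128 there is no room for the argument-slot area, so a callee that spills its incoming arguments to the stack would write past the undersized frame into the caller's data.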