Diffstat (limited to 'kernel/trace/trace_stack.c')
-rw-r--r--  kernel/trace/trace_stack.c | 70
1 file changed, 55 insertions(+), 15 deletions(-)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 3bdb44bde4b7..d0871bc0aca5 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -10,6 +10,7 @@
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include "trace.h"
@@ -31,6 +32,10 @@ static raw_spinlock_t max_stack_lock =
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
+static DEFINE_MUTEX(stack_sysctl_mutex);
+
+int stack_tracer_enabled;
+static int last_stack_tracer_enabled;
 
 static inline void check_stack(void)
 {
@@ -48,7 +53,7 @@ static inline void check_stack(void)
 	if (!object_is_on_stack(&this_size))
 		return;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&max_stack_lock);
 
 	/* a race could have already updated it */
@@ -78,6 +83,7 @@ static inline void check_stack(void)
 	 * on a new max, so it is far from a fast path.
 	 */
 	while (i < max_stack_trace.nr_entries) {
+		int found = 0;
 
 		stack_dump_index[i] = this_size;
 		p = start;
@@ -86,17 +92,19 @@ static inline void check_stack(void)
 			if (*p == stack_dump_trace[i]) {
 				this_size = stack_dump_index[i++] =
 					(top - p) * sizeof(unsigned long);
+				found = 1;
 				/* Start the search from here */
 				start = p + 1;
 			}
 		}
 
-		i++;
+		if (!found)
+			i++;
 	}
 
  out:
 	__raw_spin_unlock(&max_stack_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 
 static void
@@ -107,8 +115,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
 		return;
 
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 	/* no atomic needed, we only modify this variable by this cpu */
@@ -120,10 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
 	per_cpu(trace_active, cpu)--;
 	/* prevent recursion in schedule */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
@@ -166,16 +170,16 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	if (ret < 0)
 		return ret;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&max_stack_lock);
 	*ptr = val;
 	__raw_spin_unlock(&max_stack_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
 	return count;
 }
 
-static struct file_operations stack_max_size_fops = {
+static const struct file_operations stack_max_size_fops = {
 	.open		= tracing_open_generic,
 	.read		= stack_max_size_read,
 	.write		= stack_max_size_write,
@@ -273,7 +277,7 @@ static int t_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static struct seq_operations stack_trace_seq_ops = {
+static const struct seq_operations stack_trace_seq_ops = {
 	.start		= t_start,
 	.next		= t_next,
 	.stop		= t_stop,
@@ -289,12 +293,47 @@ static int stack_trace_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
-static struct file_operations stack_trace_fops = {
+static const struct file_operations stack_trace_fops = {
 	.open		= stack_trace_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 };
 
+int
+stack_trace_sysctl(struct ctl_table *table, int write,
+		   struct file *file, void __user *buffer, size_t *lenp,
+		   loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&stack_sysctl_mutex);
+
+	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+
+	if (ret || !write ||
+	    (last_stack_tracer_enabled == stack_tracer_enabled))
+		goto out;
+
+	last_stack_tracer_enabled = stack_tracer_enabled;
+
+	if (stack_tracer_enabled)
+		register_ftrace_function(&trace_ops);
+	else
+		unregister_ftrace_function(&trace_ops);
+
+ out:
+	mutex_unlock(&stack_sysctl_mutex);
+	return ret;
+}
+
+static __init int enable_stacktrace(char *str)
+{
+	stack_tracer_enabled = 1;
+	last_stack_tracer_enabled = 1;
+	return 1;
+}
+__setup("stacktrace", enable_stacktrace);
+
 static __init int stack_trace_init(void)
 {
 	struct dentry *d_tracer;
@@ -312,7 +351,8 @@ static __init int stack_trace_init(void)
 	if (!entry)
 		pr_warning("Could not create debugfs 'stack_trace' entry\n");
 
-	register_ftrace_function(&trace_ops);
+	if (stack_tracer_enabled)
+		register_ftrace_function(&trace_ops);
 
 	return 0;
 }
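Two of the hunks above replace the open-coded need_resched()/preempt_disable_notrace() sequence in stack_trace_call() with the ftrace_preempt_disable()/ftrace_preempt_enable() pair. As a sketch, assuming the helpers in kernel/trace/trace.h of this era, they wrap exactly the sequence being removed:

/*
 * Sketch of the helper pair the patch switches to; the real
 * definitions live in kernel/trace/trace.h. Shown only to make the
 * substitution easy to verify against the removed lines.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched = need_resched();

	preempt_disable_notrace();
	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

The point of the resched check is the "prevent recursion in schedule" comment in the hunk: if NEED_RESCHED was already set when the tracer hooked in, preemption is re-enabled without rescheduling so the tracer itself never calls into the scheduler.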

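The new stack_trace_sysctl() handler is only half of the wiring: the ctl_table entry that points at it lives in kernel/sysctl.c and is outside this diffstat, which is limited to trace_stack.c. A minimal sketch of what that companion entry would look like, with the field values inferred from the handler's use of proc_dointvec() on stack_tracer_enabled; treat the ctl_name and mode as assumptions:

#ifdef CONFIG_STACK_TRACER
	{
		/* assumed companion entry in kernel/sysctl.c's kern_table */
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "stack_tracer_enabled",
		.data		= &stack_tracer_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &stack_trace_sysctl,
	},
#endif

With such an entry registered, the tracer can be toggled at run time through /proc/sys/kernel/stack_tracer_enabled (write 1 to register trace_ops, 0 to unregister it), while the __setup("stacktrace") hook enables it from the kernel command line. That is also why stack_trace_init() now registers trace_ops only when stack_tracer_enabled is set, instead of unconditionally.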