/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

#define STACK_TRACE_ENTRIES 500

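/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far, terminated by ULONG_MAX.  stack_dump_index[i] records
 * the stack depth, in bytes, at which entry i was found; t_show()
 * later turns adjacent depths into per-function sizes.
 */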
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

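/*
 * check_stack() measures how much of the current thread's stack is in
 * use.  When that exceeds the recorded maximum, it takes
 * max_stack_lock, saves a fresh stack trace and walks the stack to
 * work out how deep the stack was at each saved entry.  The result is
 * what the stack_trace and stack_max_size files report.
 */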
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may,
	 * for some reason, not be found on the stack, so we may
	 * have to account for that. If they are all found, this
	 * loop will only happen once. This code only runs on a
	 * new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

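/*
 * The ftrace callback, invoked on (nearly) every traced function
 * entry.  The per-cpu trace_active counter guards against recursion,
 * since check_stack() itself calls functions that can be traced.
 */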
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu, resched;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable from this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

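/*
 * Read handler for the stack_max_size debugfs file: reports the
 * largest stack usage recorded so far, in bytes.
 */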
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

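/*
 * Write handler for stack_max_size.  Writing a number (typically 0,
 * to reset the watermark) replaces the recorded maximum.
 *
 * A sketch of typical usage, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *	# echo 0 > /sys/kernel/debug/tracing/stack_max_size
 *	# cat /sys/kernel/debug/tracing/stack_max_size
 */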
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;
	int cpu;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	local_irq_save(flags);

	/*
	 * If we trace inside arch_spin_lock(), or in an NMI taken
	 * after it, we would deadlock on the lock, so we also need
	 * to increase the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
};

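/*
 * seq_file iterator for the stack_trace debugfs file.  The iterator
 * position (minus one for the header line) indexes stack_dump_trace[];
 * __next() stashes the index in m->private so t_show() can pick it up.
 */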
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

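/*
 * t_start()/t_stop() bracket the whole traversal: interrupts are
 * disabled, trace_active is bumped so the tracer callback cannot
 * recurse on max_stack_lock, and the lock is held so the snapshot
 * stays consistent while it is printed.
 */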
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pF\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

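/*
 * Print one line of the stack_trace report.  "Depth" is the stack
 * usage at the point the function was called; "Size" is the
 * difference between this entry's depth and the next one's, i.e. an
 * estimate of the function's own stack footprint.
 */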
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

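/*
 * Handler for the stack_tracer_enabled sysctl
 * (/proc/sys/kernel/stack_tracer_enabled).  When the value actually
 * changes, the ftrace callback is registered or unregistered to match.
 */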
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

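/*
 * "stacktrace" on the kernel command line enables the stack tracer at
 * boot; stack_trace_init() then registers the ftrace callback.
 */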
static __init int enable_stacktrace(char *str)
{
	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

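/*
 * Create the debugfs control files and, if the tracer was enabled on
 * the command line, register the ftrace callback.
 */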
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);