trace_stack.c
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif

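/*
 * The recorded trace lives in two parallel arrays: stack_dump_trace[]
 * holds the return addresses (terminated by ULONG_MAX, which the whole
 * array is initialized to) and stack_dump_index[] holds the stack depth
 * at which each address was found.
 */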
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

static unsigned long max_stack_size;
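/*
 * A raw arch_spinlock_t is used rather than a normal spinlock so that
 * taking the lock from inside the function tracer callback avoids the
 * lockdep and tracing hooks a regular spinlock would trigger.
 */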
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

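/*
 * trace_active is a per-cpu recursion counter: stack_trace_call()
 * increments it on entry and bails out if it was already non-zero,
 * so the stack tracer never traces itself on the same CPU.
 */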
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
				stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
				size, (void *)stack_dump_trace[i]);
	}
}

static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
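	/*
	 * Read tracer_frame once up front: another CPU may be in the
	 * middle of computing it, and the checks below should all see
	 * one consistent snapshot.
	 */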
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;

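	/*
	 * Skip the entries that belong to the tracing machinery itself;
	 * one extra frame is present when the callback is dispatched
	 * through the ftrace ops list function.
	 */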
	if (using_ftrace_ops_list_func())
		max_stack_trace.skip = 4;
	else
		max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. Some entries may for
	 * some reason be missing from the stack, so we have to
	 * account for them. If they are all there, this loop
	 * will only run once. This code only executes on a new
	 * max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

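		/*
		 * Scan the live stack words for the return address
		 * recorded at entry i; its offset from the top of the
		 * stack gives the depth at which that frame sits.
		 */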
		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed; this variable is only modified by this CPU */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * If we traced inside arch_spin_lock() or in an NMI after it,
	 * we would deadlock on max_stack_lock, so the per-cpu
	 * trace_active count must be raised here as well.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

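/*
 * seq_file iterator helpers: the current entry index is stored in
 * m->private as a plain value, and its address is handed back as the
 * iteration cookie.
 */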
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

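/*
 * t_start()/t_stop() bracket the whole seq_file walk: max_stack_lock
 * is held and trace_active raised for the duration, so the recorded
 * trace can neither change nor be re-entered while it is printed.
 */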
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

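	/*
	 * A frame's size is the difference between its recorded depth
	 * and the next entry's depth; the last entry is charged its
	 * whole remaining depth.
	 */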
	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

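/*
 * "stack_trace_filter" reuses the ftrace regex filter machinery to
 * restrict which functions the stack tracer callback is attached to.
 */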
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

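/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: register or
 * unregister the ftrace callback only when the value actually flips.
 */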
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

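/*
 * "stacktrace" on the kernel command line enables the tracer at boot;
 * "stacktrace_filter=<funcs>" additionally seeds the function filter
 * that stack_trace_init() applies.
 */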
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);