trace_stack.c
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
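
/*
 * The stack tracer hooks into the function tracer and records the
 * deepest kernel stack usage it has seen.  The result is exported via
 * the stack_trace and stack_max_size files in the tracing directory,
 * and the tracer is toggled through /proc/sys/kernel/stack_tracer_enabled
 * or the "stacktrace" kernel command line parameter.
 */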
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

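/*
 * With -mfentry the tracer hook is called before the traced function
 * has set up its stack frame; stack_trace_call() below uses this flag
 * to decide whether to key off the parent ip or the mcount call site.
 */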
#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif

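/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far, terminated by ULONG_MAX.  stack_dump_index[] holds, for
 * each entry, the number of bytes from that entry's location to the
 * top of the stack.
 */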
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
				stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
				size, (void *)stack_dump_trace[i]);
	}
}

static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

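	/*
	 * The stack grows down, so the amount in use is the distance
	 * from the current stack location to the top of the
	 * THREAD_SIZE-aligned stack area.
	 */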
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;

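	/*
	 * Skip the frames added by the stack tracer itself; one extra
	 * frame is on the stack when the ftrace list func is in use.
	 */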
	if (using_ftrace_ops_list_func())
		max_stack_trace.skip = 4;
	else
		max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for that. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

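	/* A clobbered stack-end canary means we likely overflowed the stack. */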
	if (*end_of_stack(current) != STACK_END_MAGIC) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we would cause a circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

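	/* Stash the entry index in m->private; t_show() reads it back. */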
	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

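	/*
	 * Bump trace_active so the stack tracer callback does not try to
	 * take max_stack_lock while we hold it.
	 */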
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

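/*
 * stack_trace_filter reuses the ftrace filter machinery so the stack
 * tracer can be limited to a chosen set of functions.
 */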
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

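/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: register or
 * unregister the ftrace callback only when the value actually changes.
 */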
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

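/* Handle the "stacktrace" and "stacktrace_filter=" boot parameters. */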
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);