/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
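/*
 * Overview (editor's summary, derived from the code below): the stack
 * tracer records the deepest kernel stack usage observed. It hooks
 * function entry via ftrace (stack_trace_call()), measures how deep
 * the current stack is, and on a new maximum saves the backtrace
 * together with a per-frame size breakdown.
 *
 * User-visible interfaces implemented in this file:
 *   - "stacktrace" (optionally "stacktrace_filter=<funcs>") on the
 *     kernel command line enables the tracer at boot
 *   - /proc/sys/kernel/stack_tracer_enabled toggles it at run time
 *   - stack_trace, stack_max_size and stack_trace_filter in the
 *     tracing directory expose and tune the recorded maximum
 */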
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(trace_active);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(trace_active);
}
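
/*
 * Typical usage, as a sketch (callers in RCU look roughly like this):
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	... section that must not be stack traced ...
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */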

void stack_trace_print(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == stack_trace_max.nr_entries ||
				stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
				size, (void *)stack_dump_trace[i]);
	}
}

/*
 * When arch-specific code overrides this function, the following
 * data should be filled in, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/*
	 * RCU may not be watching, make it see us.
	 * The stack trace code uses rcu_sched.
	 */
	rcu_irq_enter();

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_max.nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. Some entries may for some
	 * reason be missing from the stack, so we have to account
	 * for them. If they are all there, this loop will only happen
	 * once. This code only takes place on a new max, so it is far
	 * from a fast path.
	 */
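	/*
	 * Worked example with hypothetical numbers: if the saved trace
	 * holds three entries whose return addresses are found 520, 344
	 * and 200 bytes below the top of the stack, stack_trace_index[]
	 * becomes {520, 344, 200}, and the "Size" column printed later
	 * is the difference of neighbouring depths: 176, 144 and 200.
	 */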
	while (i < stack_trace_max.nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	stack_trace_max.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		stack_trace_print();
		BUG();
	}

 out:
	rcu_irq_exit();
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

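/*
 * The ftrace callback, invoked on entry to every traced function.
 * The address of the local variable 'stack' is handed to check_stack()
 * as a probe of the current stack position; the per-cpu trace_active
 * count keeps the tracer from recursing into itself.
 */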
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed, we only modify this variable by this cpu */
	__this_cpu_inc(trace_active);
	if (__this_cpu_read(trace_active) != 1)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(trace_active);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

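/*
 * tracefs read handler for stack_max_size; filp->private_data points
 * at stack_trace_max_size (see stack_trace_init() below).
 */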
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	__this_cpu_inc(trace_active);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(trace_active);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

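/*
 * seq_file iterator for the stack_trace file. t_start() takes
 * stack_trace_max_lock (and bumps trace_active so the tracer cannot
 * deadlock on that lock) for the whole walk; __next() maps the
 * sequence position onto an index into stack_dump_trace[].
 */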
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(trace_active);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(trace_active);

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

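/*
 * Emit the header, then one line per entry. The output looks roughly
 * like this (values and symbols are illustrative only):
 *
 *         Depth    Size   Location    (3 entries)
 *         -----    ----   --------
 *   0)     4360     192   ftrace_call+0x5/0x2f
 *   1)     4168     168   _raw_spin_lock+0xe/0x30
 *   2)     4000    4000   kthread+0xd2/0xe0
 */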
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

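/*
 * stack_trace_filter reuses the standard ftrace filter machinery to
 * limit which functions trigger the stack check.
 */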
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

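/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: registers the
 * ftrace callback when the value flips to 1 and unregisters it when
 * the value flips back to 0.
 */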
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

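/*
 * "stacktrace" on the kernel command line enables the tracer at boot;
 * "stacktrace_filter=<functions>" additionally seeds the function
 * filter applied in stack_trace_init().
 */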
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

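/*
 * Create the control files in the tracing directory and, if requested
 * on the command line, apply the early filter and start tracing.
 */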
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);