/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}
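
/*
 * The check is cheap enough for hot paths; the typical pattern (the
 * same one used by ftrace_push_return_trace() later in this file) is:
 *
 *	if (unlikely(ftrace_graph_is_dead()))
 *		return -EBUSY;
 */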

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;
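
/*
 * Note: in kernels of this vintage, max_depth is set from user space
 * through the max_graph_depth tracefs file (0 means no limit); the
 * file handlers are defined later in this file, outside this excerpt.
 */
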
static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};
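
/*
 * Example usage (illustrative, not part of this file): with the
 * function_graph tracer selected, these options can be toggled at
 * run time through tracefs:
 *
 *	# cd /sys/kernel/debug/tracing
 *	# echo function_graph > current_tracer
 *	# echo funcgraph-proc > trace_options
 *	# echo nofuncgraph-irqs > trace_options
 */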

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * curr_ret_stack is an index into the ftrace return stack of
	 * the current task.  Its value should be in
	 * [0, FTRACE_RETFUNC_DEPTH) when the function graph tracer is
	 * used.  To support filtering out specific functions, the
	 * index is made negative by subtracting a huge value
	 * (FTRACE_NOTRACE_DEPTH), so when ftrace sees a negative index
	 * it ignores the record.  The index is recovered when
	 * returning from the filtered function by adding
	 * FTRACE_NOTRACE_DEPTH back, after which functions are
	 * recorded normally again.
	 *
	 * curr_ret_stack is initialized to -1 and gets incremented
	 * in this function.  So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}
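
/*
 * Illustration of the set_graph_notrace index trick (values assume
 * the usual FTRACE_RETFUNC_DEPTH of 50 and FTRACE_NOTRACE_DEPTH of
 * 65536; see <linux/ftrace.h> for the real definitions): pushing a
 * filtered function at index 3 leaves curr_ret_stack at
 * 3 - 65536 = -65533.  The entry paths above then reject further
 * records because the index is negative, and
 * ftrace_return_to_handler() below adds 65536 back, restoring index 3
 * so tracing continues normally.
 */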

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function.  Recover the index to get the original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 optimized for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
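
/*
 * Context note (an assumption about the surrounding kernel, not
 * stated in this file): ftrace_return_to_handler() is called from the
 * arch's return trampoline, which the arch entry hook (e.g.
 * prepare_ftrace_return() on x86) substituted for the real return
 * address at function entry; the address returned here is where the
 * trampoline finally jumps back to.
 */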

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it only if it is nested in, or itself is, an enabled function. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
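
/*
 * Note on the per-CPU "disabled" counter above: it is a reentrancy
 * guard rather than an on/off switch.  Only the context that raises
 * it from 0 to 1 records an event; a context that nests on the same
 * CPU (e.g. an NMI arriving here, which local_irq_save() does not
 * block) sees a higher value and backs off.
 */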

static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
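
/*
 * Example usage (illustrative): the thresh variants above are chosen
 * when tracing_thresh is non-zero at init time, so only functions
 * slower than the threshold get logged.  Note the threshold must be
 * set before the tracer is selected, since it is checked here:
 *
 *	# cd /sys/kernel/debug/tracing
 *	# echo 100 > tracing_thresh		(microseconds)
 *	# echo function_graph > current_tracer
 */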

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
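
/*
 * Example (illustrative): pid 1755 running "sshd" is printed as
 * "  sshd-1755   ", centered in the TRACE_GRAPH_PROCINFO_LENGTH (14)
 * column, with the comm truncated to 7 characters above.
 */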

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
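
/*
 * Illustration: a "leaf" is an entry event whose very next event in
 * the buffer is its own return (same pid, same function).  The pair
 * is folded into a single line of output, e.g.
 *
 *	1)   0.574 us    |  kfree();
 *
 * instead of a separate "kfree() {" line and a closing "}" line.
 */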

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
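
/*
 * Example (illustrative): a duration of 3480 ns prints as "3.480 us"
 * (the microsecond part, a '.', then up to three nanosecond digits),
 * space-padded so the numeric field occupies a fixed width.
 */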

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}
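
/*
 * The overhead mark printed above comes from trace_find_mark(); as
 * documented in Documentation/trace/ftrace.txt, '+' flags a duration
 * over 10 usecs, '!' over 100 usecs, '#' over 1000 usecs, '*' over
 * 10 msecs, '@' over 100 msecs and '$' over 1 second.
 */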

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, so we now want the comments
		 * to display at the same level as the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}
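
/*
 * Example return lines (illustrative): by default a bare closing
 * brace is emitted, e.g.
 *
 *	1) + 10.486 us   |  }
 *
 * With funcgraph-tail set, or when the matching entry was lost, the
 * function name is appended after the brace as a C comment.
 */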

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)