/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}
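
/*
 * Critical paths are expected to bail out early once the tracer is
 * dead. For example, ftrace_push_return_trace() below opens with:
 *
 *	if (unlikely(ftrace_graph_is_dead()))
 *		return -EBUSY;
 */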

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};
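
/*
 * These options are toggled at run time via the trace_options file in
 * tracefs, for example (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *	echo funcgraph-proc   > /sys/kernel/tracing/trace_options
 *	echo nofuncgraph-irqs > /sys/kernel/tracing/trace_options
 */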

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};
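
/*
 * For example, print_graph_irq() below uses these fillers to draw the
 * interrupt arrows inside the DURATION column (illustrative output):
 *
 *	 1)   ==========> |
 *	 1)               |  smp_apic_timer_interrupt() {
 *
 * FLAGS_FILL_START pads up to the arrow, FLAGS_FILL_END pads from the
 * arrow to the column separator, and FLAGS_FILL_FULL blanks the whole
 * column.
 */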

static void
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack of
	 * the current task.  Its value should be in [0, FTRACE_RETFUNC_
	 * DEPTH) when the function graph tracer is used.  To support
	 * filtering out specific functions, it makes the index negative
	 * by subtracting a huge value (FTRACE_NOTRACE_DEPTH), so that
	 * when ftrace sees a negative index it ignores the record.  The
	 * index is recovered when returning from the filtered function
	 * by adding FTRACE_NOTRACE_DEPTH back, after which functions are
	 * recorded normally again.
	 *
	 * The curr_ret_stack is initialized to -1 and gets incremented
	 * in this function.  So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}
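
/*
 * Worked example of the notrace bookkeeping above (illustrative
 * values): entering a filtered function with curr_ret_stack == 2
 * pushes index 3 and then subtracts FTRACE_NOTRACE_DEPTH, leaving
 * curr_ret_stack negative while ret_stack[3] still holds the real
 * return address.  ftrace_return_to_handler() later sees a value
 * below -1 and adds FTRACE_NOTRACE_DEPTH back, resuming normal
 * tracing.
 */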

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function.  Recover index to get an original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 *  is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
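
/*
 * Sketch of how this is reached (architecture specific, shown for
 * orientation only): the arch's return trampoline calls
 * ftrace_return_to_handler() to retrieve the original return address,
 * then jumps back to it, conceptually:
 *
 *	original_ret = ftrace_return_to_handler(frame_pointer);
 *	jump to original_ret;
 */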

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it if it is a function we enabled, or is nested in one. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
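
/*
 * Typical use from user space, for example (tracefs assumed at
 * /sys/kernel/tracing): a non-zero tracing_thresh, in microseconds,
 * selects the threshold variants registered above:
 *
 *	echo 100            > /sys/kernel/tracing/tracing_thresh
 *	echo function_graph > /sys/kernel/tracing/current_tracer
 */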

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
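
/*
 * Example of the resulting field (illustrative): pid 1755 with comm
 * "sshd" renders as "  sshd-1755   ", i.e. "comm-pid" centered in a
 * TRACE_GRAPH_PROCINFO_LENGTH (14) character column, with the comm
 * truncated to at most 7 characters.
 */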

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
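
/*
 * Rendering consequence (illustrative output): when an entry is paired
 * with its immediate return here, print_graph_entry_leaf() collapses
 * it onto one line; otherwise print_graph_entry_nested() opens a brace:
 *
 *	 0)   0.633 us    |  rcu_irq_exit();
 *	 0)               |  do_IRQ() {
 */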

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
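
/*
 * Formatting example (illustrative): for duration == 3141592 ns, the
 * do_div() above leaves 3141 us with nsecs_rem == 592, printed as
 * "3141.592 us" and padded to the row width; once the microsecond
 * part reaches 7 digits the nanosecond part is dropped.
 */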

static void
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is a return entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
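
/*
 * Effect on output (illustrative): with the funcgraph-irqs option
 * cleared, an interrupt arriving inside kmalloc() is swallowed whole
 * by the two checks above, so instead of
 *
 *	 1)               |  kmalloc() {
 *	 1)   ==========> |
 *	 1)               |    smp_apic_timer_interrupt() {
 *
 * the trace shows only the uninterrupted kmalloc() graph.
 */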

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)