/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

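/* Per-cpu state used while rendering the graph output */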
struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debugging purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

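/* The trace_array that the graph entry/return callbacks record into */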
static struct trace_array *graph_array;


/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with -Os (optimize for size) makes the latest
	 * gcc do the above.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

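/* Write a function entry event (TRACE_GRAPH_ENT) into the ring buffer */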
int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);

	return 1;
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it when nested in a traced function or itself enabled */
	if (!(trace->depth || ftrace_graph_addr(trace->func)))
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

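/*
 * When tracing_thresh is set, don't record an entry event here;
 * trace_graph_thresh_return() decides whether the duration was
 * long enough to be worth recording.
 */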
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

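/* Write a function return event (TRACE_GRAPH_RET) into the ring buffer */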
void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

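/* Only record the return event if the function ran for at least tracing_thresh */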
void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

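/* Field width used for the CPU column of the trace output */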
static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}


static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

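/*
 * Peek at the next event in the ring buffer: if it is the matching
 * return for @curr, this call is a leaf and can be printed on a
 * single line.  Returns the return entry, or NULL otherwise.
 */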
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = iter->buffer_iter[iter->cpu];

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->tr->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

/* Signal an execution time overhead to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	/* If the duration column is disabled, we don't need anything */
	if (!(flags & TRACE_GRAPH_PRINT_DURATION))
		return 1;

	/* Non nested entry or return */
	if (duration == -1)
		return trace_seq_printf(s, "  ");

	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			return trace_seq_printf(s, "! ");

		/* Duration exceeded 10 usecs */
		if (duration > 10000ULL)
			return trace_seq_printf(s, "+ ");
	}

	return trace_seq_printf(s, "  ");
}

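/* Print the absolute timestamp as seconds.microseconds */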
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

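/*
 * Print the "==========>" / "<==========" markers that bracket
 * entry to and exit from the irq entry text section.
 */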
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Don't close the duration column if there isn't one */
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		trace_seq_printf(s, " |");
	ret = trace_seq_printf(s, "\n");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

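/*
 * Format a duration given in nanoseconds as "123.456 us",
 * padded to the width of the duration column.
 */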
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print the integer part (in usecs) */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len), "%03lu",
			 nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	int ret;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. Since this is a
		 * leaf function, keep the comments at the same
		 * depth as the call itself.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead */
	ret = print_graph_overhead(duration, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (flags & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (flags & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

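/*
 * Print the common leading fields of a line: pid-switch banner,
 * irq markers, absolute time, CPU, proc and latency columns,
 * depending on the selected flags.
 */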
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	struct fgraph_data *data = iter->private;
	int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	if (flags & TRACE_GRAPH_PRINT_IRQS)
		return 0;

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	struct fgraph_data *data = iter->private;
	int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	if (flags & TRACE_GRAPH_PRINT_IRQS)
		return 0;

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this return takes us back out
	 * of it. Don't trace it, and clear the entry depth since we are
	 * out of the irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

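/*
 * Print a function entry: either a single-line leaf ("func();")
 * or the opening "func() {" of a nested call.
 */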
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

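/*
 * Print a function return: the duration columns and the closing
 * brace, plus the function name if the matching entry was lost.
 */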
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. This is the return
		 * from a function, so we now want the comments to
		 * display at the same level as the closing bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead */
	ret = print_graph_overhead(duration, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (flags & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name.
	 */
	if (func_match) {
		ret = trace_seq_printf(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

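/*
 * Print a non function-graph event (e.g. a trace_printk comment)
 * indented at the current call depth.
 */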
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;
