/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	{ } /* Empty entry */
};
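
/*
 * Editor's note, a hedged usage sketch (not part of the original file):
 * each tracer option above can be toggled at run time through the
 * tracefs trace_options interface, e.g.
 *
 *   echo funcgraph-proc  > /sys/kernel/debug/tracing/trace_options
 *   echo nofuncgraph-cpu > /sys/kernel/debug/tracing/trace_options
 */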

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the
 * following values are used by print_graph_irq and others to
 * fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack
	 * of the current task.  Its value should be in
	 * [0, FTRACE_RETFUNC_DEPTH) while the function graph tracer is
	 * in use.  To support filtering out specific functions, the
	 * index is made negative by subtracting a huge value
	 * (FTRACE_NOTRACE_DEPTH), so that ftrace ignores any record
	 * with a negative index.  The index is recovered when
	 * returning from the filtered function by adding
	 * FTRACE_NOTRACE_DEPTH back, after which functions are
	 * recorded normally again.
	 *
	 * The curr_ret_stack is initialized to -1 and gets incremented
	 * in this function.  So it can be less than -1 only if the
	 * function was filtered out via ftrace_graph_notrace_addr(),
	 * which the user can set via the set_graph_notrace file in
	 * debugfs.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
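	/*
	 * Editor's illustration of the arithmetic above, with
	 * hypothetical numbers (not from the original source): assuming
	 * FTRACE_NOTRACE_DEPTH is a large constant such as 65536,
	 * entering a notrace'd function at index 3 leaves
	 * curr_ret_stack at 3 - 65536 = -65533; the negative index
	 * makes ftrace skip the record, and the return path adds
	 * FTRACE_NOTRACE_DEPTH back to recover index 3.
	 */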
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that we just returned from a
	 * notrace'd function.  Recover the index to get the original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the placeholder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 built with -Os (optimize for size) makes
	 * the latest gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt comes in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
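
/*
 * Editor's note (hedged, arch-specific detail not shown in this file):
 * the architecture's return trampoline, conventionally an assembly
 * stub named return_to_handler, is what calls
 * ftrace_return_to_handler() and then jumps to the original return
 * address that it hands back.
 */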

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it when it is nested in an enabled function or is itself enabled. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}
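
/*
 * Editor's note, a hedged usage sketch: with the thresh variants
 * registered, writing e.g. "echo 100 > tracing_thresh" in tracefs
 * records only functions that ran longer than 100 microseconds;
 * entry events are suppressed and only qualifying returns are
 * written to the ring buffer.
 */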

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
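
/*
 * Editor's illustration with hypothetical values: with the 14-column
 * field above, "bash-1755" is 9 characters, so spaces is 5 and the
 * two loops emit 2 leading and 3 trailing blanks, centering the proc
 * info as "  bash-1755   ".
 */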


static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
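
/*
 * Editor's illustration (hypothetical trace): if an entry event for
 * kmalloc() is immediately followed by a return event with the same
 * pid and the same func, the pair is detected as a leaf above and
 * later printed on a single line as "kmalloc();" rather than as a
 * "kmalloc() {" ... "}" bracket pair.
 */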

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
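
/*
 * Editor's illustration with hypothetical numbers: a duration of
 * 123456 ns is split by do_div() into 123 (printed as the usecs
 * part) and a 456 ns remainder, producing "123.456 us " padded out
 * to the fixed column width.
 */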

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
			return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 usecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catch here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
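
/*
 * Editor's illustration: with funcgraph-overhead enabled, a call that
 * ran for 250000 ns (250 us) is prefixed with "! ", one that ran for
 * 25000 ns gets "+ ", and faster calls get two plain spaces so the
 * duration column stays aligned.
 */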

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}
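
/*
 * Editor's illustration (hypothetical trace): with funcgraph-irqs
 * disabled, an irq arriving while the traced task is at depth 2
 * records depth_irq = 2; every event inside the irq handler is then
 * treated as "inside irq code" until the matching return at a depth
 * <= 2 resets depth_irq to -1 in check_irq_return() below.
 */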

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * a note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since this is the
		 * return from a function, we now want the comments
		 * to display at the same level as the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);