/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}
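
/*
 * Example (illustrative, taken from the push path below): every
 * critical path of the graph tracer is expected to check this kill
 * switch and bail out, the way ftrace_push_return_trace() does:
 *
 *	if (unlikely(ftrace_graph_is_dead()))
 *		return -EBUSY;
 */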

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};
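
/*
 * These values are OR'd into the normal flag word whenever the
 * DURATION column must be blanked instead of carrying a time, as in
 * print_graph_entry_nested() below:
 *
 *	print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
 */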

static void
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info.*/
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack
	 * of the current task.  Its value should be in
	 * [0, FTRACE_RETFUNC_DEPTH) when the function graph tracer is
	 * used.  To support filtering out specific functions, the index
	 * is made negative by subtracting a huge value
	 * (FTRACE_NOTRACE_DEPTH), so that when ftrace sees a negative
	 * index it ignores the record.  The index is recovered when
	 * returning from the filtered function by adding
	 * FTRACE_NOTRACE_DEPTH back, after which functions are recorded
	 * normally again.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function.  So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in debugfs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}
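
/*
 * Illustration of the set_graph_notrace bookkeeping above (values are
 * made up): with curr_ret_stack == 2, entering a notrace'd function
 * bumps the index to 3 and then subtracts FTRACE_NOTRACE_DEPTH,
 * leaving a large negative value.  Records seen while the index is
 * negative are ignored; the pop path (ftrace_pop_return_trace() and
 * ftrace_return_to_handler() below) adds FTRACE_NOTRACE_DEPTH back,
 * and normal recording resumes at index 2.
 */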

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function.  Recover index to get an original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
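
/*
 * Sketch of the caller side (arch-specific, shown only for
 * orientation, not actual code from this file): the return trampoline
 * that was installed in place of the original return address calls
 * ftrace_return_to_handler() to learn where to really return to,
 * roughly:
 *
 *	save clobbered registers
 *	original_ret = ftrace_return_to_handler(frame_pointer);
 *	restore clobbered registers
 *	jump to original_ret
 */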

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/*
	 * Trace it when it is nested in a traced function, or is
	 * itself an enabled function.
	 */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
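
/*
 * Example of the resulting field (illustrative): for pid 1755 and
 * comm "sshd", print_graph_proc() emits "  sshd-1755   " - the comm
 * (truncated to 7 characters) and the pid joined by '-', centered in
 * TRACE_GRAPH_PROCINFO_LENGTH columns.
 */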

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}
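
/*
 * Sample output (illustrative): around an interrupt the code above
 * renders the DURATION column as an arrow instead of a time, e.g.:
 *
 *	 1)   ==========> |
 *	 1)               |  smp_apic_timer_interrupt() {
 *	...
 *	 1)   <========== |
 */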

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str);
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++)
		trace_seq_putc(s, ' ');
}
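
/*
 * Examples of the formatting above (illustrative): a duration of
 * 2045 ns is printed as "2.045 us" and 1234567 ns as "1234.567 us";
 * at most 7 digit characters are emitted and shorter values are
 * padded with spaces so the column stays aligned.
 */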

static void
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	bool duration_printed = false;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL) {
			trace_seq_puts(s, "! ");
			duration_printed = true;

		/* Duration exceeded 10 usecs */
		} else if (duration > 10000ULL) {
			trace_seq_puts(s, "+ ");
			duration_printed = true;
		}
	}

	/*
	 * If we did not exceed the duration thresholds or we don't want
	 * to print out the overhead, either way we need to fill out the space.
	 */
	if (!duration_printed)
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}
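
/*
 * Example duration column output (illustrative) with the
 * funcgraph-overhead option enabled:
 *
 *	   3.251 us   |    (under both thresholds)
 *	 + 12.462 us  |    (exceeded 10 usecs)
 *	 ! 118.340 us |    (exceeded 100 usecs)
 */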

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
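
/*
 * The two cases above render differently (illustrative, function
 * names made up): a leaf gets its duration on the same line and is
 * closed immediately, while a nested entry prints an opening brace
 * and no time:
 *
 *	 1)   1.115 us    |      do_leaf_thing();
 *	 1)               |      do_nested_thing() {
 */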

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}
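
/*
 * Example return lines (illustrative, function name made up):
 * normally only the closing brace is printed next to its duration:
 *
 *	 1)   2.342 us    |      }
 *
 * When the matching entry was lost, or the funcgraph-tail option is
 * set, the function name is appended after the brace as a C comment
 * (the "%ps" format above), so the user does not have to guess which
 * function just returned.
 */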

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)