/*
 *
 * Function graph tracer.
 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* Spaces of indentation added per call-graph depth level */
#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4

23
static struct tracer_opt trace_opts[] = {
24 25 26 27 28 29
	/* Display overruns ? */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
30 31 32 33
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
34 35
	/* Don't display overruns by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
36 37 38
	.opts = trace_opts
};

39
/* pid on the last trace processed */
40
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
41 42 43

static int graph_trace_init(struct trace_array *tr)
{
44 45
	int cpu, ret;

46 47 48
	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

49
	ret = register_ftrace_graph(&trace_graph_return,
50
					&trace_graph_entry);
51 52 53 54 55
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
56 57 58 59
}

/* Tear down in reverse of init: stop cmdline recording, unhook callbacks */
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

/*
 * Number of decimal digits needed to print @nb; used to right-align
 * cpu numbers in print_graph_cpu().  Generalized from the original
 * hard-coded 3-digit cap so widths stay correct above 999 cpus.
 * Negative input is treated as a single digit, like the original.
 */
static inline int log10_cpu(int nb)
{
	int digits = 1;

	while (nb >= 10) {
		nb /= 10;
		digits++;
	}
	return digits;
}

/*
 * Emit "<cpu>) ", right-padded with leading spaces to the width of
 * the online-cpu count so the column lines up on every row.
 */
static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int width_this = log10_cpu(cpu);
	int width_all = log10_cpu(cpus_weight_nr(cpu_online_map));
	int pad;

	for (pad = width_all - width_this; pad > 0; pad--) {
		if (!trace_seq_printf(s, " "))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!trace_seq_printf(s, "%d) ", cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}


94
/* If the pid changed since the last trace, output this event */
95
static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
96
{
97 98
	char *comm;

99
	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
100
		return 1;
101

102
	last_pid[cpu] = pid;
103 104
	comm = trace_find_cmdline(pid);

105
	return trace_seq_printf(s, "\n------------8<---------- thread %s-%d"
106
				    " ------------8<----------\n\n",
107
				    cpu, comm, pid);
108 109
}

110 111 112 113 114 115 116 117 118 119 120 121 122
static bool
trace_branch_is_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	ring_iter = iter->buffer_iter[iter->cpu];

	if (!ring_iter)
		return false;

123
	event = ring_buffer_iter_peek(ring_iter, NULL);
124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144

	if (!event)
		return false;

	next = ring_buffer_event_data(event);

	if (next->ent.type != TRACE_GRAPH_RET)
		return false;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return false;

	return true;
}


/*
 * Print @duration (nanoseconds) as "xxxx.yyy us | ".  The fractional
 * part must be zero-padded: "%3lu" rendered 5 ns as "   5" (reading
 * as .5 us instead of .005 us), so use "%03lu".
 */
static inline int
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	return trace_seq_printf(s, "%4llu.%03lu us | ", duration, nsecs_rem);
}

/*
 * Signal an execution-time overhead marker to the output.
 * @duration is in nanoseconds (it is divided by 1000 to get usecs in
 * print_graph_duration), so the thresholds below are 100us and 10us —
 * the old comments claiming "msecs" were off by a factor of 1000.
 */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* Duration exceeded 100 usecs */
	if (duration > 100000ULL)
		return trace_seq_printf(s, "! ");

	/* Duration exceeded 10 usecs */
	if (duration > 10000ULL)
		return trace_seq_printf(s, "+ ");

	return trace_seq_printf(s, "  ");
}

/* Case of a leaf function on its call entry */
164
static enum print_line_t
165 166
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
167
{
168 169 170 171 172
	struct ftrace_graph_ret_entry *ret_entry;
	struct ftrace_graph_ret *graph_ret;
	struct ring_buffer_event *event;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
173
	int ret;
174
	int i;
175

176 177 178 179 180 181
	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
	ret_entry = ring_buffer_event_data(event);
	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

182 183 184 185
	/* Must not exceed 8 characters: 9999.999 us */
	if (duration > 10000000ULL)
		duration = 9999999ULL;

186
	/* Overhead */
187 188 189 190 191 192 193 194
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
195
	if (!ret)
196 197
		return TRACE_TYPE_PARTIAL_LINE;

198 199 200 201 202 203 204 205 206 207 208
	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

209
	ret = trace_seq_printf(s, "();\n");
210 211 212 213 214 215 216 217 218 219 220 221 222 223 224
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
			struct trace_seq *s)
{
	int i;
	int ret;
	struct ftrace_graph_ent *call = &entry->graph_ent;

	/* No overhead */
225 226 227 228 229 230 231 232
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = trace_seq_printf(s, "  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No time */
	ret = trace_seq_printf(s, "        |     ");
233 234

	/* Function */
235 236
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
237 238
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
239 240 241 242 243 244
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

245
	ret = trace_seq_printf(s, "() {\n");
246 247 248
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

249 250 251
	return TRACE_TYPE_HANDLED;
}

252 253 254 255 256 257 258
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, int cpu)
{
	int ret;
	struct trace_entry *ent = iter->ent;

259
	/* Pid */
260 261 262
	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

263 264 265 266 267 268
	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
269 270 271 272 273 274 275 276

	if (trace_branch_is_leaf(iter, field))
		return print_graph_entry_leaf(iter, field, s);
	else
		return print_graph_entry_nested(field, s);

}

277 278
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
279
		   struct trace_entry *ent, int cpu)
280 281 282
{
	int i;
	int ret;
283
	unsigned long long duration = trace->rettime - trace->calltime;
284

285 286 287 288
	/* Must not exceed 8 characters: xxxx.yyy us */
	if (duration > 10000000ULL)
		duration = 9999999ULL;

289
	/* Pid */
290 291 292
	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

293
	/* Cpu */
294 295 296 297 298
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
299

300
	/* Overhead */
301 302 303 304 305 306 307 308
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
309 310 311 312
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
313 314
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
315 316
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
317 318
	}

319
	ret = trace_seq_printf(s, "}\n");
320 321
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
322

323
	/* Overrun */
324 325 326
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
327 328
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
329 330 331 332 333 334 335 336 337
	}
	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
338

339 340 341 342
	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		struct ftrace_graph_ent_entry *field;
		trace_assign_type(field, entry);
343
		return print_graph_entry(field, s, iter,
344
					 iter->cpu);
345 346 347 348
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
349
		return print_graph_return(&field->ret, s, entry, iter->cpu);
350 351 352
	}
	default:
		return TRACE_TYPE_UNHANDLED;
353 354 355 356
	}
}

static struct tracer graph_trace __read_mostly = {
	.name	     = "function_graph",
	.init	     = graph_trace_init,
	.reset	     = graph_trace_reset,
	.print_line  = print_graph_function,
	.flags	     = &tracer_flags,
};

/* Register the function graph tracer with the tracing core at boot */
static __init int init_graph_trace(void)
{
	int ret = register_tracer(&graph_trace);

	return ret;
}

device_initcall(init_graph_trace);