/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

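/*
 * Per-cpu scratch buffers used to assemble trace records before they are
 * handed to perf; one buffer per recursion context.
 */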
static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

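/*
 * Permission check before binding a perf event to this trace event; raw
 * tracepoint data and ftrace function tracing can expose kernel internals,
 * so they are gated on the tracepoint paranoid setting and CAP_SYS_ADMIN.
 */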
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We checked and allowed the parent to be created,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for function trace
		 * events, due to issues with page faults while tracing the
		 * page fault handler and the overall trickiness of doing so.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

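/*
 * Take a perf reference on the trace event. The first reference allocates
 * the event's per-cpu hlist of perf events and registers the event with
 * its class; the first reference system-wide also allocates the shared
 * per-context scratch buffers.
 */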
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

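/*
 * Drop a perf reference on the trace event. The last reference unregisters
 * the event, waits for in-flight tracepoint callbacks to finish and frees
 * the per-cpu lists; the last reference system-wide also frees the shared
 * scratch buffers.
 */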
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

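/*
 * Full initialization path for a new perf event on this trace event:
 * permission check, registration, then the per-event open callback.
 */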
static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

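/*
 * Entry point from the perf core for tracepoint events: find the trace
 * event whose id matches attr.config, pin its module and initialize it.
 */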
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

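/* Counterpart of perf_trace_init(): close the event and drop its registration. */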
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

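/*
 * Called when the event is scheduled in on a CPU: hook it into this CPU's
 * hlist so the tracepoint callback sees it, then let the event class do
 * any per-add work.
 */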
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

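/* Called when the event is scheduled out: unhook it from this CPU's hlist. */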
void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

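/*
 * Hand out the per-cpu scratch buffer for the current recursion context.
 * Returns NULL if the requested size exceeds PERF_MAX_TRACE_SIZE or if we
 * are already recursing in this context; otherwise *rctxp holds the
 * context to pass back to perf_trace_buf_submit().
 */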
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from alignment so we don't leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

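/* Fill in the common trace_entry header (irq flags, preempt count, type). */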
void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;
	int pc = preempt_count();
	unsigned long flags;

	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
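/*
 * ftrace callback for perf function events: build an ftrace_entry in the
 * per-cpu scratch buffer and submit it to the perf events hashed on this
 * CPU.
 */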
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct perf_event *event;
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	event = container_of(ops, struct perf_event, ftrace_ops);
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, head, NULL, event);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}

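/*
 * reg() callback for the function trace event: map the perf
 * open/close/add/del operations onto registering and toggling a
 * per-event ftrace_ops.
 */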
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */