trace_syscalls.c 18.4 KB
Newer Older
1
#include <trace/syscall.h>
2
#include <trace/events/syscalls.h>
3
#include <linux/syscalls.h>
4
#include <linux/slab.h>
5
#include <linux/kernel.h>
6
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
7
#include <linux/ftrace.h>
8
#include <linux/perf_event.h>
9 10 11 12 13
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

14
static DEFINE_MUTEX(syscall_trace_lock);
15

16
static int syscall_enter_register(struct trace_event_call *event,
17
				 enum trace_reg type, void *data);
18
static int syscall_exit_register(struct trace_event_call *event,
19
				 enum trace_reg type, void *data);
20

21
static struct list_head *
22
syscall_get_enter_fields(struct trace_event_call *call)
23 24 25 26 27 28
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

29 30
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];
31 32 33

static struct syscall_metadata **syscalls_metadata;

34 35 36 37 38 39
#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
/*
 * Compare a kallsyms symbol against a syscall metadata name, skipping
 * the first three characters of each. Archs that use syscall wrappers
 * may alias syscall symbols with a ".SyS" or ".sys" prefix instead of
 * "sys", which would otherwise cause a spurious mismatch.
 */
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	return strcmp(sym + 3, name + 3) == 0;
}
#endif

47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow for 32bit applications
 * to run on a 64bit kernel, do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 *     *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore it.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	/* -1 tells callers to skip this event entirely. */
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
/* No compat filtering needed: pass straight through to the arch helper. */
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */

79 80
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
81
{
82 83
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
84 85 86
	char str[KSYM_SYMBOL_LEN];


87 88
	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
89 90
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

91 92 93
	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

94
	for ( ; start < stop; start++) {
95
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
96
			return *start;
97 98 99 100 101 102 103 104 105 106 107 108
	}
	return NULL;
}

/* Return the metadata for syscall @nr, or NULL if unmapped/out of range. */
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (nr < 0 || nr >= NR_syscalls || !syscalls_metadata)
		return NULL;

	return syscalls_metadata[nr];
}

109
static enum print_line_t
110 111
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
112
{
113
	struct trace_array *tr = iter->tr;
114 115 116 117
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
118
	int i, syscall;
119

120
	trace = (typeof(trace))ent;
121 122
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);
123

124 125 126
	if (!entry)
		goto end;

127
	if (entry->enter_event->event.type != ent->type) {
128 129 130 131
		WARN_ON_ONCE(1);
		goto end;
	}

132
	trace_seq_printf(s, "%s(", entry->name);
133 134

	for (i = 0; i < entry->nb_args; i++) {
135 136 137 138

		if (trace_seq_has_overflowed(s))
			goto end;

139
		/* parameter types */
140
		if (tr->trace_flags & TRACE_ITER_VERBOSE)
141 142
			trace_seq_printf(s, "%s ", entry->types[i]);

143
		/* parameter values */
144 145 146
		trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				 trace->args[i],
				 i == entry->nb_args - 1 ? "" : ", ");
147 148
	}

149
	trace_seq_putc(s, ')');
150
end:
151
	trace_seq_putc(s, '\n');
152

153
	return trace_handle_return(s);
154 155
}

156
static enum print_line_t
157 158
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
159 160 161 162 163 164 165
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;

166
	trace = (typeof(trace))ent;
167 168
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);
169

170
	if (!entry) {
171
		trace_seq_putc(s, '\n');
172
		goto out;
173 174
	}

175
	if (entry->exit_event->event.type != ent->type) {
176 177 178 179
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

180
	trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
181 182
				trace->ret);

183 184
 out:
	return trace_handle_return(s);
185 186
}

187 188 189 190 191
/*
 * __bad_type_size() is intentionally never defined; referencing it
 * forces a link error when SYSCALL_FIELD() is used with a type whose
 * size does not match the corresponding struct member.
 */
extern char *__bad_type_size(void);

/*
 * Expand to the (type name, field name, offset, size, signedness)
 * argument list that trace_define_field() expects for member @name of
 * the caller's local 'trace' variable, with the size check above.
 */
#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)
194

195 196
/*
 * Build the print_fmt string for a syscall-entry event into @buf:
 * a quoted format ("arg: 0x%0<N>lx, ...") followed by one
 * ", ((unsigned long)(REC-><arg>))" expression per argument.
 * Called twice: first with @len == 0 to size the buffer, then again
 * to actually write it. Returns the length of the print_fmt.
 */
static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

223
/*
 * Install ->print_fmt for @call. Exit events share a static format
 * string; entry events get a kmalloc'd one built in two passes by
 * __set_enter_print_fmt(). Returns 0 on success or -ENOMEM.
 */
static int __init set_syscall_print_fmt(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;
	char *fmt;
	int needed;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First pass: compute the required length. */
	needed = __set_enter_print_fmt(entry, NULL, 0);

	fmt = kmalloc(needed + 1, GFP_KERNEL);
	if (!fmt)
		return -ENOMEM;

	/* Second pass: actually write the format string. */
	__set_enter_print_fmt(entry, fmt, needed + 1);
	call->print_fmt = fmt;

	return 0;
}

248
/*
 * Undo set_syscall_print_fmt(): only entry events own a dynamically
 * allocated format string, exit events point at a static literal.
 */
static void __init free_syscall_print_fmt(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call)
		return;

	kfree(call->print_fmt);
}

256
/*
 * Define the ftrace fields for a syscall-entry event: the syscall nr
 * followed by one unsigned-long-sized field per argument, laid out at
 * the same offsets as in struct syscall_trace_enter.
 */
static int __init syscall_enter_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		/* every argument is recorded as one unsigned long */
		offset += sizeof(unsigned long);
	}

	return ret;
}

279
/* Define the ftrace fields (nr, ret) for a syscall-exit event. */
static int __init syscall_exit_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

294
/*
 * sys_enter tracepoint probe: record a syscall-entry event (nr plus
 * the raw arguments) into the ftrace ring buffer of the trace array
 * passed as @data when reg_event_syscall_enter() registered us.
 */
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* event size depends on this syscall's argument count */
	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}

342
/*
 * sys_exit tracepoint probe: record a syscall-exit event (nr plus the
 * return value) into the ftrace ring buffer of the trace array passed
 * as @data when reg_event_syscall_exit() registered us.
 */
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry),
			irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}

388
static int reg_event_syscall_enter(struct trace_event_file *file,
389
				   struct trace_event_call *call)
390
{
391
	struct trace_array *tr = file->tr;
392 393 394
	int ret = 0;
	int num;

395
	num = ((struct syscall_metadata *)call->data)->syscall_nr;
396
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
397 398
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
399 400
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
401
	if (!ret) {
402
		rcu_assign_pointer(tr->enter_syscall_files[num], file);
403
		tr->sys_refcount_enter++;
404 405 406
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
407 408
}

409
static void unreg_event_syscall_enter(struct trace_event_file *file,
410
				      struct trace_event_call *call)
411
{
412
	struct trace_array *tr = file->tr;
413
	int num;
414

415
	num = ((struct syscall_metadata *)call->data)->syscall_nr;
416
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
417 418
		return;
	mutex_lock(&syscall_trace_lock);
419
	tr->sys_refcount_enter--;
420
	RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
421 422
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
423 424
	mutex_unlock(&syscall_trace_lock);
}
425

426
static int reg_event_syscall_exit(struct trace_event_file *file,
427
				  struct trace_event_call *call)
428
{
429
	struct trace_array *tr = file->tr;
430 431 432
	int ret = 0;
	int num;

433
	num = ((struct syscall_metadata *)call->data)->syscall_nr;
434
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
435 436
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
437 438
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
439
	if (!ret) {
440
		rcu_assign_pointer(tr->exit_syscall_files[num], file);
441
		tr->sys_refcount_exit++;
442
	}
443 444 445
	mutex_unlock(&syscall_trace_lock);
	return ret;
}
446

447
static void unreg_event_syscall_exit(struct trace_event_file *file,
448
				     struct trace_event_call *call)
449
{
450
	struct trace_array *tr = file->tr;
451
	int num;
452

453
	num = ((struct syscall_metadata *)call->data)->syscall_nr;
454
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
455 456
		return;
	mutex_lock(&syscall_trace_lock);
457
	tr->sys_refcount_exit--;
458
	RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
459 460
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
461
	mutex_unlock(&syscall_trace_lock);
462
}
463

464
/*
 * One-time initialization of a syscall trace event: verify the syscall
 * number was mapped at boot, build the print format and register the
 * event. Returns the event type id or a negative errno.
 */
static int __init init_syscall_trace(struct trace_event_call *call)
{
	struct syscall_metadata *meta = call->data;
	int id;

	if (meta->syscall_nr < 0 || meta->syscall_nr >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				meta->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);
	if (id < 0)
		free_syscall_print_fmt(call);

	return id;
}

489 490 491 492 493 494 495 496
/* Output callbacks used to render syscall entry/exit events. */
struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

497
/*
 * Event class shared by all syscall-entry events. __refdata because
 * ->raw_init points at __init code only used during boot. Fields are
 * per-event, exposed via ->get_fields.
 */
struct trace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

505
/*
 * Event class shared by all syscall-exit events. Unlike the entry
 * class, all exit events have the same (nr, ret) layout, so a single
 * static field list is used instead of a ->get_fields callback.
 */
struct trace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

513
/*
 * Default translation from syscall number to handler address; weak so
 * architectures with a non-trivial syscall table can override it.
 */
unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

518
/*
 * Boot-time setup: build the NR_syscalls-sized lookup table mapping a
 * syscall number to its metadata record, resolving each entry through
 * the arch syscall table and the compiled-in metadata section.
 */
void __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	int nr;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (WARN_ON(!syscalls_metadata))
		return;

	for (nr = 0; nr < NR_syscalls; nr++) {
		meta = find_syscall_meta(arch_syscall_addr(nr));
		if (!meta)
			continue;

		meta->syscall_nr = nr;
		syscalls_metadata[nr] = meta;
	}
}

542
#ifdef CONFIG_PERF_EVENTS
543

544 545 546 547
static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;
548

549
/*
 * sys_enter tracepoint probe for perf: emit a syscall-entry sample
 * (nr plus raw arguments) to the perf buffer when perf has enabled
 * this syscall number and a perf event is attached on this CPU.
 */
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	if (hlist_empty(head))
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			       (unsigned long *)&rec->args);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

588
static int perf_sysenter_enable(struct trace_event_call *call)
589 590 591 592
{
	int ret = 0;
	int num;

593
	num = ((struct syscall_metadata *)call->data)->syscall_nr;
594 595

	mutex_lock(&syscall_trace_lock);
596
	if (!sys_perf_refcount_enter)
597
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
598 599 600 601
	if (ret) {
		pr_info("event trace: Could not activate"
				"syscall entry trace point");
	} else {
602 603
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
604 605 606 607 608
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

609
static void perf_sysenter_disable(struct trace_event_call *call)
610 611 612
{
	int num;

613
	num = ((struct syscall_metadata *)call->data)->syscall_nr;
614 615

	mutex_lock(&syscall_trace_lock);
616 617 618
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
619
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
620 621 622
	mutex_unlock(&syscall_trace_lock);
}

623
/*
 * sys_exit tracepoint probe for perf: emit a syscall-exit sample
 * (nr plus return value) to the perf buffer when perf has enabled
 * this syscall number and a perf event is attached on this CPU.
 */
static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	if (hlist_empty(head))
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

660
static int perf_sysexit_enable(struct trace_event_call *call)
661 662 663 664
{
	int ret = 0;
	int num;

665
	num = ((struct syscall_metadata *)call->data)->syscall_nr;
666 667

	mutex_lock(&syscall_trace_lock);
668
	if (!sys_perf_refcount_exit)
669
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
670 671
	if (ret) {
		pr_info("event trace: Could not activate"
672
				"syscall exit trace point");
673
	} else {
674 675
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
676 677 678 679 680
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

681
static void perf_sysexit_disable(struct trace_event_call *call)
682 683 684
{
	int num;

685
	num = ((struct syscall_metadata *)call->data)->syscall_nr;
686 687

	mutex_lock(&syscall_trace_lock);
688 689 690
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
691
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
692 693 694
	mutex_unlock(&syscall_trace_lock);
}

695
#endif /* CONFIG_PERF_EVENTS */
696

697
/*
 * trace_event_call ->reg callback for syscall-entry events: dispatch
 * ftrace and perf (un)registration requests to the matching helpers.
 */
static int syscall_enter_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	/* no per-event work needed for these perf lifecycle hooks */
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

725
/*
 * trace_event_call ->reg callback for syscall-exit events: dispatch
 * ftrace and perf (un)registration requests to the matching helpers.
 */
static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	/* no per-event work needed for these perf lifecycle hooks */
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}