/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/interrupt.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;

int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);
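
/*
 * Illustrative sketch only, not part of the original file: a platform
 * LED or keyboard driver could install a blink handler along the lines
 * of the hypothetical example below ("my_led_set" is a made-up helper).
 * The handler toggles an indicator and returns how many milliseconds it
 * spent, so the panic loop can account for the delay.
 *
 *	static long my_led_blink(int state)
 *	{
 *		my_led_set(state);
 *		return 0;
 *	}
 *
 *	panic_blink = my_led_blink;
 */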

/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak panic_smp_self_stop(void)
{
	while (1)
		cpu_relax();
}

/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak nmi_panic_self_stop(struct pt_regs *regs)
{
	panic_smp_self_stop();
}

/*
 * Stop other CPUs in panic.  Architecture dependent code may override this
 * with more suitable version.  For example, if the architecture supports
 * crash dump, it should save registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
	static int cpus_stopped;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();
	cpus_stopped = 1;
}

atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
	int old_cpu, cpu;

	cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);

	if (old_cpu == PANIC_CPU_INVALID)
		panic("%s", msg);
	else if (old_cpu != cpu)
		nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);
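
/*
 * Illustrative sketch only, not part of the original file: an
 * architecture's handler for an unrecoverable NMI might funnel into
 * nmi_panic() roughly as below (handler name and message are
 * hypothetical).
 *
 *	static void my_arch_unknown_nmi(struct pt_regs *regs)
 *	{
 *		nmi_panic(regs, "NMI: unknown reason");
 *	}
 */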

/**
 *	panic - halt the system
 *	@fmt: The text string to print
 *
 *	Display a message, then perform cleanups.
 *
 *	This function never returns.
 */
void panic(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
	 * comes here, so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU.  In this case, this is also the 1st CPU.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu  = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers) {
		printk_nmi_flush_on_panic();
		__crash_kexec(NULL);

		/*
		 * Note smp_send_stop is the usual smp shutdown function, which
		 * unfortunately means it may not be hardened to work in a
		 * panic situation.
		 */
		smp_send_stop();
	} else {
		/*
		 * If we want to do crash dump after notifier calls and
		 * kmsg_dump, we will need architecture dependent extra
		 * works in addition to stopping other CPUs.
		 */
		crash_smp_send_stop();
	}

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	/* Call flush even twice. It tries harder with a single online CPU */
	printk_nmi_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you doubt kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run
	 * panic_notifiers and dumping kmsg before kdump.
	 * Note: since some panic_notifiers can make crashed kernel
	 * more unstable, it can increase risks of the kdump failure too.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

	bust_spinlocks(0);

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer.  Try to acquire the lock then release it regardless of the
	 * result.  The release will also print the buffers out.  Locks debug
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from OOPS.
	 */
	debug_locks_off();
	console_flush_on_panic();

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down.  But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	{
		unsigned long caller;

		caller = (unsigned long)__builtin_return_address(0);
		disabled_wait(caller);
	}
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s\n", buf);
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);


struct tnt {
	u8	bit;
	char	true;
	char	false;
};

static const struct tnt tnts[] = {
	{ TAINT_PROPRIETARY_MODULE,	'P', 'G' },
	{ TAINT_FORCED_MODULE,		'F', ' ' },
	{ TAINT_CPU_OUT_OF_SPEC,	'S', ' ' },
	{ TAINT_FORCED_RMMOD,		'R', ' ' },
	{ TAINT_MACHINE_CHECK,		'M', ' ' },
	{ TAINT_BAD_PAGE,		'B', ' ' },
	{ TAINT_USER,			'U', ' ' },
	{ TAINT_DIE,			'D', ' ' },
	{ TAINT_OVERRIDDEN_ACPI_TABLE,	'A', ' ' },
	{ TAINT_WARN,			'W', ' ' },
	{ TAINT_CRAP,			'C', ' ' },
	{ TAINT_FIRMWARE_WORKAROUND,	'I', ' ' },
	{ TAINT_OOT_MODULE,		'O', ' ' },
	{ TAINT_UNSIGNED_MODULE,	'E', ' ' },
	{ TAINT_SOFTLOCKUP,		'L', ' ' },
	{ TAINT_LIVEPATCH,		'K', ' ' },
};

/**
 *	print_tainted - return a string to represent the kernel taint state.
 *
 *  'P' - Proprietary module has been loaded.
 *  'F' - Module has been forcibly loaded.
 *  'S' - SMP with CPUs not designed for SMP.
 *  'R' - User forced a module unload.
 *  'M' - System experienced a machine check exception.
 *  'B' - System has hit bad_page.
 *  'U' - Userspace-defined naughtiness.
 *  'D' - Kernel has oopsed before.
 *  'A' - ACPI table overridden.
 *  'W' - Taint on warning.
 *  'C' - modules from drivers/staging are loaded.
 *  'I' - Working around severe firmware bug.
 *  'O' - Out-of-tree module has been loaded.
 *  'E' - Unsigned module has been loaded.
 *  'L' - A soft lockup has previously occurred.
 *  'K' - Kernel has been live patched.
 *
 *	The string is overwritten by the next call to print_tainted().
 */
const char *print_tainted(void)
{
	static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ")];

	if (tainted_mask) {
		char *s;
		int i;

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < ARRAY_SIZE(tnts); i++) {
			const struct tnt *t = &tnts[i];
			*s++ = test_bit(t->bit, &tainted_mask) ?
					t->true : t->false;
		}
		*s = 0;
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}

int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
	return tainted_mask;
}

/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok = false, but for
 * some noteworthy-but-not-corrupting cases, it can be set to true.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
		pr_warn("Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);
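
/*
 * Illustrative sketch only, not part of the original file: a typical
 * caller flags a noteworthy-but-not-corrupting event without shutting
 * down lock debugging, for example
 *
 *	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 *
 * whereas callers reporting real corruption pass LOCKDEP_NOW_UNRELIABLE.
 */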

static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
int oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();
}

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

static int init_oops_id(void)
{
	if (!oops_id)
		get_random_bytes(&oops_id, sizeof(oops_id));
	else
		oops_id++;

	return 0;
}
late_initcall(init_oops_id);

void print_oops_end_marker(void)
{
	init_oops_id();
	pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	kmsg_dump(KMSG_DUMP_OOPS);
}

struct warn_args {
	const char *fmt;
	va_list args;
};

void __warn(const char *file, int line, void *caller, unsigned taint,
	    struct pt_regs *regs, struct warn_args *args)
{
	disable_trace_on_warning();

	pr_warn("------------[ cut here ]------------\n");

	if (file)
		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
			raw_smp_processor_id(), current->pid, file, line,
			caller);
	else
		pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
			raw_smp_processor_id(), current->pid, caller);

	if (args)
		vprintk(args->fmt, args->args);

	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread.  Other CPUs are held off by the
		 * panic_cpu check in panic().
		 */
		panic_on_warn = 0;
		panic("panic_on_warn set ...\n");
	}

	print_modules();

	if (regs)
		show_regs(regs);
	else
		dump_stack();

	print_oops_end_marker();

	/* Just a warning, don't kill lockdep. */
	add_taint(taint, LOCKDEP_STILL_OK);
}

#ifdef WANT_WARN_ON_SLOWPATH
void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
{
	struct warn_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL,
	       &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);

void warn_slowpath_fmt_taint(const char *file, int line,
			     unsigned taint, const char *fmt, ...)
{
	struct warn_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt_taint);

void warn_slowpath_null(const char *file, int line)
{
	__warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL);
}
EXPORT_SYMBOL(warn_slowpath_null);
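
/*
 * Context note, hedged: on architectures that do not provide their own
 * trapping WARN implementation, the generic WARN()/WARN_ON() macros in
 * <asm-generic/bug.h> typically expand into calls to the
 * warn_slowpath_*() helpers above.
 */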
#endif

#ifdef CONFIG_CC_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible void __stack_chk_fail(void)
{
	panic("stack-protector: Kernel stack is corrupted in: %p\n",
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

core_param(panic, panic_timeout, int, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);
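
/*
 * Usage note, hedged: these knobs are normally set on the kernel command
 * line (and, given the 0644 permissions above, can typically also be read
 * or adjusted later under /sys/module/kernel/parameters/), e.g.
 *
 *	panic=10 pause_on_oops=5 panic_on_warn=1 crash_kexec_post_notifiers
 */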

static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);