/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_disabled;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 *
	 * The divide by 5 gives the hrtimer several chances (two
	 * or three with the current ratio of the soft and hard
	 * thresholds) to increment before the hardlockup detector
	 * generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
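
/*
 * Worked example with the defaults: watchdog_thresh = 10 makes the soft
 * threshold 20 seconds, so sample_period = 20 * (NSEC_PER_SEC / 5) =
 * 4 seconds. The hrtimer therefore fires five times per soft-lockup
 * window and two or three times per hard-lockup window.
 */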

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	int this_cpu = smp_processor_id();

	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
}

void touch_softlockup_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done locklessly. If a 0 races with a fresh timestamp,
	 * the only consequence is that the softlockup check starts one
	 * cycle later.
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	if (watchdog_enabled) {
		unsigned cpu;

		for_each_present_cpu(cpu) {
			if (per_cpu(watchdog_nmi_touch, cpu) != true)
				per_cpu(watchdog_nmi_touch, cpu) = true;
		}
	}
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}
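
/*
 * Note: the _sync variant additionally sets softlockup_touch_sync, so
 * watchdog_timer_fn() will call sched_clock_tick() to bring the
 * scheduler clock up to date before the timestamp is re-read. This is
 * for callers that touch the watchdog from contexts where the clock may
 * have stopped.
 */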

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
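/*
 * The perf NMI fires roughly once per watchdog_thresh seconds, while
 * the watchdog hrtimer fires two or three times in that span. If
 * hrtimer_interrupts has not advanced between two consecutive NMIs,
 * interrupts have been blocked for the whole window: a hard lockup.
 */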
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif
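
/*
 * Returns 0 if the watchdog was touched within the soft-lockup
 * threshold, otherwise the number of seconds the CPU has been stuck.
 */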

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp(smp_processor_id());

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
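
/*
 * The counter is sized (via hw_nmi_get_sample_period() below) to
 * overflow about once per hard-lockup window, and .pinned keeps the
 * event permanently scheduled on the PMU so other perf users cannot
 * rotate it out; hence the "consumes one hw-PMU counter" message.
 */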

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/*
	 * Check for a hardlockup by making sure our timer interrupt is
	 * incrementing. The timer interrupt should have fired multiple
	 * times before this perf event overflowed. If it hasn't, the
	 * CPU is likely stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup by making sure a high-priority task is
	 * being scheduled. The task touches the watchdog to indicate it
	 * is getting CPU time. If it hasn't within the threshold, some
	 * task is hogging the CPU.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look
		 * like a soft lockup to the watchdog; check whether the
		 * host stopped the VM before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
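
/*
 * To recap: each timer tick feeds both detectors. It increments
 * hrtimer_interrupts (which the NMI path checks in is_hardlockup())
 * and wakes the per-cpu watchdog thread (which refreshes
 * watchdog_touch_ts for is_softlockup()).
 */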

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	if (!watchdog_enabled) {
		kthread_park(current);
		return;
	}

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}
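
/*
 * The smpboot core calls this before each invocation of watchdog();
 * the thread only has work to do when the hrtimer has ticked since the
 * last run, i.e. when hrtimer_interrupts has moved past
 * soft_lockup_hrtimer_cnt.
 */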

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warning("disabled (cpu%i): hardware events not enabled\n",
			 cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	return;
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_enable_all_cpus(void)
{
	unsigned int cpu;

	if (watchdog_disabled) {
		watchdog_disabled = 0;
		for_each_online_cpu(cpu)
			kthread_unpark(per_cpu(softlockup_watchdog, cpu));
	}
}

static void watchdog_disable_all_cpus(void)
{
	unsigned int cpu;

	if (!watchdog_disabled) {
		watchdog_disabled = 1;
		for_each_online_cpu(cpu)
			kthread_park(per_cpu(softlockup_watchdog, cpu));
	}
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog and
 * /proc/sys/kernel/watchdog_thresh
 */

int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	if (watchdog_disabled < 0)
		return -ENODEV;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	set_sample_period();
	if (watchdog_enabled && watchdog_thresh)
		watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	return ret;
}
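
/*
 * Example (paths as named in the comment above): raising the threshold,
 *
 *	echo 20 > /proc/sys/kernel/watchdog_thresh
 *
 * recomputes sample_period (20 * 2 * NSEC_PER_SEC / 5 = 8 seconds);
 * running hrtimers pick the new period up at their next expiry via
 * hrtimer_forward_now() in watchdog_timer_fn().
 */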
#endif /* CONFIG_SYSCTL */
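
/*
 * One smpboot thread per CPU: watchdog_enable() runs on the CPU when
 * the thread is created or unparked (arming the hrtimer and perf
 * event), watchdog_disable() when it is parked, and watchdog() itself
 * whenever watchdog_should_run() sees a new hrtimer tick.
 */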

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

void __init lockup_detector_init(void)
{
	set_sample_period();
	if (smpboot_register_percpu_thread(&watchdog_threads)) {
		pr_err("Failed to create watchdog threads, disabled\n");
		watchdog_disabled = -ENODEV;
	}
}