/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>

/* Watchdog configuration */
static DEFINE_MUTEX(watchdog_proc_mutex);

int __read_mostly nmi_watchdog_enabled;

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
						NMI_WATCHDOG_ENABLED;
#else
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
	else if (!strncmp(str, "1", 1))
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

#endif

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
int __read_mostly soft_watchdog_enabled;
#endif

int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#endif
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
int __read_mostly watchdog_suspended;

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	return 0;
}
void __weak watchdog_nmi_disable(unsigned int cpu)
{
}
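/*
 * A minimal sketch of how an architecture with its own NMI watchdog could
 * override the weak stubs above (the arch_* helpers are hypothetical, only
 * the strong-symbol override mechanism is real):
 *
 *	int watchdog_nmi_enable(unsigned int cpu)
 *	{
 *		return arch_start_nmi_counter(cpu);
 *	}
 *
 *	void watchdog_nmi_disable(unsigned int cpu)
 *	{
 *		arch_stop_nmi_counter(cpu);
 *	}
 */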

/*
 * watchdog_nmi_reconfigure can be implemented to be notified after any
 * watchdog configuration change. The arch hardlockup watchdog should
 * respond to the following variables:
 * - nmi_watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 * - sysctl_hardlockup_all_cpu_backtrace
 * - hardlockup_panic
 * - watchdog_suspended
 */
void __weak watchdog_nmi_reconfigure(void)
{
}


#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
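/*
 * The handlers above back the "softlockup_panic=", "nowatchdog" and
 * "nosoftlockup" kernel command line parameters; booting with "nowatchdog",
 * for example, clears 'watchdog_enabled' before any watchdog threads are
 * registered.
 */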

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds, while
 * soft lockups can have false positives under extreme conditions. So we
 * generally want a higher threshold for soft lockups than for hard lockups:
 * the soft threshold is made twice the hard threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 *
	 * The divide by 5 gives the hrtimer several chances (two or three,
	 * with the current relation between the soft and hard thresholds)
	 * to increment before the hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
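/*
 * Worked example (a sketch using the default watchdog_thresh = 10): the hard
 * lockup threshold is 10 seconds, get_softlockup_thresh() returns 20 seconds,
 * and sample_period = 20 * NSEC_PER_SEC / 5, so the hrtimer fires every
 * 4 seconds and the soft lockup threshold corresponds to five missed kicks.
 */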

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
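/*
 * Typical use (a sketch; the device and its polling helper are hypothetical):
 * code that legitimately keeps a CPU busy for a long time, e.g. while polling
 * slow hardware, can pet the watchdog to avoid a false soft lockup report:
 *
 *	while (!device_ready(dev)) {
 *		cpu_relax();
 *		touch_softlockup_watchdog();
 *	}
 */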

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done locklessly. We don't care if a 0 races with a
	 * timestamp; all it means is that the softlockup check starts
	 * one cycle later.
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* watchdog detector functions */
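/*
 * Called from the arch NMI watchdog (e.g. the perf based hardlockup
 * detector): if the hrtimer interrupt count has not advanced since the
 * previous NMI sample, timer interrupts are no longer being serviced on
 * this CPU and a hard lockup is reported.
 */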
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (atomic_read(&watchdog_park_in_progress) != 0)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup. This is done by making sure a
	 * high-priority task is being scheduled. The task touches the
	 * watchdog to indicate it is getting CPU time. If it hasn't,
	 * that is a good indication some task is hogging the CPU.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host, it can look to
		 * the watchdog like a soft lockup. Check whether the host
		 * stopped the VM before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once per sample period (4 seconds by default) to reset the
 * softlockup timestamp. If this gets delayed for more than
 * 2*watchdog_thresh seconds then the debug-printout triggers in
 * watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();

	/*
	 * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
	 * failure path. Check for failures that can occur asynchronously -
	 * for example, when CPUs are on-lined - and shut down the hardware
	 * perf event on each CPU accordingly.
	 *
	 * The only non-obvious place this bit can be cleared is through
	 * watchdog_nmi_enable(), so a pr_info() is placed there.  Placing a
	 * pr_info here would be too noisy as it would result in a message
	 * every few seconds if the hardlockup was disabled but the softlockup
	 * enabled.
	 */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		watchdog_nmi_disable(cpu);
}

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
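/*
 * The descriptor above wires the per-CPU "watchdog/%u" threads into the
 * smpboot infrastructure: setup/unpark call watchdog_enable() on the CPU,
 * park/cleanup call watchdog_disable(), and the thread function runs
 * whenever watchdog_should_run() observes a new hrtimer interrupt.
 */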

/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
	int cpu, ret = 0;

	atomic_set(&watchdog_park_in_progress, 1);

	for_each_watchdog_cpu(cpu) {
		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
		if (ret)
			break;
	}

	atomic_set(&watchdog_park_in_progress, 0);

	return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
	int cpu;

	for_each_watchdog_cpu(cpu)
		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}

static int update_watchdog_all_cpus(void)
{
	int ret;

	ret = watchdog_park_threads();
	if (ret)
		return ret;

	watchdog_unpark_threads();

	return 0;
}

static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
							     &watchdog_cpumask);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		err = update_watchdog_all_cpus();

		if (err) {
			watchdog_disable_all_cpus();
			pr_err("Failed to update lockup detectors, disabled\n");
		}
	}

	if (err)
		watchdog_enabled = 0;

	return err;
}

static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

#ifdef CONFIG_SYSCTL
static int watchdog_update_cpus(void)
{
	return smpboot_update_cpumask_percpu_thread(
		    &watchdog_threads, &watchdog_cpumask);
}
#endif

#else /* SOFTLOCKUP */
static int watchdog_park_threads(void)
{
	return 0;
}

static void watchdog_unpark_threads(void)
{
}

static int watchdog_enable_all_cpus(void)
{
	return 0;
}

static void watchdog_disable_all_cpus(void)
{
}

#ifdef CONFIG_SYSCTL
static int watchdog_update_cpus(void)
{
	return 0;
}
#endif

static void set_sample_period(void)
{
}
#endif /* SOFTLOCKUP */

/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
	int ret = 0;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);
	/*
	 * Multiple suspend requests can be active in parallel (counted by
	 * the 'watchdog_suspended' variable). If the watchdog threads are
	 * running, the first caller takes care that they will be parked.
	 * The state of 'watchdog_running' cannot change while a suspend
	 * request is active (see related code in 'proc' handlers).
	 */
	if (watchdog_running && !watchdog_suspended)
		ret = watchdog_park_threads();

	if (ret == 0)
		watchdog_suspended++;
	else {
		watchdog_disable_all_cpus();
		pr_err("Failed to suspend lockup detectors, disabled\n");
		watchdog_enabled = 0;
	}

	watchdog_nmi_reconfigure();

	mutex_unlock(&watchdog_proc_mutex);

	return ret;
}

/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
	mutex_lock(&watchdog_proc_mutex);

	watchdog_suspended--;
	/*
	 * The watchdog threads are unparked if they were previously running
	 * and if there is no more active suspend request.
	 */
	if (watchdog_running && !watchdog_suspended)
		watchdog_unpark_threads();

	watchdog_nmi_reconfigure();

	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
}
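/*
 * Typical pairing (a sketch; the work in the middle is hypothetical):
 *
 *	if (lockup_detector_suspend() == 0) {
 *		reprogram_perf_counters();
 *		lockup_detector_resume();
 *	}
 *
 * Since 'watchdog_suspended' is only incremented on success,
 * lockup_detector_resume() should be called only after a successful suspend.
 */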

#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	watchdog_nmi_reconfigure();

	return err;

}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);

		/*
		 * Update the run state of the lockup detectors. There is _no_
		 * need to check the value returned by proc_watchdog_update()
		 * and to restore the previous value of 'watchdog_enabled' as
		 * both lockup detectors are disabled if proc_watchdog_update()
		 * returns an error.
		 */
		if (old == new)
			goto out;

		err = proc_watchdog_update();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}
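/*
 * From user space the handlers below appear as sysctl files, e.g.
 * (a sketch of typical shell usage):
 *
 *	echo 0 > /proc/sys/kernel/nmi_watchdog    # disable hard lockup detector
 *	echo 1 > /proc/sys/kernel/soft_watchdog   # enable soft lockup detector
 *	cat /proc/sys/kernel/watchdog             # combined state of both
 */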

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period. Restore on failure.
	 */
	new = ACCESS_ONCE(watchdog_thresh);
	if (old == new)
		goto out;

	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}
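/*
 * Example (a sketch): raise the hard lockup threshold to 30 seconds, which
 * also raises the soft lockup threshold to 60 seconds and recomputes the
 * sample period:
 *
 *	echo 30 > /proc/sys/kernel/watchdog_thresh
 */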

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);

		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (watchdog_update_cpus() != 0)
				pr_err("cpumask update failed\n");
		}

		watchdog_nmi_reconfigure();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}
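/*
 * Example (a sketch; the sysctl name follows the handlers above): restrict
 * the watchdog to CPUs 0-3, including any CPU in that range that is only
 * brought online later:
 *
 *	echo 0-3 > /proc/sys/kernel/watchdog_cpumask
 */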

#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}