/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

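/*
 * Example usage, assuming the usual locktorture.ko module name (the
 * parameter names below are the ones defined in this file):
 *
 *   modprobe locktorture torture_type=rw_lock_irq \
 *            nwriters_stress=4 nreaders_stress=8 stat_interval=30
 *
 * This runs four write-side and eight read-side kthreads against an
 * irq-disabling reader-writer lock, printing statistics every 30 seconds.
 */
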
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

static int torture_runnable = IS_ENABLED(MODULE);
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
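 * The read-side hooks may be left NULL for exclusive-only primitives
 * such as spin_lock and mutex_lock, and init is supplied only by locks
 * that need runtime setup (currently just percpu_rwsem).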
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}
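
/*
 * Stashing the irqsave flags in the shared cxt.cur_ops->flags word is
 * safe here only because that word is written and read while
 * torture_spinlock is held, so the accesses are serialized by the lock
 * itself.
 */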

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock       = torture_rwlock_read_lock,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock       = torture_rwlock_read_lock_irq,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>
static DEFINE_WW_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);

static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}
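
/*
 * The loop above follows the wound/wait backoff protocol: when
 * ww_mutex_lock() returns -EDEADLK, every mutex acquired so far is
 * released, the contended mutex is then taken with ww_mutex_lock_slow(),
 * and it is moved to the head of the list so that the remaining mutexes
 * are reacquired after it.
 */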

static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations. When the
		 * task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then restored back to its original prio, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}
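
/*
 * The net effect is that writers occasionally run at the top SCHED_FIFO
 * priority and later drop back to SCHED_NORMAL, exercising the rtmutex's
 * priority-inheritance machinery; a NULL trsp forces an immediate
 * deboost so that a stopping kthread exits at its normal priority.
 */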

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost     = torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock       = torture_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock       = torture_percpu_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};
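
/*
 * Note that percpu_rw_semaphore is strongly reader-biased:
 * percpu_down_read() is nearly contention-free, while
 * percpu_down_write() must force readers onto the slow path and wait
 * for them to drain, so write acquisitions here are expected to be
 * comparatively expensive.
 */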

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = 1;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = 1;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = 0;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create an lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
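
/*
 * With illustrative (not measured) numbers, the line produced above
 * looks like:
 *
 *   Writes:  Total: 93746064  Max/Min: 255/12 ???  Fail: 0
 *
 * where "???" flags a max/min acquisition imbalance of more than 2:1
 * and "!!!" would flag exclusion failures.
 */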

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
805
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
806 807
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
808
		 verbose, shuffle_interval, stutter, shutdown_secs,
809 810 811 812 813 814 815
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module. As
	 * such, only perform the underlying torture-specific cleanups,
	 * and avoid anything related to locktorture.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;	/* guard against a second cleanup pass */
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 && nreaders_stress == 0) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (cxt.cur_ops->init)
		cxt.cur_ops->init();

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (strncmp(torture_type, "rtmutex", 7) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = 0;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa), GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default, distribute readers and writers
			 * evenly, while still running the same total
			 * number of threads as the writer-only locks
			 * default to.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			lock_is_read_held = 0;
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa), GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]), GFP_KERNEL);
		if (writer_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]), GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or even let the user choose the
	 * policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);