/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
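/*
 * Illustrative note (not part of the original source): if one context
 * ever does
 *	spin_lock(&A); spin_lock(&B);
 * and another context later does
 *	spin_lock(&B); spin_lock(&A);
 * lockdep records the A->B and B->A class dependencies and reports the
 * inversion right away, even though no deadlock happened this time.
 */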
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#include <linux/slab.h>
#endif

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* prevent any recursions within lockdep from causing deadlocks */
	current->lockdep_recursion++;
	return 1;
}

static inline int graph_unlock(void)
{
	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
		/*
		 * The lockdep graph lock isn't locked while we expect it to
		 * be, we're confused now, bye!
		 */
		return DEBUG_LOCKS_WARN_ON(1);
	}

	current->lockdep_recursion--;
	arch_spin_unlock(&lockdep_lock);
	return 0;
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	arch_spin_unlock(&lockdep_lock);

	return ret;
}

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * All data structures here are protected by the global debug_lock.
 *
 * Mutex key structs only get allocated once, during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	if (!hlock->class_idx) {
		/*
		 * Someone passed in garbage, we give up.
		 */
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
	return local_clock();
}

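/*
 * Descriptive note (added): lock_point() records @ip in the first free slot
 * of @points[] (or finds the slot that already holds it) and returns its
 * index; an index of LOCKSTAT_POINTS means the table was full and the
 * point could not be recorded.
 */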
static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}

static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}

struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}

static void put_lock_stats(struct lock_class_stats *stats)
{
	put_cpu_var(cpu_lock_stats);
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
	put_lock_stats(stats);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct hlist_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct hlist_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
	u32 k0 = key, k1 = key >> 32;

	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */

	return k0 | (u64)k1 << 32;
}
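/*
 * Sketch (added note) of how callers use this: the running key for a lock
 * sequence is built up one lock at a time, roughly
 *	chain_key = iterate_chain_key(chain_key, class_idx);
 * so a chain of held locks maps to a single 64-bit value that can be
 * looked up in chainhash_table.
 */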

void lockdep_off(void)
{
	current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. Returning 1 here would allow everything else. */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static void print_lockdep_off(const char *bug_msg)
{
	printk(KERN_DEBUG "%s\n", bug_msg);
	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 0;
	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->entries = stack_trace + nr_stack_trace_entries;

	trace->skip = 3;

	save_stack_trace(trace);

	/*
	 * Some daft arches put -1 at the end to indicate it's a full trace.
	 *
	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
	 * complete trace that maxes out the entries provided will be reported
	 * as incomplete, friggin useless </rant>
	 */
	if (trace->nr_entries != 0 &&
	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
		trace->nr_entries--;

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
		dump_stack();

		return 0;
	}

	return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
};
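/*
 * For example (added note), the LOCKDEP_STATE(HARDIRQ) expansion of
 * __USAGE() above produces entries such as
 *	[LOCK_USED_IN_HARDIRQ]      = "IN-HARDIRQ-W",
 *	[LOCK_ENABLED_HARDIRQ_READ] = "HARDIRQ-ON-R",
 * which are the strings printed in usage reports.
 */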

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}

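/*
 * Summary (added note) of the usage characters computed below, see also
 * Documentation/locking/lockdep-design.txt:
 *	'.'	never used in this IRQ context, never acquired with it enabled
 *	'+'	acquired with this IRQ state enabled
 *	'-'	acquired while in this IRQ context
 *	'?'	both of the above
 */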
static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	char c = '.';

	if (class->usage_mask & lock_flag(bit + 2))
		c = '+';
	if (class->usage_mask & lock_flag(bit)) {
		c = '-';
		if (class->usage_mask & lock_flag(bit + 2))
			c = '?';
	}

	return c;
}

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE) 						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}

static void __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(KERN_CONT "%s", name);
	} else {
		printk(KERN_CONT "%s", name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		if (class->subclass)
			printk(KERN_CONT "/%d", class->subclass);
	}
}

static void print_lock_name(struct lock_class *class)
{
	char usage[LOCK_USAGE_CHARS];

	get_usage_chars(class, usage);

	printk(KERN_CONT " (");
	__print_lock_name(class);
	printk(KERN_CONT "){%s}", usage);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk(KERN_CONT "%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	/*
	 * We can be called locklessly through debug_show_all_locks() so be
	 * extra careful, the hlock might have been released and cleared.
	 */
	unsigned int class_idx = hlock->class_idx;

	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
	barrier();

	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
		printk(KERN_CONT "<RELEASED>\n");
		return;
	}

	print_lock_name(lock_classes + class_idx - 1);
	printk(KERN_CONT ", at: [<%p>] %pS\n",
		(void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *curr)
{
	int i, depth = curr->lockdep_depth;

	if (!depth) {
		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
		return;
	}
	printk("%d lock%s held by %s/%d:\n",
		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));

	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(curr->held_locks + i);
	}
}

static void print_kernel_ident(void)
{
	printk("%s %.*s %s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version,
		print_tainted());
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
#ifdef __KERNEL__
static int static_obj(void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}
#endif

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}

/*
 * Look up a lock's class in the hash-table. Registration of new classes,
 * if a class is not present yet, is done by register_lock_class(). The
 * result is cached in the lock object itself, so the actual hash lookup
 * should happen only once per lock object.
 */
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;
	bool is_static = false;

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}

	/*
	 * Static locks do not have their class-keys yet - for them the key
	 * is the lock object itself. If the lock is in the per cpu area,
	 * the canonical address of the lock (per cpu offset removed) is
	 * used.
	 */
	if (unlikely(!lock->key)) {
		unsigned long can_addr, addr = (unsigned long)lock;

		if (__is_kernel_percpu_address(addr, &can_addr))
			lock->key = (void *)can_addr;
		else if (__is_module_percpu_address(addr, &can_addr))
			lock->key = (void *)can_addr;
		else if (static_obj(lock))
			lock->key = (void *)lock;
		else
			return ERR_PTR(-EINVAL);
		is_static = true;
	}

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We do an RCU walk of the hash, see lockdep_free_key_range().
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return NULL;

	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key) {
			/*
			 * Huh! same key, different name? Did someone trample
			 * on some memory? We're most confused.
			 */
			WARN_ON_ONCE(class->name != lock->name);
			return class;
		}
	}

	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
}

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
static void cross_init(struct lockdep_map *lock, int cross);
static int cross_lock(struct lockdep_map *lock);
static int lock_acquire_crosslock(struct held_lock *hlock);
static int lock_release_crosslock(struct lockdep_map *lock);
#else
static inline void cross_init(struct lockdep_map *lock, int cross) {}
static inline int cross_lock(struct lockdep_map *lock) { return 0; }
static inline int lock_acquire_crosslock(struct held_lock *hlock) { return 2; }
static inline int lock_release_crosslock(struct lockdep_map *lock) { return 2; }
#endif

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	class = look_up_lock_class(lock, subclass);
	if (likely(!IS_ERR_OR_NULL(class)))
		goto out_set_class_cache;

	/*
	 * Debug-check: all keys must be persistent!
	 */
	if (IS_ERR(class)) {
		debug_locks_off();
		printk("INFO: trying to register non-static key.\n");
		printk("the code is fine but needs lockdep annotation.\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	if (!graph_lock()) {
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key)
			goto out_unlock_set;
	}

	/*
	 * Allocate a new key from the static array, and add it to
	 * the hash:
	 */
	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		if (!debug_locks_off_graph_unlock()) {
			return NULL;
		}

		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
		dump_stack();
		return NULL;
	}
	class = lock_classes + nr_lock_classes++;
	debug_atomic_inc(nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	INIT_LIST_HEAD(&class->lock_entry);
	INIT_LIST_HEAD(&class->locks_before);
	INIT_LIST_HEAD(&class->locks_after);
	class->name_version = count_matching_names(class);
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	hlist_add_head_rcu(&class->hash_entry, hash_head);
	/*
	 * Add it to the global list of classes:
	 */
	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

	if (verbose(class)) {
		graph_unlock();

		printk("\nnew class %p: %s", class->key, class->name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		printk(KERN_CONT "\n");
		dump_stack();

		if (!graph_lock()) {
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();

out_set_class_cache:
	if (!subclass || force)
		lock->class_cache[0] = class;
	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		lock->class_cache[subclass] = class;

	/*
	 * Hash collision, did we smoke some? We found a class with a matching
	 * hash but the subclass -- which is hashed in -- didn't match.
	 */
	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
		dump_stack();
		return NULL;
	}
	return list_entries + nr_list_entries++;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *this, struct list_head *head,
			    unsigned long ip, int distance,
			    struct stack_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->distance = distance;
	entry->trace = *trace;
	/*
	 * Both allocation and removal are done under the graph lock; but
	 * iteration is under RCU-sched; see look_up_lock_class() and
	 * lockdep_free_key_range().
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}

/*
 * Keep the queue size a power of 2 so that index wrap-around can be done
 * with a cheap mask (CQ_MASK) instead of a modulo operation.
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build
 * the shortest path from the next lock to be acquired to the
 * previous held lock if there is a circular dependency between them.
 */
struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int  front, rear;
};

static struct circular_queue lock_cq;
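/*
 * Ring-buffer semantics of the queue above (added note): the queue is empty
 * when front == rear and full when advancing rear would make it equal front,
 * so at most MAX_CIRCULAR_QUEUE_SIZE - 1 elements can be queued at once.
 */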

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return 0;
}

static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}

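/*
 * Note on the "visited" bookkeeping used by the BFS below: instead of
 * clearing a per-class flag between searches, __cq_init() bumps
 * lockdep_dependency_gen_id and a class counts as visited when its
 * dep_gen_id matches the current generation.
 */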
static inline void mark_lock_accessed(struct lock_list *lock,
					struct lock_list *parent)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
	lock->parent = parent;
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}

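/*
 * Overview (added note) of __bfs() below: a breadth-first walk of the
 * dependency graph, in either the locks_after (forward) or locks_before
 * (backward) direction. Return values, as consumed by the wrappers
 * further down:
 *	 0	match found, *target_entry points at it
 *	 1	the walk completed without finding a match
 *	-1	the bfs queue overflowed
 *	-2	a corrupted lock_list entry (NULL class) was encountered
 */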
static int __bfs(struct lock_list *source_entry,
		 void *data,
		 int (*match)(struct lock_list *entry, void *data),
		 struct lock_list **target_entry,
		 int forward)
{
	struct lock_list *entry;
	struct list_head *head;
	struct circular_queue *cq = &lock_cq;
	int ret = 1;

	if (match(source_entry, data)) {
		*target_entry = source_entry;
		ret = 0;
		goto exit;
	}

	if (forward)
		head = &source_entry->class->locks_after;
	else
		head = &source_entry->class->locks_before;

	if (list_empty(head))
		goto exit;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)source_entry);

	while (!__cq_empty(cq)) {
		struct lock_list *lock;

		__cq_dequeue(cq, (unsigned long *)&lock);

		if (!lock->class) {
			ret = -2;
			goto exit;
		}

		if (forward)
			head = &lock->class->locks_after;
		else
			head = &lock->class->locks_before;

		DEBUG_LOCKS_WARN_ON(!irqs_disabled());

		list_for_each_entry_rcu(entry, head, entry) {
			if (!lock_accessed(entry)) {
				unsigned int cq_depth;
				mark_lock_accessed(entry, lock);
				if (match(entry, data)) {
					*target_entry = entry;
					ret = 0;
					goto exit;
				}

				if (__cq_enqueue(cq, (unsigned long)entry)) {
					ret = -1;
					goto exit;
				}
				cq_depth = __cq_get_elem_count(cq);
				if (max_bfs_queue_depth < cq_depth)
					max_bfs_queue_depth = cq_depth;
			}
		}
	}
exit:
	return ret;
}

static inline int __bfs_forwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 1);

}

static inline int __bfs_backwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 0);

}

/*
 * Recursive, forwards-direction lock-dependency checking, used for
 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 */

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, int depth)
{
	if (debug_locks_silent)
		return 0;
	printk("\n-> #%u", depth);
	print_lock_name(target->class);
	printk(KERN_CONT ":\n");
	print_stack_trace(&target->trace, 6);

	return 0;
}

static void
print_circular_lock_scenario(struct held_lock *src,
			     struct held_lock *tgt,
			     struct lock_list *prt)
{
	struct lock_class *source = hlock_class(src);
	struct lock_class *target = hlock_class(tgt);
	struct lock_class *parent = prt->class;

	/*
	 * A direct locking problem where unsafe_class lock is taken
	 * directly by safe_class lock, then all we need to show
	 * is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) where this lock is
	 * not the same as the safe lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the safe_class lock to the unsafe_class lock.
	 */
	if (parent != source) {
		printk("Chain exists of:\n  ");
		__print_lock_name(source);
		printk(KERN_CONT " --> ");
		__print_lock_name(parent);
		printk(KERN_CONT " --> ");
		__print_lock_name(target);
		printk(KERN_CONT "\n\n");
	}
