// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects the second
 *   double word in the page struct. Meaning
 *	A. page->freelist	-> List of objects free in a page
 *	B. page->counters	-> Counters of objects
 *	C. page->frozen		-> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list. The processor that froze the slab is the one who can
 *   perform list operations on the page. Other processors may put objects
 *   onto the freelist but the processor that froze the slab is the only
 *   one that can retrieve the objects from the page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive 		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 * 			options set. This moves	slab handling out of
 * 			the fast path and disables lockless freelists.
 */

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab.  These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
#define __OBJECT_POISON		0x80000000UL /* Poison object */
#define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void memcg_propagate_slab_attrs(struct kmem_cache *s);
static void sysfs_slab_remove(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
static inline void sysfs_slab_remove(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/********************************************************************
 * 			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
#else
	return ptr;
#endif
}

/* Returns the freelist pointer recorded at location ptr_addr. */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
			    (unsigned long)ptr_addr);
}
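
/*
 * Illustrative sketch (not part of the original file): with
 * CONFIG_SLAB_FREELIST_HARDENED, a stored free pointer round-trips through
 * freelist_ptr() because XOR is its own inverse:
 *
 *	stored = (unsigned long)fp ^ s->random ^ freeptr_addr;
 *	loaded = stored ^ s->random ^ freeptr_addr;	(equals (unsigned long)fp)
 *
 * An attacker who overwrites the stored value without knowing s->random and
 * the slot address cannot aim the decoded pointer at a chosen target.
 */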

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return freelist_dereference(s, object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	if (object)
		prefetch(freelist_dereference(s, object + s->offset));
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled())
		return get_freepointer(s, object);

	freepointer_addr = (unsigned long)object + s->offset;
	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr), __idx = 1; \
		__idx <= __objects; \
		__p += (__s)->size, __idx++)

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
	return (p - addr) / s->size;
}

static inline int order_objects(int order, unsigned long size, int reserved)
{
	return ((PAGE_SIZE << order) - reserved) / size;
}

static inline struct kmem_cache_order_objects oo_make(int order,
		unsigned long size, int reserved)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size, reserved)
	};

	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
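
/*
 * Worked example (illustrative, not from the original source): for a
 * hypothetical cache with size 256 and reserved 0 on 4K pages,
 * oo_make(1, 256, 0) packs order 1 and 32 objects into a single word:
 *
 *	x.x = (1 << OO_SHIFT) + ((PAGE_SIZE << 1) - 0) / 256
 *	    = 0x10000 + 32
 *	oo_order(x)   == x.x >> OO_SHIFT == 1
 *	oo_objects(x) == x.x & OO_MASK   == 32
 */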

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}

static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
{
	struct page tmp;
	tmp.counters = counters_new;
	/*
	 * page->counters can cover frozen/inuse/objects as well
	 * as page->_refcount.  If we assign to ->counters directly
	 * we run the risk of losing updates to page->_refcount, so
	 * be careful and only assign to the fields we need.
	 */
	page->frozen  = tmp.frozen;
	page->inuse   = tmp.inuse;
	page->objects = tmp.objects;
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			set_page_slub_counters(page, counters_new);
			slab_unlock(page);
			return true;
		}
		slab_unlock(page);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			set_page_slub_counters(page, counters_new);
			slab_unlock(page);
			local_irq_restore(flags);
			return true;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
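
/*
 * Sketch of the usual caller pattern (illustrative only; the variable names
 * are hypothetical, but the allocation and free paths in this file follow
 * this shape): read freelist and counters, build the new values, then retry
 * until the pair is swapped atomically:
 *
 *	do {
 *		old_freelist = page->freelist;
 *		old_counters = page->counters;
 *		... derive new_freelist / new_counters ...
 *	} while (!cmpxchg_double_slab(s, page,
 *			old_freelist, old_counters,
 *			new_freelist, new_counters,
 *			"caller name"));
 *
 * freelist and counters (objects/inuse/frozen) must change together, which
 * is why a double-word cmpxchg (or the slab_lock() fallback) is needed.
 */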

#ifdef CONFIG_SLUB_DEBUG
/*
 * Determine a map of object in use on a page.
 *
 * Node listlock must be held to guarantee that the page does
 * not vanish from under us.
 */
static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
{
	void *p;
	void *addr = page_address(page);

	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(slab_index(p, s, addr), map);
}

static inline int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata.  This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error.  metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	object = restore_red_left(s, object);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
			length, 1);
	metadata_access_disable();
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;
}

static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
#ifdef CONFIG_STACKTRACE
		struct stack_trace trace;
		int i;

		trace.nr_entries = 0;
		trace.max_entries = TRACK_ADDRS_COUNT;
		trace.entries = p->addrs;
		trace.skip = 3;
		metadata_access_enable();
		save_stack_trace(&trace);
		metadata_access_disable();

		/* See rant in lockdep.c */
		if (trace.nr_entries != 0 &&
		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
			trace.nr_entries--;

		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
			p->addrs[i] = 0;
#endif
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else
		memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t)
{
	if (!t->addr)
		return;

	pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
	{
		int i;
		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				pr_err("\t%pS\n", (void *)t->addrs[i]);
			else
				break;
	}
#endif
}

static void print_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
	print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
	pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
	       page, page->objects, page->inuse, page->freelist, page->flags);

}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");

	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	va_end(args);
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR, "Object ", p,
		      min_t(unsigned long, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p + s->object_size,
			s->inuse - s->object_size);

	if (s->offset)
		off = s->offset + sizeof(void *);
	else
		off = s->inuse;

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}

void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}

static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = object;

	if (s->flags & SLAB_RED_ZONE)
		memset(p - s->red_left_pad, val, s->red_left_pad);

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->object_size - 1);
		p[s->object_size - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->object_size, val, s->inuse - s->object_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;

	metadata_access_enable();
	fault = memchr_inv(start, value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 * 	pointer is the first word of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
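
/*
 * Worked example (illustrative, not from the original source): a 24-byte
 * object with SLAB_RED_ZONE and SLAB_STORE_USER on a 64-bit kernel is laid
 * out roughly as
 *
 *	[red_left_pad][object: 24 bytes][right redzone up to s->inuse]
 *	[free pointer: 8 bytes][2 x struct track][padding to s->size]
 *
 * Here object_size is 24, inuse covers the object plus the right redzone,
 * and s->size spans everything up to the start of the next object.
 */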

static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	metadata_access_enable();
	fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
	print_section(KERN_ERR, "Padding ", end - remainder, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			return 0;

		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(compound_order(page), s->size, s->reserved);
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = page->freelist;
	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(compound_order(page), s->size, s->reserved);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
			 page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
			 page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&page->lru, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&page->lru);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static inline int alloc_consistency_checks(struct kmem_cache *s,
					struct page *page,
					void *object, unsigned long addr)
{
	if (!check_slab(s, page))
		return 0;

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}

static noinline int alloc_debug_processing(struct kmem_cache *s,
					struct page *page,
					void *object, unsigned long addr)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, page, object, addr))
			goto bad;
	}

	/* Success perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, SLUB_RED_ACTIVE);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then lets do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = page->objects;
		page->freelist = NULL;
	}
	return 0;
}

static inline int free_consistency_checks(struct kmem_cache *s,
		struct page *page, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		return 0;
	}

	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != page->slab_cache)) {
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
				 object);
		} else if (!page->slab_cache) {
			pr_err("SLUB <none>: no slab for object 0x%p.\n",
			       object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		return 0;
	}
	return 1;
}

/* Supports checking bulk free of a constructed freelist */
static noinline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	void *object = head;
	int cnt = 0;
	unsigned long uninitialized_var(flags);
	int ret = 0;

	spin_lock_irqsave(&n->list_lock, flags);
	slab_lock(page);

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!check_slab(s, page))
			goto out;
	}

next_object:
	cnt++;

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!free_consistency_checks(s, page, object, addr))
			goto out;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
	init_object(s, object, SLUB_RED_INACTIVE);

	/* Reached end of constructed freelist yet? */
	if (object != tail) {
		object = get_freepointer(s, object);
		goto next_object;
	}
	ret = 1;

out:
	if (cnt != bulk_cnt)
		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
			 bulk_cnt, cnt);

	slab_unlock(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	if (!ret)
		slab_fix(s, "Object at 0x%p not freed", object);
	return ret;
}

static int __init setup_slub_debug(char *str)
{
	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	if (*str == ',')
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		goto check_slabs;

	slub_debug = 0;
	if (*str == '-')
		/*
		 * Switch off all debugging measures.
		 */
		goto out;

	/*
	 * Determine which debug features should be switched on
	 */
	for (; *str && *str != ','; str++) {
		switch (tolower(*str)) {
		case 'f':
			slub_debug |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			slub_debug |= SLAB_RED_ZONE;
			break;
		case 'p':
			slub_debug |= SLAB_POISON;
			break;
		case 'u':
			slub_debug |= SLAB_STORE_USER;
			break;
		case 't':
			slub_debug |= SLAB_TRACE;
			break;
		case 'a':
			slub_debug |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if its minimum
			 * order would increase as a result.
			 */
			disable_higher_order_debug = 1;
			break;
		default:
			pr_err("slub_debug option '%c' unknown. skipped\n",
			       *str);
		}
	}

check_slabs:
	if (*str == ',')
		slub_debug_slabs = str + 1;
out:
	return 1;
}

__setup("slub_debug", setup_slub_debug);
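
/*
 * Example boot-time usage (illustrative), based on the option letters
 * parsed above:
 *
 *	slub_debug		enable the default debug flags for all caches
 *	slub_debug=-		switch all debugging off
 *	slub_debug=FZP		consistency checks, red zoning and poisoning
 *	slub_debug=U,dentry	user tracking only for caches whose name
 *				starts with "dentry"
 */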

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	/*
	 * Enable debugging if selected on the kernel commandline.
	 */
	if (slub_debug && (!slub_debug_slabs || (name &&
		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
		flags |= slub_debug;

	return flags;
}
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr) { return 0; }