// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects the second
 *   double word in the page struct. Meaning
 *	A. page->freelist	-> List of free objects in a page
 *	B. page->counters	-> Counters of objects
 *	C. page->frozen		-> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list. The processor that froze the slab is the one who can
 *   perform list operations on the page. Other processors may put objects
 *   onto the freelist but the processor that froze the slab is the only
 *   one that can retrieve the objects from the page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. While it is held, no new slabs may be added
 *   to or removed from the lists, nor may the number of partial slabs be
 *   modified. (Note that the total number of slabs is an atomic value that
 *   may be modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive 		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 * 			options set. This moves	slab handling out of
 * 			the fast path and disables lockless freelists.
 */
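
/*
 * A sketch of the nesting implied by the lock order above (this is the
 * pattern free_debug_processing() below uses; illustrative only):
 *
 *	spin_lock_irqsave(&n->list_lock, flags);
 *	slab_lock(page);
 *	... inspect or modify the slab ...
 *	slab_unlock(page);
 *	spin_unlock_irqrestore(&n->list_lock, flags);
 */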

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab.  These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
/* Use cmpxchg_double */
#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Call stack addresses */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void memcg_propagate_slab_attrs(struct kmem_cache *s);
static void sysfs_slab_remove(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
static inline void sysfs_slab_remove(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/********************************************************************
 * 			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
#else
	return ptr;
#endif
}

/* Returns the freelist pointer recorded at location ptr_addr. */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
			    (unsigned long)ptr_addr);
}
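
/*
 * Illustrative note (not part of the allocator): with
 * CONFIG_SLAB_FREELIST_HARDENED the obfuscation is an involution, since
 * XORing twice with the same key is the identity:
 *
 *	void *stored  = freelist_ptr(s, fp, addr);	<- fp ^ s->random ^ addr
 *	void *decoded = freelist_ptr(s, stored, addr);	<- decoded == fp
 *
 * which is why freelist_dereference() can decode a stored pointer with
 * the same helper that set_freepointer() below uses to encode it.
 */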

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return freelist_dereference(s, object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	if (object)
		prefetch(freelist_dereference(s, object + s->offset));
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled())
		return get_freepointer(s, object);

	freepointer_addr = (unsigned long)object + s->offset;
	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}
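
/*
 * Sketch of how the BUG_ON above catches a naive double free: freeing
 * the same object twice in a row leaves the object at the head of the
 * freelist, so the second free sees object == fp, e.g.
 *
 *	kmem_cache_free(s, p);
 *	kmem_cache_free(s, p);	<- object == fp, BUG_ON fires
 *
 * Corruption that happens to store the object's own address in its free
 * pointer is caught the same way.
 */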

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr), __idx = 1; \
		__idx <= __objects; \
		__p += (__s)->size, __idx++)
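
/*
 * Example use of the iterator above (a sketch; do_something() is a
 * placeholder, 'addr' would be page_address(page)):
 *
 *	void *p;
 *
 *	for_each_object(p, s, addr, page->objects)
 *		do_something(p);
 */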

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
	return (p - addr) / s->size;
}

static inline int order_objects(int order, unsigned long size, int reserved)
{
	return ((PAGE_SIZE << order) - reserved) / size;
}

static inline struct kmem_cache_order_objects oo_make(int order,
		unsigned long size, int reserved)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size, reserved)
	};

	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
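
/*
 * Worked example with hypothetical numbers: on a system with 4K pages,
 * order = 1, size = 256 and reserved = 0 give order_objects() =
 * 8192 / 256 = 32, so oo_make() packs x.x = (1 << OO_SHIFT) + 32 and
 * oo_order()/oo_objects() recover 1 and 32 respectively.
 */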

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}

static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
{
	struct page tmp;
	tmp.counters = counters_new;
	/*
	 * page->counters can cover frozen/inuse/objects as well
	 * as page->_refcount.  If we assign to ->counters directly
	 * we run the risk of losing updates to page->_refcount, so
	 * be careful and only assign to the fields we need.
	 */
	page->frozen  = tmp.frozen;
	page->inuse   = tmp.inuse;
	page->objects = tmp.objects;
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			set_page_slub_counters(page, counters_new);
			slab_unlock(page);
			return true;
		}
		slab_unlock(page);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			set_page_slub_counters(page, counters_new);
			slab_unlock(page);
			local_irq_restore(flags);
			return true;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
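
/*
 * Sketch of the retry pattern callers wrap around the helpers above:
 * speculatively read freelist/counters, compute replacements and retry
 * until the update succeeds ("example" is a placeholder name):
 *
 *	do {
 *		prior = page->freelist;
 *		counters = page->counters;
 *		... compute new_freelist / new_counters ...
 *	} while (!cmpxchg_double_slab(s, page, prior, counters,
 *				      new_freelist, new_counters, "example"));
 */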

#ifdef CONFIG_SLUB_DEBUG
/*
 * Determine a map of objects in use on a page.
 *
 * Node listlock must be held to guarantee that the page does
 * not vanish from under us.
 */
static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
{
	void *p;
	void *addr = page_address(page);

	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(slab_index(p, s, addr), map);
}
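
/*
 * Example caller pattern (a sketch): zero a bitmap of page->objects
 * bits, fill it with get_map() and treat a clear bit as an object in
 * use:
 *
 *	bitmap_zero(map, page->objects);
 *	get_map(s, page, map);
 *	for_each_object(p, s, addr, page->objects)
 *		if (!test_bit(slab_index(p, s, addr), map))
 *			... object is allocated ...
 */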

static inline int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

static char *slub_debug_slabs;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata.  This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error.  metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	object = restore_red_left(s, object);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
			length, 1);
	metadata_access_disable();
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;
}

static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
#ifdef CONFIG_STACKTRACE
		struct stack_trace trace;
		int i;

		trace.nr_entries = 0;
		trace.max_entries = TRACK_ADDRS_COUNT;
		trace.entries = p->addrs;
		trace.skip = 3;
		metadata_access_enable();
		save_stack_trace(&trace);
		metadata_access_disable();

		/* See rant in lockdep.c */
		if (trace.nr_entries != 0 &&
		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
			trace.nr_entries--;

		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
			p->addrs[i] = 0;
#endif
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else
		memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t)
{
	if (!t->addr)
		return;

	pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
	{
		int i;
		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				pr_err("\t%pS\n", (void *)t->addrs[i]);
			else
				break;
	}
#endif
}

static void print_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
	print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
	pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
	       page, page->objects, page->inuse, page->freelist, page->flags);

}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");

	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	va_end(args);
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR, "Object ", p,
		      min_t(unsigned long, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p + s->object_size,
			s->inuse - s->object_size);

	if (s->offset)
		off = s->offset + sizeof(void *);
	else
		off = s->inuse;

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);
	off += kasan_metadata_size(s);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}

void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = object;

	if (s->flags & SLAB_RED_ZONE)
		memset(p - s->red_left_pad, val, s->red_left_pad);

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->object_size - 1);
		p[s->object_size - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->object_size, val, s->inuse - s->object_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;

	metadata_access_enable();
	fault = memchr_inv(start, value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 * 	pointer is the first word of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored, and therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
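
/*
 * Worked example for a hypothetical cache: object_size = 100 with word
 * alignment on 64-bit gives inuse = 104, so redzoning writes val into
 * bytes 100..103 (0xcc while the object is active). With SLAB_STORE_USER
 * the two struct track records follow at s->inuse (after the free
 * pointer when it cannot overlay the object), and s->size spans
 * everything up to and including the final alignment padding.
 */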

static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	metadata_access_enable();
	fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
	print_section(KERN_ERR, "Padding ", end - remainder, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			return 0;

		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(compound_order(page), s->size, s->reserved);
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = page->freelist;
	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(compound_order(page), s->size, s->reserved);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
			 page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
			 page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&page->lru, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&page->lru);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static inline int alloc_consistency_checks(struct kmem_cache *s,
					struct page *page,
					void *object, unsigned long addr)
{
	if (!check_slab(s, page))
		return 0;

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}

static noinline int alloc_debug_processing(struct kmem_cache *s,
					struct page *page,
					void *object, unsigned long addr)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, page, object, addr))
			goto bad;
	}

	/* Success perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, SLUB_RED_ACTIVE);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then lets do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = page->objects;
		page->freelist = NULL;
	}
	return 0;
}

static inline int free_consistency_checks(struct kmem_cache *s,
		struct page *page, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		return 0;
	}

	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != page->slab_cache)) {
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
				 object);
		} else if (!page->slab_cache) {
			pr_err("SLUB <none>: no slab for object 0x%p.\n",
			       object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		return 0;
	}
	return 1;
}

/* Supports checking bulk free of a constructed freelist */
static noinline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	void *object = head;
	int cnt = 0;
	unsigned long uninitialized_var(flags);
	int ret = 0;

	spin_lock_irqsave(&n->list_lock, flags);
	slab_lock(page);

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!check_slab(s, page))
			goto out;
	}

next_object:
	cnt++;

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!free_consistency_checks(s, page, object, addr))
			goto out;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
	init_object(s, object, SLUB_RED_INACTIVE);

	/* Reached end of constructed freelist yet? */
	if (object != tail) {
		object = get_freepointer(s, object);
		goto next_object;
	}
	ret = 1;

out:
	if (cnt != bulk_cnt)
		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
			 bulk_cnt, cnt);

	slab_unlock(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	if (!ret)
		slab_fix(s, "Object at 0x%p not freed", object);
	return ret;
}

static int __init setup_slub_debug(char *str)
{
	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	if (*str == ',')
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		goto check_slabs;

	slub_debug = 0;
	if (*str == '-')
		/*
		 * Switch off all debugging measures.
		 */
		goto out;

	/*
	 * Determine which debug features should be switched on
	 */
	for (; *str && *str != ','; str++) {
		switch (tolower(*str)) {
		case 'f':
			slub_debug |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			slub_debug |= SLAB_RED_ZONE;
			break;
		case 'p':
			slub_debug |= SLAB_POISON;
			break;
		case 'u':
			slub_debug |= SLAB_STORE_USER;
			break;
		case 't':
			slub_debug |= SLAB_TRACE;
			break;
		case 'a':
			slub_debug |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if its minimum
			 * order would increase as a result.
			 */
			disable_higher_order_debug = 1;
			break;
		default:
			pr_err("slub_debug option '%c' unknown. skipped\n",
			       *str);
		}
	}

check_slabs:
	if (*str == ',')
		slub_debug_slabs = str + 1;
out:
	return 1;
}

__setup("slub_debug", setup_slub_debug);
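
/*
 * Example boot parameter usage (see Documentation/vm/slub.txt):
 *
 *	slub_debug		enable full debugging for all slabs
 *	slub_debug=FZ		sanity checks and red zoning for all slabs
 *	slub_debug=,dentry	full debugging for the dentry cache only
 *	slub_debug=F,dentry	sanity checks for the dentry cache only
 */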

slab_flags_t kmem_cache_flags(unsigned long object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	/*
	 * Enable debugging if selected on the kernel commandline.
	 */
	if (slub_debug && (!slub_debug_slabs || (name &&
		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
		flags |= slub_debug;

	return flags;
}
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void<