// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
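
/*
 * Illustrative note (not part of the original file): with CONFIG_DEBUG_SLAB
 * or CONFIG_SLUB_DEBUG_ON, elements parked in the reserve are filled with
 * POISON_FREE bytes (POISON_END last) and re-checked on the way out, so a
 * hypothetical write-after-free is reported via poison_error() above the
 * next time the element is drawn from the reserve:
 *
 *	void *e = mempool_alloc(pool, GFP_KERNEL);
 *
 *	mempool_free(e, pool);			// poisoned if it refills the reserve
 *	((u8 *)e)[0] = 0;			// bug: write after free
 *	e = mempool_alloc(pool, GFP_KERNEL);	// check_element() trips if e
 *						// comes back from the reserve
 */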

static void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_poison_kfree(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_unpoison_slab(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool, gfp_t flags)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element, flags);
	check_element(pool, element);
	return element;
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	while (pool->curr_nr) {
		void *element = remove_element(pool, GFP_KERNEL);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);
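
/*
 * Example (illustrative sketch, not part of this file): tear down a pool
 * and, if it was slab-backed, the hypothetical cache behind it - in that
 * order, since the pool's reserved elements must be freed back to the
 * cache first.
 *
 *	mempool_destroy(my_obj_pool);		// "my_obj_pool" is hypothetical
 *	kmem_cache_destroy(my_obj_cache);	// as is "my_obj_cache"
 */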

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both the
 * alloc_fn() and the free_fn() functions might sleep - as long as
 * mempool_alloc() is not called from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;
	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
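
/*
 * Example (hypothetical): as above, but allocating the pool's bookkeeping
 * (the mempool_t itself and its element array) on NUMA node 0.  The
 * elements still come from alloc_fn(), which is not node-aware here.
 *
 *	pool = mempool_create_node(16, mempool_alloc_slab,
 *				   mempool_free_slab, my_obj_cache,
 *				   GFP_KERNEL, 0);
 */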

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool, GFP_KERNEL);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
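
/*
 * Example (hypothetical numbers): grow the reserve when a deeper queue is
 * configured.  -ENOMEM is returned only when the new element array cannot
 * be allocated; preallocating the extra elements is best-effort, and later
 * mempool_free() calls top the reserve up to the new minimum.
 *
 *	if (mempool_resize(my_obj_pool, 64))
 *		pr_warn("my_obj_pool: could not grow reserve\n");
 */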

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 *
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_entry_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool, gfp_temp);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
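
/*
 * Example (illustrative): the usual allocate/use/return pattern.  A
 * gfp_mask that allows direct reclaim (e.g. GFP_KERNEL or GFP_NOIO) may
 * sleep but never returns NULL here, so only non-sleeping masks such as
 * GFP_ATOMIC need the failure check.
 *
 *	struct my_obj *obj;				// hypothetical type
 *
 *	obj = mempool_alloc(my_obj_pool, GFP_NOIO);	// cannot fail
 *	do_something(obj);				// hypothetical helper
 *	mempool_free(obj, my_obj_pool);
 */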

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
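
/*
 * Example (illustrative): a pool of eight fixed-size kmalloc buffers.
 * The buffer size travels through @pool_data as a cast integer, matching
 * the cast in mempool_kmalloc() above.
 *
 *	mempool_t *buf_pool = mempool_create(8, mempool_kmalloc,
 *					     mempool_kfree,
 *					     (void *)(unsigned long)256);
 */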

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
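
/*
 * Example (illustrative): a reserve of four order-1 (two-page) blocks.
 * The order is passed through @pool_data just like the size above, and
 * elements are struct page pointers.
 *
 *	mempool_t *page_pool = mempool_create(4, mempool_alloc_pages,
 *					      mempool_free_pages,
 *					      (void *)(long)1);
 *	struct page *page = mempool_alloc(page_pool, GFP_NOIO);
 */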