/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
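
/*
 * A minimal usage sketch (the driver names and sizes below are
 * illustrative, not part of this file):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	pool = dma_pool_create("buffers", &pdev->dev, 64, 8, 0);
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	...
 *	dma_pool_free(pool, vaddr, dma);
 *	dma_pool_destroy(pool);
 */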

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools) &&
	    device_create_file(dev, &dev_attr_pools)) {
		/* must not return with pools_lock still held */
		mutex_unlock(&pools_lock);
		kfree(retval);
		return NULL;
	} else
		list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
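
/*
 * Example (a sketch; the device and sizes are hypothetical): create a
 * pool of 64-byte blocks, each aligned to 32 bytes and guaranteed not
 * to cross a 4 KB boundary:
 *
 *	pool = dma_pool_create("xmit_bufs", &pdev->dev, 64, 32, 4096);
 *
 * Passing 0 as @boundary imposes no restriction beyond the allocation
 * size itself.
 */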

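/*
 * pool_initialise_page() threads an in-band free list through a fresh
 * page: the first 4 bytes of each free block hold the offset of the
 * next free block, with 'allocation' serving as the end-of-list
 * sentinel.  Worked example (hypothetical numbers): for size = 1536,
 * boundary = 4096 and allocation = 4096, the chain built is
 * 0 -> 1536 -> 4096 (end); the bytes at offsets 3072..4095 stay unused
 * so that no block straddles the 4 KB boundary line.
 */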
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags);
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
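	/* pop the first free block off the page's in-band free list */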
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
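
/*
 * Example (sketch, continuing the hypothetical pool above): allocate
 * one block and capture both of its addresses:
 *
 *	dma_addr_t dma;
 *	void *vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *
 * On success, 'vaddr' is what the CPU uses and 'dma' is what the
 * device uses; both name the same block of pool->size bytes.
 */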

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
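	/* push the block back as the new head of the page's free list */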
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
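
/*
 * Example (sketch): return the block allocated above.  The vaddr/dma
 * pair must be exactly what the matching dma_pool_alloc() reported:
 *
 *	dma_pool_free(pool, vaddr, dma);
 */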

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
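
/*
 * Example (sketch, inside a hypothetical probe routine).  As with other
 * devres interfaces, the pool is torn down automatically on driver
 * detach, so error and remove paths need no dma_pool_destroy():
 *
 *	pool = dmam_pool_create("rx_bufs", &pdev->dev, 128, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */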

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
	dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);