dmapool.c 12.8 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
Linus Torvalds's avatar
Linus Torvalds committed
24 25 26 27

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
28 29
#include <linux/kernel.h>
#include <linux/list.h>
Linus Torvalds's avatar
Linus Torvalds committed
30
#include <linux/module.h>
31
#include <linux/mutex.h>
32
#include <linux/poison.h>
Alexey Dobriyan's avatar
Alexey Dobriyan committed
33
#include <linux/sched.h>
34 35 36 37 38
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
Linus Torvalds's avatar
Linus Torvalds committed
39

Matthew Wilcox's avatar
Matthew Wilcox committed
40 41 42 43 44 45
struct dma_pool {		/* the pool */
	struct list_head page_list;	/* all dma_page chunks owned by this pool */
	spinlock_t lock;		/* protects page_list and per-page free lists */
	size_t size;			/* block size handed to callers (post-align) */
	struct device *dev;		/* device doing the DMA; may be NULL */
	size_t allocation;		/* bytes per dma_alloc_coherent() chunk */
	size_t boundary;		/* power-of-two boundary blocks must not cross */
	char name[32];			/* pool name, for diagnostics only */
	wait_queue_head_t waitq;	/* __GFP_WAIT allocators sleeping for a block */
	struct list_head pools;		/* link in the owning device's dma_pools list */
};

Matthew Wilcox's avatar
Matthew Wilcox committed
52 53 54 55
struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;	/* link in dma_pool::page_list */
	void *vaddr;			/* kernel virtual address of the chunk */
	dma_addr_t dma;			/* DMA/bus address of the chunk */
	unsigned int in_use;		/* blocks currently allocated from this page */
	unsigned int offset;		/* head of the in-page free list (offset of
					 * first free block; >= allocation if full) */
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

Matthew Wilcox's avatar
Matthew Wilcox committed
62
static DEFINE_MUTEX(pools_lock);
Linus Torvalds's avatar
Linus Torvalds committed
63 64

static ssize_t
Matthew Wilcox's avatar
Matthew Wilcox committed
65
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds's avatar
Linus Torvalds committed
66 67 68 69 70 71 72 73 74 75 76 77 78 79
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

80
	mutex_lock(&pools_lock);
Linus Torvalds's avatar
Linus Torvalds committed
81 82 83 84 85 86 87 88 89 90 91
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
92 93
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
Matthew Wilcox's avatar
Matthew Wilcox committed
94
				 pool->size, pages);
Linus Torvalds's avatar
Linus Torvalds committed
95 96 97
		size -= temp;
		next += temp;
	}
98
	mutex_unlock(&pools_lock);
Linus Torvalds's avatar
Linus Torvalds committed
99 100 101

	return PAGE_SIZE - size;
}
Matthew Wilcox's avatar
Matthew Wilcox committed
102 103

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
Linus Torvalds's avatar
Linus Torvalds committed
104 105 106 107 108 109 110

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	/* alignment must be a power of two; 0 means "no requirement" */
	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	/* each free block stores an int free-list link, so size >= 4 */
	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	/* each underlying chunk is at least a page */
	allocation = max_t(size_t, size, PAGE_SIZE);

	/* boundary of 0 means "only the chunk edge"; else power of two >= size */
	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		/* first pool on this device also creates the sysfs file */
		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
Linus Torvalds's avatar
Linus Torvalds committed
192

193 194 195
/*
 * pool_initialise_page - thread the free-block list through a new chunk.
 *
 * Each free block's first int holds the offset of the next free block,
 * forming a singly-linked list starting at page->offset (0 for a fresh
 * page).  Blocks are placed so none crosses a pool->boundary multiple;
 * boundary is a power of two >= size (enforced by dma_pool_create).
 */
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		/* if the following block would straddle a boundary, skip
		 * ahead to the boundary itself (any slack is wasted) */
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		/* link this free block to the next one */
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

Matthew Wilcox's avatar
Matthew Wilcox committed
209
/*
 * Allocate one 'allocation'-sized coherent chunk plus its header, thread
 * the free list through it, and link it into the pool.  Returns NULL on
 * failure; caller holds pool->lock if required.
 */
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	pool_initialise_page(pool, page);
	list_add(&page->page_list, &pool->page_list);
	page->in_use = 0;
	page->offset = 0;
	return page;
}

233
static inline int is_page_busy(struct dma_page *page)
Linus Torvalds's avatar
Linus Torvalds committed
234
{
235
	return page->in_use != 0;
Linus Torvalds's avatar
Linus Torvalds committed
236 237
}

Matthew Wilcox's avatar
Matthew Wilcox committed
238
static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
Linus Torvalds's avatar
Linus Torvalds committed
239
{
Matthew Wilcox's avatar
Matthew Wilcox committed
240
	dma_addr_t dma = page->dma;
Linus Torvalds's avatar
Linus Torvalds committed
241 242

#ifdef	CONFIG_DEBUG_SLAB
Matthew Wilcox's avatar
Matthew Wilcox committed
243
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
Linus Torvalds's avatar
Linus Torvalds committed
244
#endif
Matthew Wilcox's avatar
Matthew Wilcox committed
245 246 247
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
Linus Torvalds's avatar
Linus Torvalds committed
248 249 250 251 252 253 254 255 256 257
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
Matthew Wilcox's avatar
Matthew Wilcox committed
258
void dma_pool_destroy(struct dma_pool *pool)
Linus Torvalds's avatar
Linus Torvalds committed
259
{
260
	mutex_lock(&pools_lock);
Matthew Wilcox's avatar
Matthew Wilcox committed
261 262 263
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
264
	mutex_unlock(&pools_lock);
Linus Torvalds's avatar
Linus Torvalds committed
265

Matthew Wilcox's avatar
Matthew Wilcox committed
266 267 268 269
	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
270
		if (is_page_busy(page)) {
Linus Torvalds's avatar
Linus Torvalds committed
271
			if (pool->dev)
Matthew Wilcox's avatar
Matthew Wilcox committed
272 273
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
Linus Torvalds's avatar
Linus Torvalds committed
274 275
					pool->name, page->vaddr);
			else
Matthew Wilcox's avatar
Matthew Wilcox committed
276 277 278
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
Linus Torvalds's avatar
Linus Torvalds committed
279
			/* leak the still-in-use consistent memory */
Matthew Wilcox's avatar
Matthew Wilcox committed
280 281
			list_del(&page->page_list);
			kfree(page);
Linus Torvalds's avatar
Linus Torvalds committed
282
		} else
Matthew Wilcox's avatar
Matthew Wilcox committed
283
			pool_free_page(pool, page);
Linus Torvalds's avatar
Linus Torvalds committed
284 285
	}

Matthew Wilcox's avatar
Matthew Wilcox committed
286
	kfree(pool);
Linus Torvalds's avatar
Linus Torvalds committed
287
}
Matthew Wilcox's avatar
Matthew Wilcox committed
288
EXPORT_SYMBOL(dma_pool_destroy);
Linus Torvalds's avatar
Linus Torvalds committed
289 290 291 292 293 294 295 296 297

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
298
 * If such a memory block can't be allocated, %NULL is returned.
Linus Torvalds's avatar
Linus Torvalds committed
299
 */
Matthew Wilcox's avatar
Matthew Wilcox committed
300 301
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
Linus Torvalds's avatar
Linus Torvalds committed
302
{
Matthew Wilcox's avatar
Matthew Wilcox committed
303 304 305 306 307 308
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
309
 restart:
Linus Torvalds's avatar
Linus Torvalds committed
310
	list_for_each_entry(page, &pool->page_list, page_list) {
311 312
		if (page->offset < pool->allocation)
			goto ready;
Linus Torvalds's avatar
Linus Torvalds committed
313
	}
Matthew Wilcox's avatar
Matthew Wilcox committed
314 315
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
Linus Torvalds's avatar
Linus Torvalds committed
316
		if (mem_flags & __GFP_WAIT) {
Matthew Wilcox's avatar
Matthew Wilcox committed
317
			DECLARE_WAITQUEUE(wait, current);
Linus Torvalds's avatar
Linus Torvalds committed
318

319
			__set_current_state(TASK_INTERRUPTIBLE);
320
			__add_wait_queue(&pool->waitq, &wait);
Matthew Wilcox's avatar
Matthew Wilcox committed
321
			spin_unlock_irqrestore(&pool->lock, flags);
Linus Torvalds's avatar
Linus Torvalds committed
322

Matthew Wilcox's avatar
Matthew Wilcox committed
323
			schedule_timeout(POOL_TIMEOUT_JIFFIES);
Linus Torvalds's avatar
Linus Torvalds committed
324

325 326
			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
Linus Torvalds's avatar
Linus Torvalds committed
327 328 329 330 331 332
			goto restart;
		}
		retval = NULL;
		goto done;
	}

Matthew Wilcox's avatar
Matthew Wilcox committed
333
 ready:
Linus Torvalds's avatar
Linus Torvalds committed
334
	page->in_use++;
335 336
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
Linus Torvalds's avatar
Linus Torvalds committed
337 338 339
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
Matthew Wilcox's avatar
Matthew Wilcox committed
340
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
Linus Torvalds's avatar
Linus Torvalds committed
341
#endif
Matthew Wilcox's avatar
Matthew Wilcox committed
342 343
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
Linus Torvalds's avatar
Linus Torvalds committed
344 345
	return retval;
}
Matthew Wilcox's avatar
Matthew Wilcox committed
346
EXPORT_SYMBOL(dma_pool_alloc);
Linus Torvalds's avatar
Linus Torvalds committed
347

Matthew Wilcox's avatar
Matthew Wilcox committed
348
/*
 * Map a dma address back to the dma_page whose chunk contains it.
 * Returns NULL if the address does not belong to any page in the pool.
 */
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *found = NULL;
	struct dma_page *page;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma >= page->dma &&
		    dma < (page->dma + pool->allocation)) {
			found = page;
			break;
		}
	}
	spin_unlock_irqrestore(&pool->lock, flags);
	return found;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	page = pool_find_page(pool, dma);
	if (!page) {
		/* dma address was never handed out by this pool */
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	CONFIG_DEBUG_SLAB
	/* vaddr and dma must name the same block within the page */
	if ((dma - page->dma) != offset) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	/* walk the page's free list; finding this offset there means the
	 * caller is double-freeing */
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	/* push the block onto the head of the page's free list */
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/* a __GFP_WAIT allocator may be sleeping for exactly this block */
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
Linus Torvalds's avatar
Linus Torvalds committed
441

Tejun Heo's avatar
Tejun Heo committed
442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484
/*
 * Managed DMA pool
 */

/* devres release callback: destroy the pool stored in the devres data. */
static void dmam_pool_release(struct device *dev, void *res)
{
	dma_pool_destroy(*(struct dma_pool **)res);
}

/* devres match callback: true when the devres data holds @match_data. */
static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	struct dma_pool **pool = res;

	return *pool == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr;
	struct dma_pool *pool;

	/* reserve a devres slot first so registration cannot fail later */
	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = dma_pool_create(name, dev, size, align, allocation);
	if (!pool) {
		devres_free(ptr);
		return NULL;
	}

	*ptr = pool;
	devres_add(dev, ptr);
	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
Tejun Heo's avatar
Tejun Heo committed
486 487 488 489 490 491 492 493 494 495 496 497 498 499

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	/* grab the device before the pool (and its dev field) is gone */
	struct device *dev = pool->dev;
	int err;

	dma_pool_destroy(pool);
	/* the devres entry must exist; its release must not run again */
	err = devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool);
	WARN_ON(err);
}
EXPORT_SYMBOL(dmam_pool_destroy);