/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
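
/*
 * Typical usage (an illustrative sketch only; the names are hypothetical):
 *
 *	pool = dma_pool_create("xyz", dev, 64, 8, 0);
 *	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	... hand 'dma' to the device while the CPU uses 'buf' ...
 *	dma_pool_free(pool, buf, dma);
 *	dma_pool_destroy(pool);
 */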

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
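 *
 * For example, a hypothetical pool of 512-byte descriptors that must not
 * cross a 4KByte boundary could be created with:
 *
 *	pool = dma_pool_create("desc", dev, 512, 32, 4096);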
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	retval = kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

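/*
 * Thread the blocks of a new page onto its embedded free list: the first
 * word of each free block holds the offset of the next free block, and an
 * offset of pool->allocation marks the end of the list.
 */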
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= pool->allocation))
			next = pool->allocation;
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
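 *
 * For example (a hypothetical caller):
 *
 *	dma_addr_t dma;
 *	void *cpu_addr = dma_pool_alloc(pool, GFP_ATOMIC, &dma);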
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
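		/* no memory for a new page: wait for a free (or timeout), then retry */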
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
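	/* pop the first free block off this page's free list */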
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	CONFIG_DEBUG_SLAB
	if ((dma - page->dma) != offset) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
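	/* push this block back onto the front of the page's free list */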
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
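 *
 * For example, from a probe() routine (an illustrative sketch):
 *
 *	pool = dmam_pool_create("foo", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 * No matching dmam_pool_destroy() call is needed on the detach path.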
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);