/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
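
/*
 * Reading that attribute (e.g. via /sys/devices/.../pools) yields one
 * line per pool after a version header.  A hypothetical pool named
 * "buffer-2048" with 2048-byte blocks (all values invented for
 * illustration) might read:
 *
 *	poolinfo - 0.1
 *	buffer-2048         3    8 2048  4
 *
 * i.e. name, blocks in use, total blocks, block size and pages, per the
 * scnprintf() format string in show_pools() above.
 */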

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
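
/*
 * A minimal usage sketch (driver and structure names invented for
 * illustration): a driver typically creates its pool at probe time and
 * destroys it at remove time, after every block has been returned with
 * dma_pool_free().
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("foo-desc", &pdev->dev,
 *			       sizeof(struct foo_desc), 16, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	dma_pool_destroy(pool);
 */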

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
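
/*
 * Illustration (numbers chosen for the example): with size 96, allocation
 * PAGE_SIZE (4096) and boundary 256, the chain built above runs
 * 0 -> 96 -> 256 -> 352 -> 512 -> ..., giving two blocks per 256-byte
 * segment; a third block starting at offset 192 would reach past the
 * boundary, so the chain skips ahead to 256 instead.  Note the test uses
 * >=, so a block that would end exactly on a boundary is also skipped.
 */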

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
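
/*
 * Usage sketch (names invented for illustration): the returned pointer is
 * the CPU's view of the block, while the dma_addr_t written through
 * @handle is the address to program into the device.
 *
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!vaddr)
 *		return -ENOMEM;
 */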

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
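
/*
 * The matching release for the dma_pool_alloc() sketch above (same
 * invented names): both addresses must be exactly the pair that
 * dma_pool_alloc() handed out.
 *
 *	dma_pool_free(pool, vaddr, dma);
 */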

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
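
/*
 * A minimal sketch (hypothetical probe function): with the managed
 * variant no explicit destroy is needed; devres tears the pool down
 * when the driver detaches.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("foo", dev, 64, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */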

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);