dmapool.c

#include <linux/device.h>
#include <linux/mm.h>
#include <asm/io.h>		/* Needed for i386 to build */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/sched.h>

/*
 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t blocks_per_page;
	size_t size;
	struct device *dev;
	size_t allocation;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned in_use;
	unsigned long bitmap[0];	/* one bit per block; set == free */
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name,
				 blocks, pages * pool->blocks_per_page,
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
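
/*
 * Reading the resulting "pools" sysfs attribute gives one header line and
 * one line per pool on the device.  For a hypothetical pool named
 * "buffer-32" (32-byte blocks, 128 blocks per page, one page allocated,
 * three blocks in use; values invented for illustration) the output would
 * look roughly like:
 *
 *	poolinfo - 0.1
 *	buffer-32           3  128   32  1
 *
 * i.e. pool name, blocks in use, total blocks, block size and pages.
 */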

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return NULL;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		size += align - 1;	/* round size up to a multiple of align */
		size &= ~(align - 1);
	}

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	retval = kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
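
/*
 * Illustrative sketch (not part of this file): a hypothetical driver whose
 * hardware uses 32-byte descriptors that must not cross a 4 KByte boundary
 * might create its pool roughly like this; the "hypo-td" name, the
 * 32/16/4096 values and the pdev variable are assumptions for the example:
 *
 *	struct dma_pool *td_pool;
 *
 *	td_pool = dma_pool_create("hypo-td", &pdev->dev, 32, 16, 4096);
 *	if (!td_pool)
 *		return -ENOMEM;
 *
 * Since 32 divides 4096 evenly, no returned block can straddle a 4 KByte
 * boundary, and every block is at least 16-byte aligned as requested.
 */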

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;
	int mapsize;

	/* one bit per block, rounded up to a whole number of longs */
	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof(long);

	page = kmalloc(mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev,
					 pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
		memset(page->bitmap, 0xff, mapsize);	/* bit set == free */
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	int map, block;
	size_t offset;
	void *retval;

 restart:
	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		int i;
		/* only cacheable accesses here ... */
		for (map = 0, i = 0;
		     i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
			if (page->bitmap[map] == 0)
				continue;
			block = ffz(~page->bitmap[map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit(block, &page->bitmap[map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit(0, &page->bitmap[0]);
	offset = 0;
 ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
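
/*
 * Illustrative sketch, continuing the hypothetical example above: each block
 * comes back with both a CPU pointer and a bus address, and must later be
 * handed back to dma_pool_free() with exactly that pair.  td_pool, td and
 * td_dma are the made-up names from the earlier sketch:
 *
 *	dma_addr_t td_dma;
 *	void *td;
 *
 *	td = dma_pool_alloc(td_pool, GFP_KERNEL, &td_dma);
 *	if (!td)
 *		return -ENOMEM;
 *	... give td_dma to the device, touch the block through td ...
 *	dma_pool_free(td_pool, td, td_dma);
 */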

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	int map, block;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	/* locate this block's bit: first the bitmap word, then the bit in it */
	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	if (page->bitmap[map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, dma %Lx already free\n",
			       pool->name, (unsigned long long)dma);
		return;
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	set_bit(block, &page->bitmap[map]);
	if (waitqueue_active(&pool->waitq))
		wake_up(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * destroyed automatically on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
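
/*
 * Illustrative sketch, assuming a hypothetical driver probe() that uses the
 * managed API: devres tears the pool down automatically on driver detach, so
 * the remove path needs no dma_pool_destroy() call.  The hypo_probe name and
 * the "hypo-buf" / 64 / 8 / 0 parameters are assumptions for the example:
 *
 *	static int hypo_probe(struct device *dev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("hypo-buf", dev, 64, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		... use dma_pool_alloc()/dma_pool_free() on pool as usual ...
 *		return 0;
 *	}
 */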

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);