#include <linux/device.h>
#include <linux/mm.h>
#include <asm/io.h>		/* Needed for i386 to build */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/sched.h>

/*
 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t blocks_per_page;
	size_t size;
	struct device *dev;
	size_t allocation;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned in_use;
	unsigned long bitmap[0];
};
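
/*
 * Worked example of the bookkeeping above (illustrative numbers): with
 * allocation = 4096 and size = 64, blocks_per_page is 4096 / 64 = 64,
 * so the trailing bitmap[] needs 64 bits -- one unsigned long on a
 * 64-bit machine, two on a 32-bit one.  A set bit marks a free block.
 */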

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name,
				 blocks, pages * pool->blocks_per_page,
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return NULL;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		size += align - 1;	/* round up to a multiple of align */
		size &= ~(align - 1);
	}
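	/*
	 * Round-up example: size = 100, align = 64 gives
	 * (100 + 63) & ~63 == 128, the next multiple of the alignment.
	 */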

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	retval = kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
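
/*
 * Usage sketch, not part of this file: a hypothetical driver keeping
 * 64-byte descriptors that must not cross a 4 KB boundary.  The names
 * foo_dev and foo_pool are illustrative assumptions, not a real API.
 *
 *	struct dma_pool *foo_pool;
 *
 *	foo_pool = dma_pool_create("foo-desc", foo_dev, 64, 8, 4096);
 *	if (!foo_pool)
 *		return -ENOMEM;
 *	...
 *	dma_pool_destroy(foo_pool);	(every block freed beforehand)
 */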

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;
	int mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof(long);
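	/*
	 * e.g. blocks_per_page = 96 with BITS_PER_LONG == 64: the two
	 * lines above round 96 bits up to two longs, so mapsize is 16
	 * bytes of bitmap appended to the struct dma_page header.
	 */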

	page = kmalloc(mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev,
					 pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
		memset(page->bitmap, 0xff, mapsize);	/* bit set == free */
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

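/*
 * A page is busy while any of its blocks is still allocated.  Set bits
 * mean free -- including the pad bits past blocks_per_page, which
 * pool_alloc_page() initialized to one -- so the page is busy exactly
 * when some word of the bitmap is not all-ones.
 */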
static inline int is_page_busy(int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	int map, block;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		int i;
		/* only cacheable accesses here ... */
		for (map = 0, i = 0;
		     i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
			if (page->bitmap[map] == 0)
				continue;
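			/*
			 * ffz() on the inverted word yields the first set,
			 * i.e. free, bit in this word of the bitmap.
			 */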
			block = ffz(~page->bitmap[map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit(block, &page->bitmap[map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
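		/*
		 * No free block and no new page: if the caller may sleep,
		 * wait briefly for dma_pool_free() to wake us, then rescan;
		 * otherwise fail the allocation.
		 */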
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit(0, &page->bitmap[0]);
	offset = 0;
 ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
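
/*
 * Allocation sketch (illustrative, not from a real driver), reusing the
 * hypothetical foo_pool from the dma_pool_create() example above:
 *
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_pool_alloc(foo_pool, GFP_KERNEL, &dma);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	(the device DMAs via "dma"; the CPU uses "cpu_addr")
 *	dma_pool_free(foo_pool, cpu_addr, dma);
 */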

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	int map, block;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	if (page->bitmap[map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, dma %Lx already free\n",
			       pool->name, (unsigned long long)dma);
		return;
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	set_bit(block, &page->bitmap[map]);
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
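
/*
 * Managed-variant sketch (illustrative): created from probe(), the pool
 * is tied to the device's devres list, so neither the error nor the
 * remove path needs an explicit destroy.  foo_probe and foo_dev are
 * assumed names, not a real API.
 *
 *	static int foo_probe(struct device *foo_dev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("foo-desc", foo_dev, 64, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		return 0;	(pool is freed automatically on detach)
 *	}
 */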

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);