/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

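/* All CMA areas registered during early boot, and how many slots are in use. */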
struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

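/*
 * Mask, in units of bitmap bits, that the starting bit of an allocation
 * must satisfy to honour @align_order. Zero means no alignment beyond the
 * bitmap granularity is needed.
 */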
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in units of order_per_bit.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

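/* Convert a page count to the number of bitmap bits that cover it. */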
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

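/* Mark the bitmap bits covering @count pages starting at @pfn as free. */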
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

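/*
 * Make a reserved area usable: allocate its allocation bitmap and release
 * every pageblock in the range to the page allocator as MIGRATE_CMA.
 */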
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

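/*
 * Activate all registered CMA areas. This runs as a core initcall so that
 * the bitmaps can be allocated once the slab allocator is available.
 */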
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If NULL, a default "cmaN" name is generated.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area.
 * @name: The name of the area. See cma_init_reserved_mem().
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
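 *
 * Illustrative usage (a hypothetical early-boot caller, not from this file):
 *
 *	struct cma *cma;
 *	int ret = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *					 "example", &cma);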
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages both ends in CMA area could be merged into adjacent unmovable
	 * migratetype page by page allocator's buddy algorithm. In the case,
	 * you couldn't get a contiguous memory, which is not what we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
278 279 280 281
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

#ifdef CONFIG_CMA_DEBUG
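/* Log the runs of free bits in the area's bitmap, for failure diagnostics. */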
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit;
	unsigned long start = 0;
	unsigned int nr_zero, nr_total = 0;

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
		if (next_zero_bit >= cma->count)
			break;
		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
		nr_total += nr_zero;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during the allocation.
 *
 * This function allocates part of contiguous memory from the specified
 * contiguous memory area.
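 *
 * Illustrative usage (hypothetical caller, not from this file), requesting
 * 16 contiguous pages aligned to a 2^4-page boundary:
 *
 *	struct page *page = cma_alloc(cma, 16, 4, GFP_KERNEL);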
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

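/*
 * Call @it for each registered CMA area; stop and return its value on the
 * first non-zero result.
 */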
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}