// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = memblock_virt_alloc_node(array_size, nid);

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
		     break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section* ms)
{
	return (int)(ms - mem_section[0]);
}
#endif
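
/*
 * Illustrative sketch, not part of the original file: __section_nr() is the
 * inverse of __nr_to_section() for any section whose root is already set up,
 * so code can hop between a section number and its struct mem_section.  The
 * helper name below is made up for this example only.
 */
static void __maybe_unused sparse_section_nr_selftest(unsigned long nr)
{
	struct mem_section *ms = __nr_to_section(nr);

	if (ms)
		VM_BUG_ON((unsigned long)__section_nr(ms) != nr);
}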
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
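
/*
 * Illustrative sketch, not part of the original file: memory_present() below
 * stores the node with sparse_encode_early_nid() and sparse_early_nid()
 * recovers it, e.g.
 *
 *	ms->section_mem_map = sparse_encode_early_nid(nid) | SECTION_IS_ONLINE;
 *	...
 *	WARN_ON(sparse_early_nid(ms) != nid);
 *
 * The encoded nid only lives until sparse_init_one_section() replaces
 * section_mem_map with the encoded mem_map pointer.
 */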

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}
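
/*
 * Worked example, not from the original file: with 4-level page tables on
 * x86-64, MAX_PHYSMEM_BITS is 46 and PAGE_SHIFT is 12, so max_sparsemem_pfn
 * is 1UL << 34, i.e. the checks above clamp any range that reaches past
 * 64 TiB of physical address space.  The exact limit is architecture
 * dependent.
 */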

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for present_section() on each.  But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
int __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	int section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

static inline int next_present_section_nr(int section_nr)
{
	do {
		section_nr++;
		if (present_section_nr(section_nr))
			return section_nr;
	} while ((section_nr < NR_MEM_SECTIONS) &&
		 (section_nr <= __highest_present_section_nr));

	return -1;
}
#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr >= 0) &&				\
	      (section_nr < NR_MEM_SECTIONS) &&			\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))
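
/*
 * Illustrative sketch, not part of the original file: a typical user of the
 * iterator above.  The helper name is made up for this example; it simply
 * counts the sections that have been marked present so far.
 */
static unsigned long __init __maybe_unused count_present_sections(void)
{
	unsigned long count = 0;
	int pnum;

	for_each_present_section_nr(0, pnum)
		count++;

	return count;
}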

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_virt_alloc(size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
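
/*
 * Illustrative sketch, not part of the original file: the encode/decode pair
 * above round-trips,
 *
 *	coded = sparse_encode_mem_map(mem_map, pnum);
 *	VM_BUG_ON(sparse_decode_mem_map(coded, pnum) != mem_map);
 *
 * Because the section's first pfn is subtracted at encode time, pfn_to_page()
 * can simply add a pfn to the stored value instead of first converting it to
 * a section-relative offset.
 */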

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = memblock_virt_alloc_try_nid_nopanic(size,
						SMP_CACHE_BYTES, goal, limit,
						nid);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	unsigned long **usemap_map = (unsigned long **)data;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		pr_warn("%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = memblock_virt_alloc_try_nid(size,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
	return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = memblock_virt_alloc_try_nid_raw(size * map_count,
					      PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					      BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	struct page **map_map = (struct page **)data;
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					 map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
	       __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/**
 *  alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 *  @alloc_func: per-node callback that allocates the usemaps or mem_maps
 *  @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
					(void *, unsigned long, unsigned long,
					unsigned long, int), void *data)
{
	unsigned long pnum;
	unsigned long map_count;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;

	for_each_present_section_nr(0, pnum) {
		struct mem_section *ms;

		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for_each_present_section_nr(pnum_begin + 1, pnum) {
		struct mem_section *ms;
		int nodeid;

		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of sections from pnum_begin to pnum - 1 */
		alloc_func(data, pnum_begin, pnum,
						map_count, nodeid_begin);
		/* new start, update count etc*/
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
						map_count, nodeid_begin);
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	int size2;
	struct page **map_map;
#endif

	/* see include/linux/mmzone.h 'struct mem_section' definition */
	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * Each mem_map is allocated with a big page (2M on x86-64), while a
	 * usemap is much smaller than one page (about 24 bytes).  Allocating
	 * a 2M-aligned mem_map followed by a tiny usemap would push the next
	 * mem_map off its 2M alignment, so on a big system memory would end
	 * up with a lot of holes.  Instead, try to allocate the 2M mem_maps
	 * contiguously.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = memblock_virt_alloc(size, 0);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");
	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
							(void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = memblock_virt_alloc(size2, 0);
	if (!map_map)
		panic("can not allocate map_map\n");
	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
							(void *)map_map);
#endif

	for_each_present_section_nr(0, pnum) {
		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	memblock_free_early(__pa(map_map), size2);
#endif
	memblock_free_early(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
static struct page *__kmalloc_section_memmap(void)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the removing section is in a
		 * logically offlined state, meaning all of its pages have been
		 * isolated from the page allocator.  If the removing section's
		 * memmap is placed on the same section, it must not be freed:
		 * the page allocator could hand it out again even though it is
		 * about to be removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * The allocations below may sleep (kmalloc), so they are done
	 * before taking the pgdat resize lock.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

	section_mark_present(ms);

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap);
	}
	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap);
		return;
	}

	/*
	 * The usemap came from bootmem.  It is packed together with other
	 * usemaps on the section that holds the pgdat at boot time, so just
	 * keep it as is for now.
	 */

	if (memmap)
		free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL, flags;
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat_resize_lock(pgdat, &flags);
	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}
	pgdat_resize_unlock(pgdat, &flags);

	clear_hwpoisoned_pages(memmap + map_offset,
			PAGES_PER_SECTION - map_offset);
	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */