#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
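/* e.g. with 4 KiB pages: PAGE_ALIGN(0x1234) == 0x2000, PAGE_ALIGN(0x2000) == 0x2000 */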

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

/*
 * Special vmas that are non-mergeable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 * is used, one may implement ->remap_pages to get nonlinear mapping support.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs. 
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		const nodemask_t *to, unsigned long flags);
#endif
	/* called by sys_remap_file_pages() to populate non-linear mapping */
	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
			   unsigned long size, pgoff_t pgoff);
};
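
/*
 * Illustrative sketch (not part of this header's API surface): a driver
 * whose backing pages live in a hypothetical dev->pages[] array might
 * wire up ->fault like this, handing back the page with a reference held
 * via vmf->page so the core fault code can map it:
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct my_dev *dev = vma->vm_private_data;	// hypothetical type
 *
 *		if (vmf->pgoff >= dev->nr_pages)
 *			return VM_FAULT_SIGBUS;
 *		vmf->page = dev->pages[vmf->pgoff];
 *		get_page(vmf->page);
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_fault,
 *	};
 */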

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/* Valid only while the page is on the free path or in a free_list */
static inline void set_freepage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

/* Valid only while the page is on the free path or in a free_list */
static inline int get_freepage_migratetype(struct page *page)
{
	return page->index;
}

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}
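
/*
 * Sketch of the usual lockless-lookup idiom these two helpers enable
 * (assumed caller context, not code from this header): take a reference
 * only while the page still has users, and let put_page() perform the
 * matching put_page_testzero()-and-free on the way out.
 *
 *	rcu_read_lock();
 *	page = radix_tree_lookup(&mapping->page_tree, index);
 *	if (page && !get_page_unless_zero(page))
 *		page = NULL;		// lost the race with a concurrent free
 *	rcu_read_unlock();
 *	if (page) {
 *		// ... use the page ...
 *		put_page(page);
 *	}
 */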

extern int page_is_ram(unsigned long pfn);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
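
/*
 * Example (sketch): code that must translate a kernel buffer address to
 * its struct page can use this to pick between the vmalloc and linear-map
 * helpers; "buf" here is an assumed kernel virtual address.
 *
 *	struct page *pg = is_vmalloc_addr(buf) ? vmalloc_to_page(buf)
 *					       : virt_to_page(buf);
 */
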
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}

static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}

static inline unsigned long compound_lock_irqsave(struct page *page)
{
	unsigned long uninitialized_var(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	local_irq_save(flags);
	compound_lock(page);
#endif
	return flags;
}

static inline void compound_unlock_irqrestore(struct page *page,
					      unsigned long flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	compound_unlock(page);
	local_irq_restore(flags);
#endif
}

static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page)))
		return page->first_page;
	return page;
}

/*
 * The atomic page->_mapcount starts from -1, so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void reset_page_mapcount(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline void get_huge_page_tail(struct page *page)
{
	/*
	 * __split_huge_page_refcount() cannot run
	 * from under us.
	 */
	VM_BUG_ON(page_mapcount(page) < 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	atomic_inc(&page->_mapcount);
}

extern bool __get_page_tail(struct page *page);

static inline void get_page(struct page *page)
{
	if (unlikely(PageTail(page)))
		if (likely(__get_page_tail(page)))
			return;
	/*
	 * Getting a normal page or the head of a compound page
	 * requires an already elevated page->_count.
	 */
	VM_BUG_ON(atomic_read(&page->_count) <= 0);
	atomic_inc(&page->_count);
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 * -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 * efficiently by most CPU architectures.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	atomic_set(&page->_mapcount, -1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);
int capture_free_page(struct page *page, int alloc_order, int migratetype);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a PG_compound page.
 */
typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}
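
/*
 * Simplified sketch of how the allocator side uses this pair (modelled on
 * mm/page_alloc.c, not code from this header): whoever assembles a
 * compound page installs the destructor, and the final free invokes it
 * through the accessor.
 *
 *	set_compound_page_dtor(page, free_compound_page);
 *	...
 *	(*get_compound_page_dtor(page))(page);	// on the last put
 */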

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return (unsigned long)page[1].lru.prev;
}

static inline int compound_trans_order(struct page *page)
{
	int order;
	unsigned long flags;

	if (!PageHead(page))
		return 0;

	flags = compound_lock_irqsave(page);
	order = compound_order(page);
	compound_unlock_irqrestore(page, flags);
	return order;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
#endif

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_CACHE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/*
 * page->flags layout:
 *
 * There are three possibilities for how page->flags get
 * laid out.  The first is for the normal case, without
 * sparsemem.  The second is for sparsemem when there is
 * plenty of space for node and section.  The last is when
 * we have run out of space and have to fall back to an
 * alternate (slower) way of determining the node.
 *
 * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE | ... | FLAGS |
 * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
 * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
 */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH		SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH		0
#endif

#define ZONES_WIDTH		ZONES_SHIFT

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH		NODES_SHIFT
#else
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#error "Vmemmap: No space for nodes field in page flags"
#endif
#define NODES_WIDTH		0
#endif

/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)

/*
 * We are going to use the flags for the page to node mapping if it's in
 * there.  This includes the case where there is no node, so it is implicit.
 */
#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
#define NODE_NOT_IN_PAGE_FLAGS
#endif

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

/*
 * The identification function is only used by the buddy allocator for
 * determining if two pages could be buddies. We are not really
 * identifying the zone since we could be using the section number
 * id if no node id is available in page flags.
 * We guarantee only that it will return the same value for two
 * combinable pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
static inline void *page_rmapping(struct page *page)
{
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the file index of the page. Regular pagecache pages use ->index
 * whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_file_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);

	return page->index;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, no page returned */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
			 VM_FAULT_HWPOISON_LARGE)

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
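
/*
 * Typical caller-side handling of these bits, sketched from the pattern
 * the arch page-fault handlers use (caller code, not this header's):
 * errors are delivered to the task, everything else just bumps counters.
 *
 *	ret = handle_mm_fault(mm, vma, address, flags);
 *	if (ret & VM_FAULT_ERROR) {
 *		if (ret & VM_FAULT_OOM)
 *			goto out_of_memory;
 *		if (ret & VM_FAULT_SIGBUS)
 *			goto do_sigbus;
 *		BUG();
 *	}
 *	if (ret & VM_FAULT_MAJOR)
 *		tsk->maj_flt++;
 *	else
 *		tsk->min_flt++;
 */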

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES	(0x0001u)	/* filter disallowed nodes */

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

int shmem_zero_setup(struct vm_area_struct *);

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  They may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 *		   *Caution*: The caller must hold mmap_sem if @hugetlb_entry
 *			      is used.
 *
 * (see walk_page_range for more details)
 */
struct mm_walk {
	int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
	int (*hugetlb_entry)(pte_t *, unsigned long,
			     unsigned long, unsigned long, struct mm_walk *);
	struct mm_struct *mm;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
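
/*
 * Minimal walker sketch (assumed caller, with mmap_sem held for read):
 * count the present ptes in a range by supplying only ->pte_entry.
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *count = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *	...
 *	struct mm_walk pte_counter = {
 *		.pte_entry	= count_pte,
 *		.mm		= mm,
 *		.private	= &count,
 *	};
 *	walk_page_range(start, end, &pte_counter);
 */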
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
extern int vmtruncate(struct inode *inode, loff_t offset);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
#endif

extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write);

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, unsigned int foll_flags,
		     struct page **pages, struct vm_area_struct **vmas,
		     int *nonblocking);
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			unsigned long start, int nr_pages, int write, int force,
			struct page **pages, struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
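
/*
 * Sketch of the usual pinning pattern around get_user_pages() (caller
 * code, not part of this header): pin the user range, use the pages,
 * then drop each reference.
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages(current, current->mm, start, nr_pages,
 *			     1, 0, pages, NULL);	// write=1, force=0
 *	up_read(&current->mm->mmap_sem);
 *	for (i = 0; i < ret; i++) {
 *		// ... access pages[i] ...
 *		put_page(pages[i]);
 *	}
 */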
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_writeback(struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					     unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
					   unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

extern pid_t
vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);
/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * The counter is updated asynchronously and may temporarily go
	 * negative, which callers never expect, so clamp it to zero.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif

int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTLOCKS
/*
 * We tuck a spinlock to guard each pagetable page into its struct page,
 * at page->private, with BUILD_BUG_ON to make sure that this will not
 * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
 * When freeing, reset page->mapping so free_pages_check won't complain.
 */
#define __pte_lockptr(page)	&((page)->ptl)
#define pte_lock_init(_page)	do {					\
	spin_lock_init(__pte_lockptr(_page));				\
} while (0)
#define pte_lock_deinit(page)	((page)->mapping = NULL)
#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else	/* !USE_SPLIT_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
#define pte_lock_init(page)	do {} while (0)
#define pte_lock_deinit(page)	do {} while (0)
#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
#endif /* USE_SPLIT_PTLOCKS */

static inline void pgtable_page_ctor(struct page *page)
{
	pte_lock_init(page);
	inc_zone_page_state(page, NR_PAGETABLE);
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
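
/*
 * Usual usage sketch (assumes the caller has a stable, populated pmd):
 * map and lock the pte, inspect or update it, then unlock and unmap.
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	if (pte_present(*pte)) {
 *		// ... examine or modify the pte ...
 *	}
 *	pte_unmap_unlock(pte, ptl);
 */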

#define pte_alloc_map(mm, vma, pmd, address)				\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,	\
							pmd, address))?	\
	 NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,	\
							pmd, address))?	\
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in a more
 * architecture independent manner. This is a substitute for creating the
 * zone_sizes[] and zholes_size[] arrays and passing them to
 * free_area_init_node()
 *
 * An architecture is expected to register range of page frames backed by
 * physical memory with memblock_add[_node]() before calling
 * free_area_init_nodes() passing in the PFN each zone ends at. In basic
 * usage, an architecture is expected to do something like
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * 							 max_highmem_pfn};
 * for_each_valid_physical_page_range()
 * 	memblock_add_node(base, size, nid)
 * free_area_init_nodes(max_zone_pfns);
 *
 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
 * registered physical page range.  Similarly
 * sparse_memory_present_with_active_regions() calls memory_present() for
 * each range when SPARSEMEM is enabled.
 *
 * See mm/page_alloc.c for more information on each function exposed by
 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);