/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

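/*
 * All swap cache pages share this single address_space; its radix tree
 * is indexed by swp_entry_t.val rather than by file offset.
 */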
struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages);
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

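	/*
	 * Take a page reference and mark the page as swap cache up front;
	 * both are undone below if the radix tree insert fails.
	 */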
	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	spin_lock_irq(&swapper_space.tree_lock);
	error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
	if (likely(!error)) {
		total_swapcache_pages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&swapper_space.tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context which has set the SWAP_HAS_CACHE flag
		 * calls add_to_swap_cache(), so add_to_swap_cache()
		 * never returns -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Returns 1 on success, 0 on failure.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	entry = get_swap_page();
	if (!entry.val)
		return 0;

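	/*
	 * A transparent huge page cannot be swapped out as one unit;
	 * split it into base pages first, and give the swap entry back
	 * if the split fails.
	 */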
	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page))) {
			swapcache_free(entry, NULL);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * prevents these allocations from dipping into the emergency
	 * reserves.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * as the caller holds a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

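	/* Release the swap entry reference held by the swap cache. */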
	swapcache_free(entry, page);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/* 
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
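	/*
	 * Work through the array in PAGEVEC_SIZE batches so that each
	 * release_pages() call stays bounded.
	 */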
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}

/* 
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

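	/*
	 * Loop until the page turns up in the swap cache, we read it in
	 * ourselves, or the attempt fails hard with -ENOMEM.
	 */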
	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_preload() while we can still sleep; the
		 * actual insert below runs under the tree spinlock.
		 */
		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {	/* raced with another swapin; retry */
			radix_tree_preload_end();
			continue;
		}
		if (err) {		/* swp entry is obsolete; give up */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long offset = swp_offset(entry);
	unsigned long start_offset, end_offset;
	unsigned long mask = (1UL << page_cluster) - 1;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}