/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/*********************************
* statistics
**********************************/
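/*
 * Each counter below is exported read-only through debugfs under
 * /sys/kernel/debug/zswap/ (see zswap_debugfs_init() at the bottom of
 * this file).
 */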
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
*/

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static struct kernel_param_ops zswap_enabled_param_ops = {
	.set =		zswap_enabled_param_set,
	.get =		param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static struct kernel_param_ops zswap_compressor_param_ops = {
	.set =		zswap_compressor_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
#define ZSWAP_ZPOOL_DEFAULT "zbud"
static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static struct kernel_param_ops zswap_zpool_param_ops = {
	.set =		zswap_zpool_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
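
/*
 * All of the tunables above can also be changed at runtime through sysfs,
 * e.g. (illustrative values; the chosen compressor must be available):
 *
 *   echo 1   > /sys/module/zswap/parameters/enabled
 *   echo lz4 > /sys/module/zswap/parameters/compressor
 *   echo 25  > /sys/module/zswap/parameters/max_pool_percent
 */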

/*********************************
* data structures
**********************************/

struct zswap_pool {
	struct zpool *zpool;
	struct crypto_comp * __percpu *tfm;
	struct kref kref;
	struct list_head list;
	struct work_struct work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * refcount - the number of outstanding references to the entry. This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	unsigned long handle;
};
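
/*
 * A zswap_header precedes the compressed data inside each zpool
 * allocation, so that zswap_writeback_entry(), which is handed only a
 * zpool handle, can recover the swap entry (and from it the tree and
 * offset) that the data belongs to.
 */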

struct zswap_header {
	swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

/* used by param callback function */
static bool zswap_init_started;

/* fatal error during init */
static bool zswap_init_failed;

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};
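
/*
 * The pool is considered full once the compressed data occupies more
 * than zswap_max_pool_percent of total RAM; e.g. with 4 GiB of RAM and
 * the default of 20%, once the zpools hold roughly 800 MiB new stores
 * first trigger writeback and are rejected if reclaim fails.
 */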

static bool zswap_is_full(void)
{
	return totalram_pages * zswap_max_pool_percent / 100 <
		DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	zpool_free(entry->pool->zpool, entry->handle);
	zswap_pool_put(entry->pool);
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/* caller must hold the tree lock
* remove from the tree and free it, if nobody references the entry
*/
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}

/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

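/*
 * Each CPU gets a two-page scratch buffer for compression output:
 * poorly compressible data can expand, so a single page is not always
 * enough to hold the worst-case compressed result.
 */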
static int zswap_dstmem_prepare(unsigned int cpu)
{
	u8 *dst;

	dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!dst)
		return -ENOMEM;

	per_cpu(zswap_dstmem, cpu) = dst;
	return 0;
}

static int zswap_dstmem_dead(unsigned int cpu)
{
	u8 *dst;

	dst = per_cpu(zswap_dstmem, cpu);
	kfree(dst);
	per_cpu(zswap_dstmem, cpu) = NULL;

	return 0;
}

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_comp *tfm;

	if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
		return 0;

	tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
	if (IS_ERR_OR_NULL(tfm)) {
		pr_err("could not alloc crypto comp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(tfm));
		return -ENOMEM;
	}
	*per_cpu_ptr(pool->tfm, cpu) = tfm;
	return 0;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_comp *tfm;

	tfm = *per_cpu_ptr(pool->tfm, cpu);
	if (!IS_ERR_OR_NULL(tfm))
		crypto_free_comp(tfm);
	*per_cpu_ptr(pool->tfm, cpu) = NULL;
	return 0;
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	WARN_ONCE(!last && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);
	if (!zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
	pool->tfm = alloc_percpu(struct crypto_comp *);
	if (!pool->tfm) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

error:
	free_percpu(pool->tfm);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_comp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_comp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->tfm);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

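/*
 * The last reference to a pool can be dropped in atomic context (e.g.
 * while freeing an entry under the tree spinlock), so the RCU grace
 * period and the actual destruction are deferred to a workqueue.
 */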
static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool), work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->work, __zswap_pool_release);
	schedule_work(&pool->work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

/* val must be a null-terminated string */
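/*
 * Changing the zpool or compressor param at runtime finds or creates a
 * pool matching the new <zpool, compressor> pair, makes it the current
 * pool at the front of zswap_pools, and drops the reference on the
 * previous current pool; old pools stay on the list so their entries
 * can still be loaded and written back.
 */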
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret;

	if (zswap_init_failed) {
		pr_err("can't set param, initialization failed\n");
		return -ENODEV;
	}

	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return 0;

	/* if this is load-time (pre-init) param setting,
	 * don't create a pool; that's done during init.
	 */
	if (!zswap_init_started)
		return param_set_charp(s, kp);

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_comp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_comp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	if (zswap_init_failed) {
		pr_err("can't enable, initialization failed\n");
		return -ENODEV;
	}
	if (!zswap_has_pool && zswap_init_started) {
		pr_err("can't enable, no pool configured\n");
		return -ENODEV;
	}

	return param_set_bool(val, kp);
}

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaption of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	zpool_unmap_handle(pool, handle);
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		put_page(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
				ZPOOL_MM_RO) + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		tfm = *get_cpu_ptr(entry->pool->tfm);
		ret = crypto_comp_decompress(tfm, src, entry->length,
					     dst, &dlen);
		put_cpu_ptr(entry->pool->tfm);
		kunmap_atomic(dst);
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	put_page(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	* There are two possible situations for entry here:
	* (1) refcount is 1 (normal case): entry is valid and on the tree
	* (2) refcount is 0: entry was freed and removed from the tree
	*     because an invalidate happened during writeback
	* Search the tree and free the entry only if it is still found.
	*/
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	* If we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
	* happening concurrently; it is safe and okay to not free the
	* entry. It is also okay to return !0 if the entry is freed by
	* the following put.
	*/
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	return ret;
}

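/*
 * Writeback is requested from the last (i.e. oldest) pool on the list,
 * so that pools which are no longer current can drain and eventually
 * be destroyed.
 */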
static int zswap_shrink(void)
{
	struct zswap_pool *pool;
	int ret;

	pool = zswap_pool_last_get();
	if (!pool)
		return -ENOENT;

	ret = zpool_shrink(pool->zpool, 1, NULL);

	zswap_pool_put(pool);

	return ret;
}

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct crypto_comp *tfm;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!zswap_enabled || !tree) {
Seth Jennings's avatar
Seth Jennings committed
974 975 976 977 978 979 980
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zswap_shrink()) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}

	/* compress */
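	/*
	 * get_cpu_var()/get_cpu_ptr() disable preemption, so this CPU's
	 * dstmem scratch buffer and crypto tfm remain ours until the
	 * matching put_cpu_var()/put_cpu_ptr() calls below.
	 */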
	dst = get_cpu_var(zswap_dstmem);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	src = kmap_atomic(page);
	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	put_cpu_ptr(entry->pool->tfm);
	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zpool_malloc(entry->pool->zpool, len,
			   __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
			   &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
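			/*
			 * A duplicate can occur when a page is swapped out
			 * again to the same offset before the old entry was
			 * invalidated; drop the stale entry and retry.
			 */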
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();

	return 0;

put_dstmem:
	put_cpu_var(zswap_dstmem);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	return ret;
}

/*
 * returns 0 if the page was successfully decompressed
 * returns -1 on entry not found or error
*/
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	/* decompress */
	dlen = PAGE_SIZE;
	src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
			ZPOOL_MM_RO) + sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
	put_cpu_ptr(entry->pool->tfm);
	kunmap_atomic(dst);
	zpool_unmap_handle(entry->pool->zpool, entry->handle);
	zpool_unmap_handle(entry->pool->zpool, entry->handle);
	BUG_ON(ret);

	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	zswap_rb_erase(&tree->rbroot, entry);

	/* drop the initial reference from entry creation */
	zswap_entry_put(tree, entry);

	spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
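	/*
	 * A post-order walk can free every entry without any per-node
	 * erase/rebalance work; the root is simply reset afterwards.
	 */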
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}

static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
	if (!zswap_debugfs_root)
		return -ENOMEM;

	debugfs_create_u64("pool_limit_hit", S_IRUGO,
			zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", S_IRUGO,
			zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", S_IRUGO,
			zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", S_IRUGO,
			zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", S_IRUGO,
			zswap_debugfs_root, &zswap_stored_pages);

	return 0;
}

static void __exit zswap_debugfs_exit(void)
{
	debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_init_started = true;

	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
				zswap_dstmem_prepare, zswap_dstmem_dead);
	if (ret) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;

hp_fail:
	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
	zswap_entry_cache_destroy();
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_failed = true;
	zswap_enabled = false;
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");