/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline void deprecated_attr_warn(const char *name)
{
	pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
			task_pid_nr(current),
			current->comm,
			name,
			"See zram documentation.");
}

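/*
 * Expands to a read-only sysfs attribute: <name>_show() prints the
 * matching 64-bit counter from zram->stats and emits the deprecation
 * warning above on first use.
 */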
#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
									\
	deprecated_attr_warn(__stringify(name));			\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

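/*
 * Advance the (page index, intra-page offset) cursor past the bio_vec
 * that was just processed; the offset wraps at PAGE_SIZE.
 */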
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

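/*
 * Lock-free maximum update: re-read and retry with cmpxchg until either
 * the recorded maximum is already >= @pages or this caller successfully
 * installs @pages, so a larger concurrent value is never overwritten by
 * a smaller one.
 */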
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

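/* Scan the page one word at a time; returns 1 only if every word is zero. */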
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

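/*
 * Satisfy a read of a zero-filled slot without touching zsmalloc:
 * clear the whole page, or only the requested byte range for a
 * partial request.
 */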
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("orig_data_size");
	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_total");
	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_limit");
	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

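	/* memparse() accepts an optional K/M/G/... size suffix, e.g. "256M" */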
	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_max");
	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

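/* Any write to this attribute triggers compaction of the zsmalloc pool. */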
static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	unsigned long nr_migrated;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	meta = zram->meta;
	nr_migrated = zs_compact(meta->mem_pool);
	atomic64_add(nr_migrated, &zram->stats.num_migrated);
	up_read(&zram->init_lock);

	return len;
}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	down_read(&zram->init_lock);
	if (init_done(zram))
		mem_used = zs_get_total_pages(zram->meta->mem_pool);

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.zero_pages),
			(u64)atomic64_read(&zram->stats.num_migrated));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

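/*
 * Pin the device metadata against a concurrent reset:
 * atomic_inc_not_zero() fails once zram->refcount has dropped to zero,
 * so in-flight I/O can never use meta that is being freed.
 */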
static inline bool zram_meta_get(struct zram *zram)
{
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static inline void zram_meta_put(struct zram *zram)
{
	atomic_dec(&zram->refcount);
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
{
	size_t num_pages;
	char pool_name[8];
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
	meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}

/*
 * To protect concurrent access to the same index entry,
 * caller should hold this table index entry's bit_spinlock to
 * indicate this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

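/*
 * Decompress the object at @index into @mem, which must hold a full
 * page. Unallocated and ZRAM_ZERO slots are satisfied by clearing the
 * page; objects stored uncompressed (size == PAGE_SIZE) are copied
 * without invoking the compressor.
 */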
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
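	/*
	 * Pages that compress to more than max_zpage_size are stored
	 * uncompressed (clen = PAGE_SIZE): the memory saving would be
	 * negligible and every read would still pay for a decompression.
	 */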
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

/*
 * zram_bio_discard - handler for discard requests
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical to physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block.  Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory.  So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	unsigned long start_time = jiffies;
	int ret;

	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

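	/* Translate the starting sector into a page index and byte offset. */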
	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram_meta_get(zram)))
		goto error;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto put_zram;
	}

	__zram_make_request(zram, bio);
	zram_meta_put(zram);
	return;
put_zram:
	zram_meta_put(zram);
error:
	bio_io_error(bio);
}

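/*
 * Called through the block layer's swap_slot_free_notify hook when a
 * swap slot backed by this device is freed, so the stale compressed
 * page can be dropped immediately.
 */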
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	int offset, err = -EIO;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;
	if (unlikely(!zram_meta_get(zram)))
		goto out;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		err = -EINVAL;
		goto put_zram;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(