/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline void deprecated_attr_warn(const char *name)
{
	pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
			task_pid_nr(current),
			current->comm,
			name,
			"See zram documentation.");
}

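/*
 * ZRAM_ATTR_RO() generates a read-only sysfs attribute that reports a
 * single 64-bit counter from zram->stats, emitting a one-time
 * deprecation warning on access.
 */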
#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
									\
	deprecated_attr_warn(__stringify(name));			\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

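/*
 * Advance the (index, offset) cursor past the bytes covered by @bvec;
 * index moves to the next zram page whenever offset wraps around
 * PAGE_SIZE.
 */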
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

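/*
 * Lock-free watermark update: retry the cmpxchg until the recorded
 * maximum is at least @pages or until we successfully store @pages.
 */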
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

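/* Return 1 if the page contains only zero bytes, scanning one word at a time. */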
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

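/*
 * Satisfy a read of a zero-filled page without touching zsmalloc: clear
 * either the requested byte range or the whole destination page.
 */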
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("orig_data_size");
	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_total");
	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;

		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_limit");
	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_max");
	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	size_t sz;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));

	/* ignore trailing newline */
	sz = strlen(zram->compressor);
	if (sz > 0 && zram->compressor[sz - 1] == '\n')
		zram->compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(zram->compressor))
		len = -EINVAL;

	up_write(&zram->init_lock);
	return len;
}

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	unsigned long nr_migrated;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	meta = zram->meta;
	nr_migrated = zs_compact(meta->mem_pool);
	atomic64_add(nr_migrated, &zram->stats.num_migrated);
	up_read(&zram->init_lock);

	return len;
}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	down_read(&zram->init_lock);
	if (init_done(zram))
		mem_used = zs_get_total_pages(zram->meta->mem_pool);

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.zero_pages),
			(u64)atomic64_read(&zram->stats.num_migrated));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

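/*
 * I/O paths pin the device with zram_meta_get() before touching
 * zram->meta; the get fails once the refcount has dropped to zero
 * (device reset in progress) and must be paired with zram_meta_put().
 */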
static inline bool zram_meta_get(struct zram *zram)
{
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static inline void zram_meta_put(struct zram *zram)
{
	atomic_dec(&zram->refcount);
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
{
	size_t num_pages;
	char pool_name[8];
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
	meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}

/*
 * To protect concurrent access to the same index entry,
 * the caller should hold this table index entry's bit_spinlock to
 * indicate that this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

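/*
 * Decompress the object at @index into @mem, which must be a full
 * PAGE_SIZE buffer. Unallocated and ZRAM_ZERO slots read back as a
 * cleared page; incompressible objects stored at PAGE_SIZE are copied
 * without invoking the compressor.
 */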
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

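/*
 * Read @bvec from the zram page at @index. A partial read decompresses
 * into a temporary buffer first and copies out only the requested
 * byte range.
 */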
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

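/*
 * Write @bvec to the zram page at @index: partial I/O is handled as
 * read-modify-write, zero-filled pages are stored as a table flag only,
 * and pages that compress poorly (clen > max_zpage_size) are stored
 * uncompressed at PAGE_SIZE.
 */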
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm = NULL;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing back the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	zstrm = NULL;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (zstrm)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because the logical
	 * block size isn't identical to the physical block size on some
	 * architectures, we could get a discard request pointing to a specific
	 * offset within a certain physical block.  Although we can handle this
	 * request by reading that physical block, decompressing, partially
	 * zeroing, re-compressing and then re-storing it, this isn't
	 * reasonable because our intent with a discard request is to save
	 * memory.  So skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

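/*
 * Dispatch one page-sized (or smaller) bio vector to the read or write
 * path and account it in the generic block-layer I/O statistics.
 */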
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	unsigned long start_time = jiffies;
	int ret;

	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

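/*
 * Walk the bio segment by segment, splitting any bio vector that spans
 * a zram page boundary into two zram_bvec_rw() calls.
 */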
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	bio_endio(bio);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram_meta_get(zram)))
		goto error;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto put_zram;
	}

	__zram_make_request(zram, bio);
	zram_meta_put(zram);
	return;
put_zram:
	zram_meta_put(zram);
error:
	bio_io_error(bio);
}

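/*
 * Called via the block device's swap_slot_free_notify hook when a swap
 * slot is freed, so the backing compressed object can be released
 * immediately.
 */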
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

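/* rw_page hook: single-page read/write entry point that avoids bio allocation. */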
static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	int offset, err = -EIO;