/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline void deprecated_attr_warn(const char *name)
{
	pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
			task_pid_nr(current),
			current->comm,
			name,
			"See zram documentation.");
}

#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
									\
	deprecated_attr_warn(__stringify(name));			\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

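/*
 * Returns true if the page at @ptr contains only zero bytes; the page is
 * scanned one unsigned long at a time so callers can record zero-filled
 * pages as a flag instead of storing a compressed object.
 */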
static bool page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return false;
	}

	return true;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("orig_data_size");
	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_total");
	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_limit");
	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_max");
	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

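/*
 * Set the number of compression streams. If the device is already
 * initialized, the compression backend is asked to resize via
 * zcomp_set_max_streams() and the write fails when it cannot.
 */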
static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	size_t sz;

	if (!zcomp_available_algorithm(buf))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));

	/* ignore trailing newline */
	sz = strlen(zram->compressor);
	if (sz > 0 && zram->compressor[sz - 1] == '\n')
		zram->compressor[sz - 1] = 0x00;

	up_write(&zram->init_lock);
	return len;
}

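/*
 * Writing anything to this attribute triggers zsmalloc pool compaction
 * on an initialized device; the written value itself is not interpreted.
 */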
static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	meta = zram->meta;
	zs_compact(meta->mem_pool);
	up_read(&zram->init_lock);

	return len;
}

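/*
 * I/O statistics, printed as:
 * failed_reads failed_writes invalid_io notify_free
 */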
static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

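/*
 * Memory statistics, printed as:
 * orig_data_size compr_data_size mem_used_total mem_limit mem_used_max
 * zero_pages pages_compacted
 */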
static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->meta->mem_pool);
		zs_pool_stats(zram->meta->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.zero_pages),
			pool_stats.pages_compacted);
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static inline bool zram_meta_get(struct zram *zram)
{
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static inline void zram_meta_put(struct zram *zram)
{
	atomic_dec(&zram->refcount);
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

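/*
 * Allocate the per-device metadata: the page table covering @disksize
 * and the zsmalloc pool backing it. Returns NULL on failure.
 */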
static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	meta->mem_pool = zs_create_pool(pool_name);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}

/*
 * To protect concurrent access to the same index entry,
 * caller should hold this table index entry's bit_spinlock to
 * indicate this index entry is accessing.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

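/*
 * Decompress the object at @index into @mem, which must be a full
 * PAGE_SIZE buffer. Unallocated and ZRAM_ZERO slots read back as a
 * cleared page.
 */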
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

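/*
 * Read one bio vector that fits within a single zram page. Partial
 * reads are decompressed into a temporary buffer and then copied into
 * the caller's page at the requested offset.
 */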
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_err("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle = 0;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm = NULL;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

compress_again:
	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *  since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *  put per-cpu compression stream and, thus, to re-do
	 *  the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (!handle)
		handle = zs_malloc(meta->mem_pool, clen,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM);
	if (!handle) {
		zcomp_strm_release(zram->comp, zstrm);
		zstrm = NULL;

		handle = zs_malloc(meta->mem_pool, clen,
				GFP_NOIO | __GFP_HIGHMEM);
		if (handle)
			goto compress_again;

		pr_err("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	zstrm = NULL;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (zstrm)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block.  Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory.  So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

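/*
 * Read or write a single bio vector confined to one zram page, updating
 * the per-device I/O accounting and failure statistics.
 */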
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	unsigned long start_time = jiffies;
	int ret;

	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

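/*
 * Walk the bio segment by segment, splitting any vector that crosses a
 * zram page boundary so that zram_bvec_rw() only ever sees I/O within a
 * single page.
 */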
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	bio_endio(bio);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;