/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline void deprecated_attr_warn(const char *name)
{
	pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
			task_pid_nr(current),
			current->comm,
			name,
			"See zram documentation.");
}

#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
									\
	deprecated_attr_warn(__stringify(name));			\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
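
/*
 * table[index].value packs two things, as the helpers above show: the
 * compressed object size in the low ZRAM_FLAG_SHIFT bits, and the
 * zram_pageflags bits (ZRAM_ZERO, the ZRAM_ACCESS lock bit, ...) above
 * them.  Illustrative sketch, assuming ZRAM_FLAG_SHIFT were 24 (see
 * zram_drv.h for the real value):
 *
 *	value = (flags << 24) | size;		// zram_set_obj_size()
 *	size  = value & (BIT(24) - 1);		// zram_get_obj_size()
 *	flags = value >> 24;			// preserved across size updates
 */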

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}
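
/*
 * With a 4K logical block (ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8, assuming the
 * usual ZRAM_LOGICAL_BLOCK_SIZE of 4096), valid_io_request() accepts only
 * requests whose start sector is a multiple of 8 and whose byte size is a
 * multiple of 4096, and which fall entirely inside the disksize.
 */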

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

/*
 * Lock-free maximum tracking: retry the cmpxchg until either the recorded
 * maximum is already >= pages or we successfully install the new maximum.
 */
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("orig_data_size");
	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_total");
	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;

		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_limit");
	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
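
/*
 * memparse() accepts size suffixes, so (with zram0 as an example device):
 *
 *	echo 512M > /sys/block/zram0/mem_limit
 *	echo 0 > /sys/block/zram0/mem_limit	(a zero limit disables the check)
 */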

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_max");
	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;

		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
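
/*
 * Only "0" is a valid write; it resets the high-water mark to the pool's
 * current size, e.g. (zram0 as an example device):
 *
 *	echo 0 > /sys/block/zram0/mem_used_max
 */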

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}
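
/*
 * Example, assuming an lz4 backend is available (zram0 is illustrative);
 * zcomp_available_show() brackets the active algorithm:
 *
 *	cat /sys/block/zram0/comp_algorithm	-> [lzo] lz4
 *	echo lz4 > /sys/block/zram0/comp_algorithm
 */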

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	unsigned long nr_migrated;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	meta = zram->meta;
	nr_migrated = zs_compact(meta->mem_pool);
	atomic64_add(nr_migrated, &zram->stats.num_migrated);
	up_read(&zram->init_lock);

	return len;
}
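
/*
 * The written value is ignored; any write to an initialized device triggers
 * zsmalloc compaction, e.g.:
 *
 *	echo 1 > /sys/block/zram0/compact
 */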

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	down_read(&zram->init_lock);
	if (init_done(zram))
		mem_used = zs_get_total_pages(zram->meta->mem_pool);

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.zero_pages),
			(u64)atomic64_read(&zram->stats.num_migrated));
	up_read(&zram->init_lock);

	return ret;
}
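
/*
 * mm_stat columns, in print order: orig_data_size compr_data_size
 * mem_used_total mem_limit mem_used_max zero_pages num_migrated.
 * The first five are byte counts; the last two are plain counters.
 */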

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static inline bool zram_meta_get(struct zram *zram)
{
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static inline void zram_meta_put(struct zram *zram)
{
	atomic_dec(&zram->refcount);
}
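
/*
 * zram_meta_get()/zram_meta_put() pin the device metadata across in-flight
 * I/O: atomic_inc_not_zero() fails once the base reference has been
 * dropped, so new requests cannot race with meta teardown.
 */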

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
{
	size_t num_pages;
	char pool_name[8];
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
	meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}

/*
 * To protect concurrent access to the same index entry, the caller must
 * hold that table entry's bit_spinlock while calling this, marking the
 * entry as being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}
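
/*
 * Note on the size == PAGE_SIZE fast path above: pages that compress
 * poorly are stored raw (see the max_zpage_size check in the write path),
 * so they are copied back with copy_page() instead of being run through
 * the decompressor.
 */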

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		/* Incompressible page: store it raw, a full page at a time */
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because the logical
	 * block size isn't identical to the physical block size on some
	 * architectures, we could get a discard request pointing to a specific
	 * offset within a certain physical block.  Although we could handle
	 * this request by reading that physical block, decompressing,
	 * partially zeroing, re-compressing and re-storing it, this isn't
	 * reasonable because our intent with a discard request is to save
	 * memory.  So skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}
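
/*
 * Example with 4K pages: a 12K discard starting 2K into page i drops the
 * 2K head (n becomes 10K, index becomes i + 1), frees pages i + 1 and
 * i + 2, and skips the trailing 2K remainder.
 */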

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	unsigned long start_time = jiffies;
	int ret;

	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}
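
/*
 * Worked example, assuming 4K pages (SECTORS_PER_PAGE == 8): a bio at
 * sector 9 maps to index = 9 >> 3 = 1 and offset = (9 & 7) << 9 = 512
 * bytes into that page; a bvec longer than PAGE_SIZE - 512 is then split
 * across pages 1 and 2 by the loop above.
 */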

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram_meta_get(zram)))
		goto error;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto put_zram;
	}

	__zram_make_request(zram, bio);
	zram_meta_put(zram);
	return;
put_zram:
	zram_meta_put(zram);
error:
	bio_io_error(bio);
}

/*
 * Called via block_device_operations->swap_slot_free_notify when swap
 * frees a slot backed by this device, so the page can be dropped early.
 */
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	int offset, err = -EIO;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;
	if (unlikely(!zram_meta_get(zram)))
		goto out;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		err = -EINVAL;
		goto put_zram;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, rw);
put_zram:
	zram_meta_put(zram);
out: