/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

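/*
 * Define a read-only sysfs attribute that prints the like-named
 * 64-bit counter from zram->stats.
 */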
#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

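/*
 * The limit is parsed with memparse(), so size suffixes work,
 * e.g.: echo 256M > /sys/block/zram0/mem_limit
 */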
static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

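/*
 * Writing "0" (the only accepted value) resets the max_used_pages
 * watermark to the pool's current size, e.g.:
 *	echo 0 > /sys/block/zram0/mem_used_max
 */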
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

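/*
 * Number of compression streams usable in parallel,
 * e.g.: echo 4 > /sys/block/zram0/max_comp_streams
 */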
static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

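/*
 * The algorithm can only be changed before the device is initialized,
 * e.g.: echo lz4 > /sys/block/zram0/comp_algorithm
 */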
static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

/* flag operations need the table entry's bit_spinlock (ZRAM_ACCESS) to be held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

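/*
 * table[index].value packs the zram_pageflags in the bits at and
 * above ZRAM_FLAG_SHIFT and the compressed object size below them.
 */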
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
{
	size_t num_pages;
	char pool_name[8];
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
	meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}

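/*
 * Pin zram->meta for I/O; this fails once zram_reset_device() has
 * dropped the initial reference and the refcount has reached zero.
 */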
static inline bool zram_meta_get(struct zram *zram)
{
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static inline void zram_meta_put(struct zram *zram)
{
	atomic_dec(&zram->refcount);
}

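/* Advance the page index/offset cursor past the bvec just processed */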
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}


/*
 * To protect concurrent access to the same index entry,
 * the caller should hold this table index entry's bit_spinlock to
 * indicate that the entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
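	/* Objects of exactly PAGE_SIZE were stored uncompressed; just copy */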
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
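	/* Page compressed poorly: store the full PAGE_SIZE uncompressed */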
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	unsigned long start_time = jiffies;
	int ret;

	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical to the physical block size on some architectures,
	 * we could get a discard request pointing to a specific offset within a
	 * certain physical block.  Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory.  So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

static void zram_reset_device(struct zram *zram)
{
	struct zram_meta *meta;
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	comp = zram->comp;
	disksize = zram->disksize;
	/*
	 * Refcount will go down to 0 eventually and r/w handler
	 * cannot handle further I/O, so it will bail out when
	 * zram_meta_get() fails.
	 */
	zram_meta_put(zram);
	/*
	 * We want to free zram_meta in process context to avoid
	 * deadlock between reclaim path and any other locks.
	 */
	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));
	zram->disksize = 0;
	zram->max_comp_streams = 1;
	set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);
	/* I/O operations on all CPUs are done, so it is safe to free */
	zram_meta_free(meta, disksize);
	zcomp_destroy(comp);
}

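/*
 * Setting the disksize initializes the device, e.g.:
 *	echo 1G > /sys/block/zram0/disksize
 */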
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(zram->disk->first_minor, disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	init_waitqueue_head(&zram->io_done);
	atomic_set(&zram->refcount, 1);
	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta, disksize);
	return err;
}

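/*
 * e.g.: echo 1 > /sys/block/zram0/reset
 * Fails with -EBUSY while the device is still open.
 */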
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device! */
	if (bdev->bd_openers) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);

	mutex_unlock(&bdev->bd_mutex);
	revalidate_disk(zram->disk);
	bdput(bdev);

	return len;

out:
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

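	/* Translate the start sector into a page index and intra-page offset */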
	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only make operation on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);