/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

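/*
 * Defines a read-only sysfs attribute that reports a single 64-bit
 * counter from zram->stats for the given device.
 */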
#define ZRAM_ATTR_RO(name)						\
static ssize_t zram_attr_##name##_show(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static struct device_attribute dev_attr_##name =			\
	__ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);

static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = zs_get_total_pages(meta->mem_pool);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
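
/*
 * Accepts a human-readable size in memparse() format, so K/M/G
 * suffixes work, e.g. (assuming the device is zram0):
 *   echo 256M > /sys/block/zram0/mem_limit
 * A limit of 0 disables the check in the write path.
 */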

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

/* flag operations need the table entry's bit_spinlock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}
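
/*
 * table[index].value packs the compressed object size into the low
 * ZRAM_FLAG_SHIFT bits; the zram_pageflags, including the ZRAM_ACCESS
 * lock bit, live above them.
 */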

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_meta;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}
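
/*
 * Advance (index, offset) past the bio_vec just processed: offset
 * wraps modulo PAGE_SIZE, and index moves to the next page when the
 * segment reaches or crosses a page boundary.
 */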

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}


/*
 * To protect concurrent access to the same index entry,
 * caller should hold this table index entry's bit_spinlock to
 * indicate this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}
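
/*
 * Decompress the object at @index into the PAGE_SIZE buffer @mem.
 * Unallocated and zero-filled slots are handled by clearing the page;
 * objects stored uncompressed (size == PAGE_SIZE) are copied directly.
 */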

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}
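
/*
 * Read the page at @index into the bio_vec's page. Partial requests
 * are decompressed into a temporary buffer first and the requested
 * bytes copied out.
 */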

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
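
/*
 * Compress and store one bio_vec. Partial writes are handled by a
 * read-modify-write of the whole page; zero-filled pages are recorded
 * with a flag only, and pages that compress poorly (clen larger than
 * max_zpage_size) are stored uncompressed.
 */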

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	if (zram->limit_pages &&
		zs_get_total_pages(meta->mem_pool) > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio)
{
	int ret;
	int rw = bio_data_dir(bio);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical to physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block.  Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory.  So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		index++;
		n -= PAGE_SIZE;
	}
}

static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zcomp_destroy(zram->comp);
	zram->max_comp_streams = 1;

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	if (reset_capacity)
		revalidate_disk(zram->disk);
}
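
/*
 * A typical setup sequence from userspace (assuming the device is
 * zram0) configures the compressor and stream count before setting
 * the size, since neither can change once the device is initialized:
 *   echo lzo > /sys/block/zram0/comp_algorithm
 *   echo 4 > /sys/block/zram0/max_comp_streams
 *   echo 512M > /sys/block/zram0/disksize
 * The size is parsed with memparse(), so K/M/G suffixes are accepted.
 */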

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta);
	return err;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}
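
/*
 * Called by the swap layer when a swap slot is freed, so the
 * corresponding compressed page can be released immediately.
 */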

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static DEVICE_ATTR(mem_limit, S_IRUGO | S_IWUSR, mem_limit_show,
		mem_limit_store);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
		max_comp_streams_show, max_comp_streams_store);
static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
		comp_algorithm_show, comp_algorithm_store);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_mem_limit.attr,