/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
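/* e.g. "modprobe zram num_devices=4" creates /dev/zram0 through /dev/zram3 */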

#define ZRAM_ATTR_RO(name)						\
static ssize_t zram_attr_##name##_show(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static struct device_attribute dev_attr_##name =			\
	__ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);
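
/*
 * For example, ZRAM_ATTR_RO(num_reads) defines zram_attr_num_reads_show()
 * plus a read-only device attribute dev_attr_num_reads that prints
 * zram->stats.num_reads; the attributes are collected in zram_disk_attrs[]
 * below.
 */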

static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = zs_get_total_pages(meta->mem_pool);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

/* flag operations need the table entry's bit_spinlock (ZRAM_ACCESS) */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}
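
/*
 * The low ZRAM_FLAG_SHIFT bits of table[index].value hold the compressed
 * object size; the bits above hold the zram_pageflags, including the
 * ZRAM_ACCESS bit used as the per-entry bit_spinlock. The helpers below
 * access the size half of that packed word.
 */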

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_meta;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}
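
/*
 * Advance (index, offset) past one bio_vec. With 4K pages, for example, a
 * 1024-byte bvec at offset 3072 moves to (index + 1, offset 0), while the
 * same bvec at offset 1024 stays at (index, offset 2048).
 */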

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}


/*
 * To protect concurrent access to the same index entry,
 * caller should hold this table index entry's bit_spinlock to
 * indicate this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}
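
/*
 * Decompress the object at @index into @mem (one full page). Unallocated
 * and ZRAM_ZERO entries are expanded to a cleared page.
 */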

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
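	/* incompressible pages are stored as-is, see zram_bvec_write() */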
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}
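
/* read one bvec, using a temporary buffer when the read is sub-page */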

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
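
/*
 * Compress and store one bvec at @index. A partial write first reads the
 * old page, merges in the new bytes and then stores the result.
 */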

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before to write the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
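	/*
	 * Pages that do not compress below max_zpage_size are not worth
	 * storing compressed; keep the raw PAGE_SIZE copy instead.
	 */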
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
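
/* dispatch a single-page bvec to the read or write path and update stats */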

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio)
{
	int ret;
	int rw = bio_data_dir(bio);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical to physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block.  Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory.  So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		index++;
		n -= PAGE_SIZE;
	}
}

static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);
	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zcomp_destroy(zram->comp);
	zram->max_comp_streams = 1;

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	if (reset_capacity)
		revalidate_disk(zram->disk);
}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta);
	return err;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only make operation on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}
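
/*
 * Called by the swap subsystem when a swap slot on this device is freed,
 * so the compressed copy can be dropped without waiting for the slot to
 * be overwritten.
 */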

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
		max_comp_streams_show, max_comp_streams_store);
static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
		comp_algorithm_show, comp_algorithm_store);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};
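
/*
 * Typical setup from userspace, e.g. for /dev/zram0:
 *
 *	echo lzo > /sys/block/zram0/comp_algorithm
 *	echo 1G > /sys/block/zram0/disksize
 *
 * disksize_store() accepts memparse() suffixes (K/M/G); comp_algorithm
 * must be chosen first, since comp_algorithm_store() returns -EBUSY once
 * the device is initialized.
 */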

static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}