/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

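/* Map a sysfs device back to the zram instance embedded in its gendisk. */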
static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

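/*
 * sysfs attribute handlers: each *_show below backs a read-only file
 * under /sys/block/zram<id>/ (see zram_disk_attrs at the end of the file).
 */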
static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", zram->init_done);
}

static ssize_t num_reads_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.num_reads));
}

static ssize_t num_writes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.num_writes));
}

static ssize_t invalid_io_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.invalid_io));
}

static ssize_t notify_free_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.notify_free));
}

static ssize_t zero_pages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", zram->stats.pages_zero);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)(zram->stats.pages_stored) << PAGE_SHIFT);
}

static ssize_t compr_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.compr_size));
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (zram->init_done)
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return sprintf(buf, "%llu\n", val);
}

static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

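/* Free device metadata in the reverse order of zram_meta_alloc(). */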
static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	kfree(meta->compress_workmem);
	free_pages((unsigned long)meta->compress_buffer, 1);
	vfree(meta->table);
	kfree(meta);
}

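/*
 * Allocate everything needed to back @disksize bytes: LZO scratch memory,
 * a two-page compression buffer, the per-slot table and a zsmalloc pool.
 */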
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!meta->compress_workmem)
		goto free_meta;

	meta->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!meta->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		goto free_workmem;
	}

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_buffer;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_buffer:
	free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
	kfree(meta->compress_workmem);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

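/* Advance (index, offset) past @bvec, wrapping into the next page slot. */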
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

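/* Return 1 if the page contains only zero bytes, scanned a long at a time. */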
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

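/*
 * Satisfy a read of a zero slot by zeroing the caller's page (or just the
 * requested byte range for a partial I/O).
 */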
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

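/*
 * Release whatever backs slot @index: clear the flag for zero pages,
 * otherwise free the zsmalloc object, then update the statistics.
 */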
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;
	u16 size = meta->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			zram->stats.pages_zero--;
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		zram->stats.bad_compress--;

	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		zram->stats.good_compress--;

	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
	zram->stats.pages_stored--;

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}

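/*
 * Decompress slot @index into @mem. Zero slots are cleared directly and
 * incompressible pages (stored size == PAGE_SIZE) are plain-copied.
 */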
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (meta->table[index].size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
						mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

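/*
 * Read one bio_vec from slot @index. Partial reads go through a bounce
 * buffer; full-page reads decompress straight into the target page.
 */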
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

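/*
 * Write one bio_vec to slot @index. A partial write first reads back the
 * full page, zero-filled pages are recorded with just a flag, and pages
 * that compress poorly are stored uncompressed.
 */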
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		zram_free_page(zram, index);

		zram->stats.pages_zero++;
		zram_set_flag(meta, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	/*
	 * zram_slot_free_notify could have missed a free, so
	 * double check.
	 */
	if (unlikely(meta->table[index].handle ||
			zram_test_flag(meta, index, ZRAM_ZERO)))
		zram_free_page(zram, index);

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);

	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		zram->stats.bad_compress++;
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_size);
	zram->stats.pages_stored++;
	if (clen <= PAGE_SIZE / 2)
		zram->stats.good_compress++;

out:
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}

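/* Complete the slot frees queued by zram_slot_free_notify(). */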
static void handle_pending_slot_free(struct zram *zram)
{
	struct zram_slot_free *free_rq;

	spin_lock(&zram->slot_free_lock);
	while (zram->slot_free_rq) {
		free_rq = zram->slot_free_rq;
		zram->slot_free_rq = free_rq->next;
		zram_free_page(zram, free_rq->index);
		kfree(free_rq);
	}
	spin_unlock(&zram->slot_free_lock);
}

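/*
 * Dispatch a single-page bio_vec to the read or write path, taking
 * zram->lock shared for reads and exclusive for writes.
 */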
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		handle_pending_slot_free(zram);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		handle_pending_slot_free(zram);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}

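/*
 * Return the device to its uninitialized state: flush deferred frees,
 * release every stored object and the metadata, and clear the stats.
 */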
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	flush_work(&zram->free_work);

	down_write(&zram->init_lock);
	if (!zram->init_done) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	zram->init_done = 0;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);
	up_write(&zram->init_lock);
}

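/* Final initialization step: sanity-check the size and publish the metadata. */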
static void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->meta = meta;
	zram->init_done = 1;

	pr_debug("Initialization done!\n");
}

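/*
 * Set the device size via /sys/block/zram<id>/disksize (memparse format,
 * e.g. "512M"). Only allowed while the device is uninitialized.
 */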
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;
	down_write(&zram->init_lock);
	if (zram->init_done) {
		up_write(&zram->init_lock);
		zram_meta_free(meta);
		pr_info("Cannot change disksize for initialized device\n");
		return -EBUSY;
	}

	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_init_device(zram, meta);
	up_write(&zram->init_lock);

	return len;
}

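/*
 * Reset the device via /sys/block/zram<id>/reset. Refused while the
 * device is claimed (e.g. mounted or active as swap).
 */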
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

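/*
 * Walk the bio segment by segment, splitting any bio_vec that straddles
 * a page boundary so zram_bvec_rw() only ever sees one zram page.
 */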
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	switch (rw) {
	case READ:
		atomic64_inc(&zram->stats.num_reads);
		break;
	case WRITE:
		atomic64_inc(&zram->stats.num_writes);
		break;
	}

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

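/* Work item: run deferred slot frees in process context under zram->lock. */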
static void zram_slot_free(struct work_struct *work)
{
	struct zram *zram;

	zram = container_of(work, struct zram, free_work);
	down_write(&zram->lock);
	handle_pending_slot_free(zram);
	up_write(&zram->lock);
}

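/* Push a free request onto the device's spinlock-protected list. */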
static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
{
	spin_lock(&zram->slot_free_lock);
	free_rq->next = zram->slot_free_rq;
	zram->slot_free_rq = free_rq;
	spin_unlock(&zram->slot_free_lock);
}

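/*
 * Called from the swap layer in atomic context when a swap slot is freed;
 * the actual free is deferred to zram->free_work.
 */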
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_slot_free *free_rq;

	zram = bdev->bd_disk->private_data;
	atomic64_inc(&zram->stats.notify_free);

	free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
	if (!free_rq)
		return;

	free_rq->index = index;
	add_slot_free(zram, free_rq);
	schedule_work(&zram->free_work);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

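/* Allocate and register the queue, gendisk and sysfs attributes for one device. */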
static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);

	INIT_WORK(&zram->free_work, zram_slot_free);
	spin_lock_init(&zram->slot_free_lock);
	zram->slot_free_rq = NULL;

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out_free_disk;
	}

	zram->init_done = 0;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

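/* Undo create_device(): remove the sysfs group, gendisk and queue. */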
static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

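/* Register the block major and create num_devices devices at module load. */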
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

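/* Tear down every device and release the major number on module unload. */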
static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

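/*
 * Typical usage of the num_devices parameter and the sysfs interface
 * (device names assume the first instance):
 *
 *   modprobe zram num_devices=4
 *   echo 512M > /sys/block/zram0/disksize
 *   mkswap /dev/zram0
 *   swapon /dev/zram0
 */
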
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");