/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", zram->init_done);
}

static ssize_t num_reads_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.num_reads));
}

static ssize_t num_writes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.num_writes));
}

static ssize_t invalid_io_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.invalid_io));
}

static ssize_t notify_free_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.notify_free));
}

static ssize_t zero_pages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t compr_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.compr_size));
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (zram->init_done)
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return sprintf(buf, "%llu\n", val);
}

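/* Helpers to test/set/clear per-page flags in the zram_meta table */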
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

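/* Tear down everything allocated by zram_meta_alloc() */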
static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	kfree(meta->compress_workmem);
	free_pages((unsigned long)meta->compress_buffer, 1);
	vfree(meta->table);
	kfree(meta);
}

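/*
 * Allocate the per-device metadata: LZO scratch memory, a two-page
 * compression buffer, the per-page table and the zsmalloc pool.
 */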
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!meta->compress_workmem)
		goto free_meta;

	meta->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!meta->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		goto free_workmem;
	}

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_buffer;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_buffer:
	free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
	kfree(meta->compress_workmem);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

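/*
 * Advance (index, offset) past @bvec, moving to the next zram page
 * when the offset crosses a page boundary.
 */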
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

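/* Return 1 if the page at @ptr contains only zero bytes */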
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

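/*
 * Satisfy a read of a zero-filled page by clearing the relevant
 * part of the caller's page; nothing is stored for such pages.
 */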
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;
	u16 size = meta->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		atomic_dec(&zram->stats.bad_compress);

	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		atomic_dec(&zram->stats.good_compress);

	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
	atomic_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}

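/*
 * Decompress the page at @index into @mem. Unallocated and zero-filled
 * pages are simply cleared; pages stored uncompressed (size == PAGE_SIZE)
 * are copied without invoking LZO.
 */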
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (meta->table[index].size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
						mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

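/*
 * Read @bvec from the page at @index. Partial reads decompress into a
 * temporary buffer so only the requested bytes are copied out.
 */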
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

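/*
 * Write @bvec to the page at @index: zero-filled pages are only flagged,
 * everything else is LZO-compressed (or stored verbatim if it compresses
 * poorly) into the zsmalloc pool.
 */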
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		zram_free_page(zram, index);

		atomic_inc(&zram->stats.pages_zero);
		zram_set_flag(meta, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);

	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		atomic_inc(&zram->stats.bad_compress);
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_size);
	atomic_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		atomic_inc(&zram->stats.good_compress);

out:
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}

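/* Drain the slot-free requests queued by the swap notifier */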
static void handle_pending_slot_free(struct zram *zram)
{
	struct zram_slot_free *free_rq;

	spin_lock(&zram->slot_free_lock);
	while (zram->slot_free_rq) {
		free_rq = zram->slot_free_rq;
		zram->slot_free_rq = free_rq->next;
		zram_free_page(zram, free_rq->index);
		kfree(free_rq);
	}
	spin_unlock(&zram->slot_free_lock);
}

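/* Dispatch a single-page bio vector to the read or write path */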
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		handle_pending_slot_free(zram);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}

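/*
 * Free all stored pages and metadata, returning the device to the
 * uninitialized state. @reset_capacity is false only on module exit,
 * where the gendisk has already been released.
 */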
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);
	if (!zram->init_done) {
		up_write(&zram->init_lock);
		return;
	}

	flush_work(&zram->free_work);

	meta = zram->meta;
	zram->init_done = 0;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);
	up_write(&zram->init_lock);
}

static void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->meta = meta;
	zram->init_done = 1;

	pr_debug("Initialization done!\n");
}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;
	down_write(&zram->init_lock);
	if (zram->init_done) {
		up_write(&zram->init_lock);
		zram_meta_free(meta);
		pr_info("Cannot change disksize for initialized device\n");
		return -EBUSY;
	}

	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_init_device(zram, meta);
	up_write(&zram->init_lock);

	return len;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	switch (rw) {
	case READ:
		atomic64_inc(&zram->stats.num_reads);
		break;
	case WRITE:
		atomic64_inc(&zram->stats.num_writes);
		break;
	}

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

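/* Work handler: complete deferred slot frees under the write lock */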
static void zram_slot_free(struct work_struct *work)
{
	struct zram *zram;

	zram = container_of(work, struct zram, free_work);
	down_write(&zram->lock);
	handle_pending_slot_free(zram);
	up_write(&zram->lock);
}

static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
{
	spin_lock(&zram->slot_free_lock);
	free_rq->next = zram->slot_free_rq;
	zram->slot_free_rq = free_rq;
	spin_unlock(&zram->slot_free_lock);
}

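/*
 * Called when a swap slot is freed. The caller's context cannot sleep,
 * hence the GFP_ATOMIC allocation and deferral to a work item.
 */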
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_slot_free *free_rq;

	zram = bdev->bd_disk->private_data;
	atomic64_inc(&zram->stats.notify_free);

	free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
	if (!free_rq)
		return;

	free_rq->index = index;
	add_slot_free(zram, free_rq);
	schedule_work(&zram->free_work);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

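/* Allocate the request queue and gendisk and register the sysfs group */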
static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);

	INIT_WORK(&zram->free_work, zram_slot_free);
	spin_lock_init(&zram->slot_free_lock);
	zram->slot_free_rq = NULL;

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out_free_disk;
	}

	zram->init_done = 0;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");