/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", zram->init_done);
}

static ssize_t num_reads_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.num_reads));
}

static ssize_t num_writes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.num_writes));
}

static ssize_t invalid_io_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.invalid_io));
}

static ssize_t notify_free_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.notify_free));
}

static ssize_t zero_pages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t compr_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.compr_size));
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (zram->init_done)
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return sprintf(buf, "%llu\n", val);
}

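/*
 * The read-only attributes above surface under /sys/block/zram<id>/.
 * Illustrative usage for gauging the effective compression ratio
 * (device name assumes the default layout):
 *
 *	cat /sys/block/zram0/orig_data_size
 *	cat /sys/block/zram0/compr_data_size
 */
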
/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}
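
/*
 * Worked example (assuming 4 KiB pages and 512-byte sectors, so
 * ZRAM_LOGICAL_BLOCK_SIZE == 4096): a bio starting at sector 1, or one
 * with bi_size == 2048, fails the alignment checks above and is
 * rejected before any page-level work is done.
 */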

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	kfree(meta->compress_workmem);
	free_pages((unsigned long)meta->compress_buffer, 1);
	vfree(meta->table);
	kfree(meta);
}

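/*
 * Allocate per-device metadata: LZO scratch space, a two-page buffer
 * for worst-case compressed output, the per-page handle table and the
 * zsmalloc pool. Each error path unwinds the allocations made so far.
 */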
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!meta->compress_workmem)
		goto free_meta;

	meta->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!meta->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		goto free_workmem;
	}

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_buffer;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	rwlock_init(&meta->tb_lock);
	mutex_init(&meta->buffer_lock);
	return meta;

free_table:
	vfree(meta->table);
free_buffer:
	free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
	kfree(meta->compress_workmem);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
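
/*
 * e.g. with 4 KiB pages: offset 3072 plus a 1024-byte bvec reaches the
 * page boundary, so the index advances and the offset wraps to 0.
 */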

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

/* NOTE: caller should hold meta->tb_lock for writing */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;
	u16 size = meta->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		atomic_dec(&zram->stats.bad_compress);

	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		atomic_dec(&zram->stats.good_compress);

	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
	atomic_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}

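/*
 * Decompress the object at @index into @mem, which must be a full
 * page. Unallocated and zero-filled slots are satisfied by clearing
 * the page; incompressible objects stored verbatim (size == PAGE_SIZE)
 * are copied rather than run through LZO.
 */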
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	u16 size;

	read_lock(&meta->tb_lock);
	handle = meta->table[index].handle;
	size = meta->table[index].size;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);
	read_unlock(&meta->tb_lock);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	read_lock(&meta->tb_lock);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		handle_zero_page(bvec);
		return 0;
	}
	read_unlock(&meta->tb_lock);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

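/*
 * Compress one bio_vec into the zsmalloc pool. A partial write first
 * decompresses the old page so the new bytes can be merged in;
 * zero-filled pages are recorded with just a flag, and pages that
 * compress poorly (clen > max_zpage_size) are stored uncompressed.
 */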
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	bool locked = false;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	mutex_lock(&meta->buffer_lock);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		write_lock(&zram->meta->tb_lock);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		write_unlock(&zram->meta->tb_lock);

		atomic_inc(&zram->stats.pages_zero);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		atomic_inc(&zram->stats.bad_compress);
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	write_lock(&zram->meta->tb_lock);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;
	write_unlock(&zram->meta->tb_lock);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_size);
	atomic_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		atomic_inc(&zram->stats.good_compress);

out:
	if (locked)
		mutex_unlock(&meta->buffer_lock);
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ)
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	else
		ret = zram_bvec_write(zram, bvec, index, offset);

	return ret;
}

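/*
 * Tear everything down under init_lock: free every object still in the
 * pool, release the metadata and zero the stats. @reset_capacity is
 * false only on module exit, where the gendisk is about to be
 * destroyed anyway.
 */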
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);
	if (!zram->init_done) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	zram->init_done = 0;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);
	up_write(&zram->init_lock);
}

static void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->meta = meta;
	zram->init_done = 1;

	pr_debug("Initialization done!\n");
}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;
	down_write(&zram->init_lock);
	if (zram->init_done) {
		up_write(&zram->init_lock);
		zram_meta_free(meta);
		pr_info("Cannot change disksize for initialized device\n");
		return -EBUSY;
	}

	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_init_device(zram, meta);
	up_write(&zram->init_lock);

	return len;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

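/*
 * Illustrative lifecycle driven through the disksize and reset
 * attributes defined above (assumes the default device name zram0):
 *
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 *	...
 *	swapoff /dev/zram0
 *	echo 1 > /sys/block/zram0/reset
 */
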
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	switch (rw) {
	case READ:
		atomic64_inc(&zram->stats.num_reads);
		break;
	case WRITE:
		atomic64_inc(&zram->stats.num_writes);
		break;
	}

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

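/*
 * Called by the swap layer when a swap slot backed by this device is
 * freed, so the compressed copy can be dropped immediately instead of
 * lingering until the slot is overwritten.
 */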
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	write_lock(&meta->tb_lock);
	zram_free_page(zram, index);
	write_unlock(&meta->tb_lock);
	atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

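/*
 * Allocate the request queue and gendisk for one device and publish
 * the sysfs attributes above. The disk starts with zero capacity; it
 * only becomes usable once userspace writes a disksize.
 */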
static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out_free_disk;
	}

	zram->init_done = 0;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

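/* e.g. "modprobe zram num_devices=4" creates /dev/zram0 .. /dev/zram3 */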
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");