/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

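/*
 * Generate a read-only sysfs attribute that reports the 64-bit
 * stat counter of the same name from zram->stats.
 */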
#define ZRAM_ATTR_RO(name)						\
static ssize_t zram_attr_##name##_show(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static struct device_attribute dev_attr_##name =			\
	__ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);

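/* the device is initialized once disksize_store() has allocated its meta */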
static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}

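/*
 * max_comp_streams bounds how many compression streams (and hence
 * concurrent compressors) the zcomp backend may use.
 */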
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

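/*
 * comp_algorithm selects the compression backend; it can only be
 * changed while the device is not initialized.
 */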
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}

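/* a partial I/O covers less than a full page */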
static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

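/*
 * Allocate the per-device metadata: the table mapping page indexes to
 * zsmalloc handles, and the zsmalloc pool that backs them.
 */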
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_meta;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	rwlock_init(&meta->tb_lock);
	return meta;

free_table:
	vfree(meta->table);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

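/* return 1 if the page contains only zero bytes, 0 otherwise */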
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

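/*
 * Satisfy a read of a zero-filled page by clearing the requested
 * region of the caller's page; no backing memory exists for it.
 */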
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

/* NOTE: caller should hold meta->tb_lock for writing */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}

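/*
 * Decompress the page at @index into @mem. Unallocated or zero-filled
 * pages come back as a cleared page; incompressible pages stored at
 * PAGE_SIZE are copied verbatim.
 */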
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	read_lock(&meta->tb_lock);
	handle = meta->table[index].handle;
	size = meta->table[index].size;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	read_unlock(&meta->tb_lock);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

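/*
 * Read a bio vector from the device. A partial read is decompressed
 * into a bounce buffer first so only the requested bytes are copied.
 */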
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	read_lock(&meta->tb_lock);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		handle_zero_page(bvec);
		return 0;
	}
	read_unlock(&meta->tb_lock);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

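/*
 * Write a bio vector to the device. Zero-filled pages are only
 * flagged, pages that compress above max_zpage_size are stored
 * uncompressed, everything else goes into the zsmalloc pool.
 */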
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		write_lock(&zram->meta->tb_lock);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		write_unlock(&zram->meta->tb_lock);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	write_lock(&zram->meta->tb_lock);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;
	write_unlock(&zram->meta->tb_lock);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}

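/* dispatch a bio vector to the read or write path and update stats */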
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio)
{
	int ret;
	int rw = bio_data_dir(bio);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	return ret;
}

/*
 * zram_bio_discard - handler for discard requests
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical to physical block size on some architectures,
	 * we could get a discard request pointing to a specific offset within
	 * a certain physical block.  Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory.  So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
Joonsoo Kim's avatar
Joonsoo Kim committed
576 577
			return;

		n -= (PAGE_SIZE - offset);
Joonsoo Kim's avatar
Joonsoo Kim committed
579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594
		index++;
	}

	while (n >= PAGE_SIZE) {
		/*
		 * Discard request can be large so the lock hold times could be
		 * lengthy.  So take the lock once per page.
		 */
		write_lock(&zram->meta->tb_lock);
		zram_free_page(zram, index);
		write_unlock(&zram->meta->tb_lock);
		index++;
		n -= PAGE_SIZE;
	}
}

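/*
 * Tear down an initialized device: free every stored page, destroy
 * the compression backend and the metadata, and clear the stats.
 * Capacity is zeroed only when @reset_capacity is true.
 */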
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);
	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zcomp_destroy(zram->comp);
	zram->max_comp_streams = 1;

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	if (reset_capacity)
		revalidate_disk(zram->disk);
}

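/*
 * Setting disksize initializes the device: the metadata and the
 * compression backend are allocated before taking init_lock.
 */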
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta);
	return err;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

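/*
 * Split bio vectors that straddle a page boundary so that
 * zram_bvec_rw() always operates within a single zram page.
 */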
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only make operation on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

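/*
 * Called by the swap layer when a swap slot on this device is freed:
 * the backing page can be released immediately.
 */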
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	write_lock(&meta->tb_lock);
	zram_free_page(zram, index);
	write_unlock(&meta->tb_lock);
	atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
		max_comp_streams_show, max_comp_streams_store);
static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
		comp_algorithm_show, comp_algorithm_store);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

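/*
 * Allocate the request queue and gendisk for one zram device and
 * set its defaults; actual capacity is set later via sysfs.
 */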
static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);