/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

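/*
 * Declares a read-only sysfs attribute backed by an atomic64_t counter in
 * zram->stats. For example, ZRAM_ATTR_RO(num_reads) expands to
 * zram_attr_num_reads_show() plus a matching dev_attr_num_reads, exposed
 * as /sys/block/zram<id>/num_reads.
 */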
#define ZRAM_ATTR_RO(name)						\
static ssize_t zram_attr_##name##_show(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static struct device_attribute dev_attr_##name =			\
	__ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);

static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = zs_get_total_pages(meta->mem_pool);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

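/*
 * The limit is parsed with memparse(), so suffixes such as K/M/G are
 * accepted, e.g. "echo 256M > /sys/block/zram0/mem_limit". Writing 0
 * disables the limit.
 */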
static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram))
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	up_read(&zram->init_lock);

	return len;
}

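/*
 * Upper bound on concurrent compression streams, i.e. how many pages the
 * backend can compress in parallel; typically tuned to the number of CPUs,
 * e.g. "echo 3 > /sys/block/zram0/max_comp_streams".
 */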
static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

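/*
 * Selects the compression backend for a not-yet-initialized device, e.g.
 * "echo lz4 > /sys/block/zram0/comp_algorithm" (assuming the lz4 backend
 * is compiled in); the default is "lzo".
 */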
static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

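/*
 * meta->table[index].value packs two fields into one word: the compressed
 * object size in the low ZRAM_FLAG_SHIFT bits, and the zram_pageflags
 * (including the ZRAM_ACCESS lock bit) in the bits above it.
 */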
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_meta;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

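/*
 * Advance (index, offset) past the bio vector just processed: index counts
 * whole PAGE_SIZE units, offset is the byte position within the current page.
 */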
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

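/* Return 1 if the page contains only zero words, 0 otherwise. */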
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

/*
 * To protect concurrent access to the same index entry,
 * caller should hold this table index entry's bit_spinlock to
 * indicate this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

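/*
 * Decompress the object at @index into @mem, which must hold a full page.
 * The entry's ZRAM_ACCESS bit-spinlock is held across lookup and
 * decompression; unallocated or ZRAM_ZERO entries simply yield a cleared page.
 */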
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

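/*
 * Lock-free update of stats.max_used_pages: retry the atomic_long_cmpxchg()
 * until either the stored maximum already covers @pages or our value has
 * been published.
 */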
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	int old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

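/*
 * Write path for a single bio vector: read-modify-write for partial I/O,
 * zero-page detection, compression (pages whose compressed size exceeds
 * max_zpage_size are stored uncompressed), allocation from zsmalloc against
 * the optional mem_limit, then table and stats updates.
 */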
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio)
{
	int ret;
	int rw = bio_data_dir(bio);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical to physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block.  Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory.  So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		index++;
		n -= PAGE_SIZE;
	}
}

static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zcomp_destroy(zram->comp);
	zram->max_comp_streams = 1;

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	if (reset_capacity)
		revalidate_disk(zram->disk);
}

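/*
 * Initializes the device: allocates the metadata table and the compression
 * backend for the given size (rounded up to a page multiple), e.g.
 * "echo 1G > /sys/block/zram0/disksize" before running mkswap or mkfs on it.
 */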
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta);
	return err;
}

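/*
 * Writing a non-zero value, e.g. "echo 1 > /sys/block/zram0/reset", frees
 * all stored pages and returns the device to the uninitialized state. The
 * reset is refused with -EBUSY while the device is claimed by a holder
 * (mounted or in use as swap).
 */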
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

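/*
 * Walk the bio segment by segment, mapping the starting sector to a
 * (page index, intra-page offset) pair and splitting any bio vector that
 * crosses a page boundary, since zram_bvec_rw() operates on one zram page
 * at a time.
 */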
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

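/*
 * Called by the swap layer when a swap slot on this device is freed, so the
 * backing compressed object can be released immediately rather than waiting
 * for the slot to be overwritten.
 */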
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store