Commit fff648da authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Here's the second round of block updates for this merge window.

  It's a mix of fixes for changes that went in previously in this round,
  and fixes in general.  This pull request contains:

   - Fixes for loop from Christoph

   - A bdi vs gendisk lifetime fix from Dan, worth two cookies.

   - A blk-mq timeout fix, when on frozen queues.  From Gabriel.

   - Writeback fix from Jan, ensuring that __writeback_single_inode()
     does the right thing.

   - Fix for bio->bi_rw usage in f2fs from me.

   - Error path deadlock fix in blk-mq sysfs registration from me.

   - Floppy O_ACCMODE fix from Jiri.

   - Fix to the new bio op methods from Mike.

     One more followup will be coming here, ensuring that we don't
     propagate the block types outside of block.  That, and a rename of
     bio->bi_rw is coming right after -rc1 is cut.

   - Various little fixes"

* 'for-linus' of git://git.kernel.dk/linux-block:
  mm/block: convert rw_page users to bio op use
  loop: make do_req_filebacked more robust
  loop: don't try to use AIO for discards
  blk-mq: fix deadlock in blk_mq_register_disk() error path
  Include: blkdev: Removed duplicate 'struct request;' declaration.
  Fixup direct bi_rw modifiers
  block: fix bdi vs gendisk lifetime mismatch
  blk-mq: Allow timeouts to run while queue is freezing
  nbd: fix race in ioctl
  block: fix use-after-free in seq file
  f2fs: drop bio->bi_rw manual assignment
  block: add missing group association in bio-cloning functions
  blkcg: kill unused field nr_undestroyed_grps
  writeback: Write dirty times for WB_SYNC_ALL writeback
  floppy: fix open(O_ACCMODE) for ioctl-only open
parents 62e6e9ba abf54548
...@@ -583,6 +583,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) ...@@ -583,6 +583,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio->bi_rw = bio_src->bi_rw; bio->bi_rw = bio_src->bi_rw;
bio->bi_iter = bio_src->bi_iter; bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec; bio->bi_io_vec = bio_src->bi_io_vec;
bio_clone_blkcg_association(bio, bio_src);
} }
EXPORT_SYMBOL(__bio_clone_fast); EXPORT_SYMBOL(__bio_clone_fast);
...@@ -687,6 +689,8 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, ...@@ -687,6 +689,8 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
} }
} }
bio_clone_blkcg_association(bio, bio_src);
return bio; return bio;
} }
EXPORT_SYMBOL(bio_clone_bioset); EXPORT_SYMBOL(bio_clone_bioset);
...@@ -2004,6 +2008,17 @@ void bio_disassociate_task(struct bio *bio) ...@@ -2004,6 +2008,17 @@ void bio_disassociate_task(struct bio *bio)
} }
} }
/**
 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
{
/* Only copy the association when @src actually has one. */
if (src->bi_css)
/*
 * NOTE(review): bio_associate_blkcg() is expected to succeed here
 * since @dst should have no prior association; a failure is only
 * warned about, not propagated to the caller — confirm that is
 * acceptable for every clone path.
 */
WARN_ON(bio_associate_blkcg(dst, src->bi_css));
}
#endif /* CONFIG_BLK_CGROUP */ #endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void) static void __init biovec_init_slabs(void)
......
...@@ -380,15 +380,13 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) ...@@ -380,15 +380,13 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
return ret; return ret;
} }
void blk_mq_unregister_disk(struct gendisk *disk) static void __blk_mq_unregister_disk(struct gendisk *disk)
{ {
struct request_queue *q = disk->queue; struct request_queue *q = disk->queue;
struct blk_mq_hw_ctx *hctx; struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx; struct blk_mq_ctx *ctx;
int i, j; int i, j;
blk_mq_disable_hotplug();
queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx(q, hctx, i) {
blk_mq_unregister_hctx(hctx); blk_mq_unregister_hctx(hctx);
...@@ -405,6 +403,12 @@ void blk_mq_unregister_disk(struct gendisk *disk) ...@@ -405,6 +403,12 @@ void blk_mq_unregister_disk(struct gendisk *disk)
kobject_put(&disk_to_dev(disk)->kobj); kobject_put(&disk_to_dev(disk)->kobj);
q->mq_sysfs_init_done = false; q->mq_sysfs_init_done = false;
}
/*
 * Public unregister entry point: wraps __blk_mq_unregister_disk() in the
 * hotplug disable/enable pair so the error path in blk_mq_register_disk()
 * (which already holds hotplug disabled) can call the bare helper without
 * deadlocking on re-acquisition.
 */
void blk_mq_unregister_disk(struct gendisk *disk)
{
blk_mq_disable_hotplug();
__blk_mq_unregister_disk(disk);
blk_mq_enable_hotplug(); blk_mq_enable_hotplug();
} }
...@@ -450,7 +454,7 @@ int blk_mq_register_disk(struct gendisk *disk) ...@@ -450,7 +454,7 @@ int blk_mq_register_disk(struct gendisk *disk)
} }
if (ret) if (ret)
blk_mq_unregister_disk(disk); __blk_mq_unregister_disk(disk);
else else
q->mq_sysfs_init_done = true; q->mq_sysfs_init_done = true;
out: out:
......
...@@ -672,7 +672,20 @@ static void blk_mq_timeout_work(struct work_struct *work) ...@@ -672,7 +672,20 @@ static void blk_mq_timeout_work(struct work_struct *work)
}; };
int i; int i;
if (blk_queue_enter(q, true)) /* A deadlock might occur if a request is stuck requiring a
* timeout at the same time a queue freeze is waiting
* completion, since the timeout code would not be able to
* acquire the queue reference here.
*
* That's why we don't use blk_queue_enter here; instead, we use
* percpu_ref_tryget directly, because we need to be able to
* obtain a reference even in the short window between the queue
* starting to freeze, by dropping the first reference in
* blk_mq_freeze_queue_start, and the moment the last request is
* consumed, marked by the instant q_usage_counter reaches
* zero.
*/
if (!percpu_ref_tryget(&q->q_usage_counter))
return; return;
blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
......
...@@ -145,11 +145,6 @@ struct throtl_data ...@@ -145,11 +145,6 @@ struct throtl_data
/* Total Number of queued bios on READ and WRITE lists */ /* Total Number of queued bios on READ and WRITE lists */
unsigned int nr_queued[2]; unsigned int nr_queued[2];
/*
* number of total undestroyed groups
*/
unsigned int nr_undestroyed_grps;
/* Work for dispatching throttled bios */ /* Work for dispatching throttled bios */
struct work_struct dispatch_work; struct work_struct dispatch_work;
}; };
......
...@@ -614,7 +614,7 @@ void device_add_disk(struct device *parent, struct gendisk *disk) ...@@ -614,7 +614,7 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
/* Register BDI before referencing it from bdev */ /* Register BDI before referencing it from bdev */
bdi = &disk->queue->backing_dev_info; bdi = &disk->queue->backing_dev_info;
bdi_register_dev(bdi, disk_devt(disk)); bdi_register_owner(bdi, disk_to_dev(disk));
blk_register_region(disk_devt(disk), disk->minors, NULL, blk_register_region(disk_devt(disk), disk->minors, NULL,
exact_match, exact_lock, disk); exact_match, exact_lock, disk);
...@@ -856,6 +856,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v) ...@@ -856,6 +856,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
if (iter) { if (iter) {
class_dev_iter_exit(iter); class_dev_iter_exit(iter);
kfree(iter); kfree(iter);
seqf->private = NULL;
} }
} }
......
...@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd, ...@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio. * Process a single bvec of a bio.
*/ */
static int brd_do_bvec(struct brd_device *brd, struct page *page, static int brd_do_bvec(struct brd_device *brd, struct page *page,
unsigned int len, unsigned int off, int rw, unsigned int len, unsigned int off, int op,
sector_t sector) sector_t sector)
{ {
void *mem; void *mem;
int err = 0; int err = 0;
if (rw != READ) { if (op_is_write(op)) {
err = copy_to_brd_setup(brd, sector, len); err = copy_to_brd_setup(brd, sector, len);
if (err) if (err)
goto out; goto out;
} }
mem = kmap_atomic(page); mem = kmap_atomic(page);
if (rw == READ) { if (!op_is_write(op)) {
copy_from_brd(mem + off, brd, sector, len); copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page); flush_dcache_page(page);
} else { } else {
...@@ -330,7 +330,6 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) ...@@ -330,7 +330,6 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{ {
struct block_device *bdev = bio->bi_bdev; struct block_device *bdev = bio->bi_bdev;
struct brd_device *brd = bdev->bd_disk->private_data; struct brd_device *brd = bdev->bd_disk->private_data;
int rw;
struct bio_vec bvec; struct bio_vec bvec;
sector_t sector; sector_t sector;
struct bvec_iter iter; struct bvec_iter iter;
...@@ -347,14 +346,12 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) ...@@ -347,14 +346,12 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
goto out; goto out;
} }
rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len; unsigned int len = bvec.bv_len;
int err; int err;
err = brd_do_bvec(brd, bvec.bv_page, len, err = brd_do_bvec(brd, bvec.bv_page, len,
bvec.bv_offset, rw, sector); bvec.bv_offset, bio_op(bio), sector);
if (err) if (err)
goto io_error; goto io_error;
sector += len >> SECTOR_SHIFT; sector += len >> SECTOR_SHIFT;
...@@ -369,11 +366,11 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) ...@@ -369,11 +366,11 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
} }
static int brd_rw_page(struct block_device *bdev, sector_t sector, static int brd_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, int rw) struct page *page, int op)
{ {
struct brd_device *brd = bdev->bd_disk->private_data; struct brd_device *brd = bdev->bd_disk->private_data;
int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector); int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
page_endio(page, rw & WRITE, err); page_endio(page, op, err);
return err; return err;
} }
......
...@@ -3663,11 +3663,6 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) ...@@ -3663,11 +3663,6 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
opened_bdev[drive] = bdev; opened_bdev[drive] = bdev;
if (!(mode & (FMODE_READ|FMODE_WRITE))) {
res = -EINVAL;
goto out;
}
res = -ENXIO; res = -ENXIO;
if (!floppy_track_buffer) { if (!floppy_track_buffer) {
...@@ -3711,13 +3706,15 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) ...@@ -3711,13 +3706,15 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (UFDCS->rawcmd == 1) if (UFDCS->rawcmd == 1)
UFDCS->rawcmd = 2; UFDCS->rawcmd = 2;
UDRS->last_checked = 0; if (mode & (FMODE_READ|FMODE_WRITE)) {
clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); UDRS->last_checked = 0;
check_disk_change(bdev); clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) check_disk_change(bdev);
goto out; if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) goto out;
goto out; if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
goto out;
}
res = -EROFS; res = -EROFS;
......
...@@ -510,14 +510,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, ...@@ -510,14 +510,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
return 0; return 0;
} }
static int do_req_filebacked(struct loop_device *lo, struct request *rq)
static inline int lo_rw_simple(struct loop_device *lo,
struct request *rq, loff_t pos, bool rw)
{ {
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, rw);
/* /*
* lo_write_simple and lo_read_simple should have been covered * lo_write_simple and lo_read_simple should have been covered
...@@ -528,37 +524,30 @@ static inline int lo_rw_simple(struct loop_device *lo, ...@@ -528,37 +524,30 @@ static inline int lo_rw_simple(struct loop_device *lo,
* of the req at one time. And direct read IO doesn't need to * of the req at one time. And direct read IO doesn't need to
* run flush_dcache_page(). * run flush_dcache_page().
*/ */
if (rw == WRITE) switch (req_op(rq)) {
return lo_write_simple(lo, rq, pos); case REQ_OP_FLUSH:
else return lo_req_flush(lo, rq);
return lo_read_simple(lo, rq, pos); case REQ_OP_DISCARD:
} return lo_discard(lo, rq, pos);
case REQ_OP_WRITE:
static int do_req_filebacked(struct loop_device *lo, struct request *rq) if (lo->transfer)
{ return lo_write_transfer(lo, rq, pos);
loff_t pos; else if (cmd->use_aio)
int ret; return lo_rw_aio(lo, cmd, pos, WRITE);
pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
if (op_is_write(req_op(rq))) {
if (req_op(rq) == REQ_OP_FLUSH)
ret = lo_req_flush(lo, rq);
else if (req_op(rq) == REQ_OP_DISCARD)
ret = lo_discard(lo, rq, pos);
else if (lo->transfer)
ret = lo_write_transfer(lo, rq, pos);
else else
ret = lo_rw_simple(lo, rq, pos, WRITE); return lo_write_simple(lo, rq, pos);
case REQ_OP_READ:
} else {
if (lo->transfer) if (lo->transfer)
ret = lo_read_transfer(lo, rq, pos); return lo_read_transfer(lo, rq, pos);
else if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, READ);
else else
ret = lo_rw_simple(lo, rq, pos, READ); return lo_read_simple(lo, rq, pos);
default:
WARN_ON_ONCE(1);
return -EIO;
break;
} }
return ret;
} }
struct switch_request { struct switch_request {
...@@ -1659,11 +1648,15 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx, ...@@ -1659,11 +1648,15 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (lo->lo_state != Lo_bound) if (lo->lo_state != Lo_bound)
return -EIO; return -EIO;
if (lo->use_dio && (req_op(cmd->rq) != REQ_OP_FLUSH || switch (req_op(cmd->rq)) {
req_op(cmd->rq) == REQ_OP_DISCARD)) case REQ_OP_FLUSH:
cmd->use_aio = true; case REQ_OP_DISCARD:
else
cmd->use_aio = false; cmd->use_aio = false;
break;
default:
cmd->use_aio = lo->use_dio;
break;
}
queue_kthread_work(&lo->worker, &cmd->work); queue_kthread_work(&lo->worker, &cmd->work);
......
...@@ -451,14 +451,9 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev) ...@@ -451,14 +451,9 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
sk_set_memalloc(nbd->sock->sk); sk_set_memalloc(nbd->sock->sk);
nbd->task_recv = current;
ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr); ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
if (ret) { if (ret) {
dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
nbd->task_recv = NULL;
return ret; return ret;
} }
...@@ -477,9 +472,6 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev) ...@@ -477,9 +472,6 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
nbd_size_clear(nbd, bdev); nbd_size_clear(nbd, bdev);
device_remove_file(disk_to_dev(nbd->disk), &pid_attr); device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
nbd->task_recv = NULL;
return ret; return ret;
} }
...@@ -788,6 +780,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, ...@@ -788,6 +780,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
if (!nbd->sock) if (!nbd->sock)
return -EINVAL; return -EINVAL;
/* We have to claim the device under the lock */
nbd->task_recv = current;
mutex_unlock(&nbd->tx_lock); mutex_unlock(&nbd->tx_lock);
nbd_parse_flags(nbd, bdev); nbd_parse_flags(nbd, bdev);
...@@ -796,6 +790,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, ...@@ -796,6 +790,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd_name(nbd)); nbd_name(nbd));
if (IS_ERR(thread)) { if (IS_ERR(thread)) {
mutex_lock(&nbd->tx_lock); mutex_lock(&nbd->tx_lock);
nbd->task_recv = NULL;
return PTR_ERR(thread); return PTR_ERR(thread);
} }
...@@ -805,6 +800,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, ...@@ -805,6 +800,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
kthread_stop(thread); kthread_stop(thread);
mutex_lock(&nbd->tx_lock); mutex_lock(&nbd->tx_lock);
nbd->task_recv = NULL;
sock_shutdown(nbd); sock_shutdown(nbd);
nbd_clear_que(nbd); nbd_clear_que(nbd);
......
...@@ -843,15 +843,15 @@ static void zram_bio_discard(struct zram *zram, u32 index, ...@@ -843,15 +843,15 @@ static void zram_bio_discard(struct zram *zram, u32 index,
} }
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, int rw) int offset, int op)
{ {
unsigned long start_time = jiffies; unsigned long start_time = jiffies;
int ret; int ret;
generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT, generic_start_io_acct(op, bvec->bv_len >> SECTOR_SHIFT,
&zram->disk->part0); &zram->disk->part0);
if (rw == READ) { if (!op_is_write(op)) {
atomic64_inc(&zram->stats.num_reads); atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset); ret = zram_bvec_read(zram, bvec, index, offset);
} else { } else {
...@@ -859,10 +859,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, ...@@ -859,10 +859,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = zram_bvec_write(zram, bvec, index, offset); ret = zram_bvec_write(zram, bvec, index, offset);
} }
generic_end_io_acct(rw, &zram->disk->part0, start_time); generic_end_io_acct(op, &zram->disk->part0, start_time);
if (unlikely(ret)) { if (unlikely(ret)) {
if (rw == READ) if (!op_is_write(op))
atomic64_inc(&zram->stats.failed_reads); atomic64_inc(&zram->stats.failed_reads);
else else
atomic64_inc(&zram->stats.failed_writes); atomic64_inc(&zram->stats.failed_writes);
...@@ -873,7 +873,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, ...@@ -873,7 +873,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
static void __zram_make_request(struct zram *zram, struct bio *bio) static void __zram_make_request(struct zram *zram, struct bio *bio)
{ {
int offset, rw; int offset;
u32 index; u32 index;
struct bio_vec bvec; struct bio_vec bvec;
struct bvec_iter iter; struct bvec_iter iter;
...@@ -888,7 +888,6 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) ...@@ -888,7 +888,6 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
return; return;
} }
rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment(bvec, bio, iter) {
int max_transfer_size = PAGE_SIZE - offset; int max_transfer_size = PAGE_SIZE - offset;
...@@ -903,15 +902,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) ...@@ -903,15 +902,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = max_transfer_size; bv.bv_len = max_transfer_size;
bv.bv_offset = bvec.bv_offset; bv.bv_offset = bvec.bv_offset;
if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0) if (zram_bvec_rw(zram, &bv, index, offset,
bio_op(bio)) < 0)
goto out; goto out;
bv.bv_len = bvec.bv_len - max_transfer_size; bv.bv_len = bvec.bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size; bv.bv_offset += max_transfer_size;
if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0) if (zram_bvec_rw(zram, &bv, index + 1, 0,
bio_op(bio)) < 0)
goto out; goto out;
} else } else
if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0) if (zram_bvec_rw(zram, &bvec, index, offset,
bio_op(bio)) < 0)
goto out; goto out;
update_position(&index, &offset, &bvec); update_position(&index, &offset, &bvec);
...@@ -968,7 +970,7 @@ static void zram_slot_free_notify(struct block_device *bdev, ...@@ -968,7 +970,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
} }
static int zram_rw_page(struct block_device *bdev, sector_t sector, static int zram_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, int rw) struct page *page, int op)
{ {
int offset, err = -EIO; int offset, err = -EIO;
u32 index; u32 index;
...@@ -992,7 +994,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector, ...@@ -992,7 +994,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE; bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0; bv.bv_offset = 0;
err = zram_bvec_rw(zram, &bv, index, offset, rw); err = zram_bvec_rw(zram, &bv, index, offset, op);
put_zram: put_zram:
zram_meta_put(zram); zram_meta_put(zram);
out: out:
...@@ -1005,7 +1007,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector, ...@@ -1005,7 +1007,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
* (e.g., SetPageError, set_page_dirty and extra works). * (e.g., SetPageError, set_page_dirty and extra works).
*/ */
if (err == 0) if (err == 0)
page_endio(page, rw, 0); page_endio(page, op, 0);
return err; return err;
} }
......
...@@ -1133,11 +1133,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip, ...@@ -1133,11 +1133,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip, static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
struct page *page, unsigned int len, unsigned int off, struct page *page, unsigned int len, unsigned int off,
int rw, sector_t sector) int op, sector_t sector)
{ {
int ret; int ret;
if (rw == READ) { if (!op_is_write(op)) {
ret = btt_read_pg(btt, bip, page, off, sector, len); ret = btt_read_pg(btt, bip, page, off, sector, len);
flush_dcache_page(page); flush_dcache_page(page);
} else { } else {
...@@ -1155,7 +1155,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) ...@@ -1155,7 +1155,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
struct bvec_iter iter; struct bvec_iter iter;
unsigned long start; unsigned long start;
struct bio_vec bvec; struct bio_vec bvec;
int err = 0, rw; int err = 0;
bool do_acct; bool do_acct;
/* /*
...@@ -1170,7 +1170,6 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) ...@@ -1170,7 +1170,6 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
} }
do_acct = nd_iostat_start(bio, &start);