Commit 7988613b authored by Kent Overstreet

block: Convert bio_for_each_segment() to bvec_iter

More prep work for immutable biovecs: with immutable bvecs, drivers
won't be able to use the biovec directly; they'll need to use helpers
that take bio->bi_iter.bi_bvec_done into account.

This updates callers for the new usage without changing the
implementation yet.
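
To make the shape of the conversion concrete, here is a minimal
before/after sketch of the iteration idiom (an illustration, not part
of the original commit; do_something() is a hypothetical placeholder
for whatever per-segment work a driver does):

        /* Before: bio_for_each_segment() yielded a struct bio_vec *
         * indexed by an int, and drivers dereferenced the biovec
         * entries directly. */
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment(bvec, bio, i)
                do_something(bvec->bv_page, bvec->bv_offset, bvec->bv_len);

        /* After: the loop variable is a struct bio_vec copied out by
         * value, and the iteration state lives in a struct bvec_iter,
         * which lets the helpers account for bio->bi_iter.bi_bvec_done
         * (a partially consumed first biovec). */
        struct bio_vec bvec;
        struct bvec_iter iter;

        bio_for_each_segment(bvec, bio, iter)
                do_something(bvec.bv_page, bvec.bv_offset, bvec.bv_len);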
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Paul Clements <Paul.Clements@steeleye.com>
Cc: Jim Paris <jim@jtan.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: support@lsi.com
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Quoc-Son Anh <quoc-sonx.anh@intel.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Jan Kara <jack@suse.cz>
Cc: linux-m68k@lists.linux-m68k.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: drbd-user@lists.linbit.com
Cc: nbd-general@lists.sourceforge.net
Cc: cbe-oss-dev@lists.ozlabs.org
Cc: xen-devel@lists.xensource.com
Cc: virtualization@lists.linux-foundation.org
Cc: linux-raid@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: DL-MPTFusionLinux@lsi.com
Cc: linux-scsi@vger.kernel.org
Cc: devel@driverdev.osuosl.org
Cc: linux-fsdevel@vger.kernel.org
Cc: cluster-devel@redhat.com
Cc: linux-mm@kvack.org
Acked-by: Geoff Levand <geoff@infradead.org>
parent a4ad39b1
@@ -62,17 +62,18 @@ struct nfhd_device {
 static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
         struct nfhd_device *dev = queue->queuedata;
-        struct bio_vec *bvec;
-        int i, dir, len, shift;
+        struct bio_vec bvec;
+        struct bvec_iter iter;
+        int dir, len, shift;
         sector_t sec = bio->bi_iter.bi_sector;
 
         dir = bio_data_dir(bio);
         shift = dev->bshift;
-        bio_for_each_segment(bvec, bio, i) {
-                len = bvec->bv_len;
+        bio_for_each_segment(bvec, bio, iter) {
+                len = bvec.bv_len;
                 len >>= 9;
                 nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
-                                bvec_to_phys(bvec));
+                                bvec_to_phys(&bvec));
                 sec += len;
         }
         bio_endio(bio, 0);
......
@@ -109,28 +109,28 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
         struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
         unsigned long phys_mem, phys_end;
         void *user_mem;
-        struct bio_vec *vec;
+        struct bio_vec vec;
         unsigned int transfered;
-        unsigned short idx;
+        struct bvec_iter iter;
 
         phys_mem = bank->io_addr + (bio->bi_iter.bi_sector <<
                                     AXON_RAM_SECTOR_SHIFT);
         phys_end = bank->io_addr + bank->size;
         transfered = 0;
-        bio_for_each_segment(vec, bio, idx) {
-                if (unlikely(phys_mem + vec->bv_len > phys_end)) {
+        bio_for_each_segment(vec, bio, iter) {
+                if (unlikely(phys_mem + vec.bv_len > phys_end)) {
                         bio_io_error(bio);
                         return;
                 }
 
-                user_mem = page_address(vec->bv_page) + vec->bv_offset;
+                user_mem = page_address(vec.bv_page) + vec.bv_offset;
                 if (bio_data_dir(bio) == READ)
-                        memcpy(user_mem, (void *) phys_mem, vec->bv_len);
+                        memcpy(user_mem, (void *) phys_mem, vec.bv_len);
                 else
-                        memcpy((void *) phys_mem, user_mem, vec->bv_len);
+                        memcpy((void *) phys_mem, user_mem, vec.bv_len);
 
-                phys_mem += vec->bv_len;
-                transfered += vec->bv_len;
+                phys_mem += vec.bv_len;
+                transfered += vec.bv_len;
         }
         bio_endio(bio, 0);
 }
......
@@ -2746,10 +2746,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 void rq_flush_dcache_pages(struct request *rq)
 {
         struct req_iterator iter;
-        struct bio_vec *bvec;
+        struct bio_vec bvec;
 
         rq_for_each_segment(bvec, rq, iter)
-                flush_dcache_page(bvec->bv_page);
+                flush_dcache_page(bvec.bv_page);
 }
 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
 #endif
......
@@ -12,10 +12,11 @@
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                              struct bio *bio)
 {
-        struct bio_vec *bv, *bvprv = NULL;
-        int cluster, i, high, highprv = 1;
+        struct bio_vec bv, bvprv = { NULL };
+        int cluster, high, highprv = 1;
         unsigned int seg_size, nr_phys_segs;
         struct bio *fbio, *bbio;
+        struct bvec_iter iter;
 
         if (!bio)
                 return 0;
@@ -25,25 +26,23 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
         seg_size = 0;
         nr_phys_segs = 0;
         for_each_bio(bio) {
-                bio_for_each_segment(bv, bio, i) {
+                bio_for_each_segment(bv, bio, iter) {
                         /*
                          * the trick here is making sure that a high page is
                          * never considered part of another segment, since that
                          * might change with the bounce page.
                          */
-                        high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
-                        if (high || highprv)
-                                goto new_segment;
-                        if (cluster) {
-                                if (seg_size + bv->bv_len
+                        high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
+                        if (!high && !highprv && cluster) {
+                                if (seg_size + bv.bv_len
                                     > queue_max_segment_size(q))
                                         goto new_segment;
-                                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+                                if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                         goto new_segment;
-                                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+                                if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                         goto new_segment;
 
-                                seg_size += bv->bv_len;
+                                seg_size += bv.bv_len;
                                 bvprv = bv;
                                 continue;
                         }
@@ -54,7 +53,7 @@ new_segment:
                         nr_phys_segs++;
 
                         bvprv = bv;
-                        seg_size = bv->bv_len;
+                        seg_size = bv.bv_len;
                         highprv = high;
                 }
                 bbio = bio;
@@ -110,21 +109,21 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
         return 0;
 }
 
-static void
+static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
-                     struct scatterlist *sglist, struct bio_vec **bvprv,
+                     struct scatterlist *sglist, struct bio_vec *bvprv,
                      struct scatterlist **sg, int *nsegs, int *cluster)
 {
 
         int nbytes = bvec->bv_len;
 
-        if (*bvprv && *cluster) {
+        if (*sg && *cluster) {
                 if ((*sg)->length + nbytes > queue_max_segment_size(q))
                         goto new_segment;
 
-                if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                         goto new_segment;
-                if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                         goto new_segment;
 
                 (*sg)->length += nbytes;
@@ -150,7 +149,7 @@ new_segment:
                 sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                 (*nsegs)++;
         }
-        *bvprv = bvec;
+        *bvprv = *bvec;
 }
 
 /*
@@ -160,7 +159,7 @@ new_segment:
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                   struct scatterlist *sglist)
 {
-        struct bio_vec *bvec, *bvprv;
+        struct bio_vec bvec, bvprv;
         struct req_iterator iter;
         struct scatterlist *sg;
         int nsegs, cluster;
@@ -171,10 +170,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
         /*
          * for each bio in rq
          */
-        bvprv = NULL;
         sg = NULL;
         rq_for_each_segment(bvec, rq, iter) {
-                __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+                __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
                                      &nsegs, &cluster);
         } /* segments in rq */
 
@@ -223,18 +221,17 @@ EXPORT_SYMBOL(blk_rq_map_sg);
 int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
                    struct scatterlist *sglist)
 {
-        struct bio_vec *bvec, *bvprv;
+        struct bio_vec bvec, bvprv;
         struct scatterlist *sg;
         int nsegs, cluster;
-        unsigned long i;
+        struct bvec_iter iter;
 
         nsegs = 0;
         cluster = blk_queue_cluster(q);
 
-        bvprv = NULL;
         sg = NULL;
-        bio_for_each_segment(bvec, bio, i) {
-                __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+        bio_for_each_segment(bvec, bio, iter) {
+                __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
                                      &nsegs, &cluster);
         } /* segments in bio */
 
......
@@ -897,15 +897,15 @@ rqbiocnt(struct request *r)
 static void
 bio_pageinc(struct bio *bio)
 {
-        struct bio_vec *bv;
+        struct bio_vec bv;
         struct page *page;
-        int i;
+        struct bvec_iter iter;
 
-        bio_for_each_segment(bv, bio, i) {
+        bio_for_each_segment(bv, bio, iter) {
                 /* Non-zero page count for non-head members of
                  * compound pages is no longer allowed by the kernel.
                  */
-                page = compound_trans_head(bv->bv_page);
+                page = compound_trans_head(bv.bv_page);
                 atomic_inc(&page->_count);
         }
 }
@@ -913,12 +913,12 @@ bio_pageinc(struct bio *bio)
 static void
 bio_pagedec(struct bio *bio)
 {
-        struct bio_vec *bv;
+        struct bio_vec bv;
         struct page *page;
-        int i;
+        struct bvec_iter iter;
 
-        bio_for_each_segment(bv, bio, i) {
-                page = compound_trans_head(bv->bv_page);
+        bio_for_each_segment(bv, bio, iter) {
+                page = compound_trans_head(bv.bv_page);
                 atomic_dec(&page->_count);
         }
 }
......
@@ -328,9 +328,9 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
         struct block_device *bdev = bio->bi_bdev;
         struct brd_device *brd = bdev->bd_disk->private_data;
         int rw;
-        struct bio_vec *bvec;
+        struct bio_vec bvec;
         sector_t sector;
-        int i;
+        struct bvec_iter iter;
         int err = -EIO;
 
         sector = bio->bi_iter.bi_sector;
@@ -347,10 +347,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
         if (rw == READA)
                 rw = READ;
 
-        bio_for_each_segment(bvec, bio, i) {
-                unsigned int len = bvec->bv_len;
-                err = brd_do_bvec(brd, bvec->bv_page, len,
-                                  bvec->bv_offset, rw, sector);
+        bio_for_each_segment(bvec, bio, iter) {
+                unsigned int len = bvec.bv_len;
+                err = brd_do_bvec(brd, bvec.bv_page, len,
+                                  bvec.bv_offset, rw, sector);
                 if (err)
                         break;
                 sector += len >> SECTOR_SHIFT;
......
@@ -1537,15 +1537,17 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-        struct bio_vec *bvec;
-        int i;
+        struct bio_vec bvec;
+        struct bvec_iter iter;
 
         /* hint all but last page with MSG_MORE */
-        bio_for_each_segment(bvec, bio, i) {
+        bio_for_each_segment(bvec, bio, iter) {
                 int err;
 
-                err = _drbd_no_send_page(mdev, bvec->bv_page,
-                                         bvec->bv_offset, bvec->bv_len,
-                                         i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+                err = _drbd_no_send_page(mdev, bvec.bv_page,
+                                         bvec.bv_offset, bvec.bv_len,
+                                         bio_iter_last(bio, iter)
+                                         ? 0 : MSG_MORE);
                 if (err)
                         return err;
         }
@@ -1554,15 +1556,16 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-        struct bio_vec *bvec;
-        int i;
+        struct bio_vec bvec;
+        struct bvec_iter iter;
 
         /* hint all but last page with MSG_MORE */
-        bio_for_each_segment(bvec, bio, i) {
+        bio_for_each_segment(bvec, bio, iter) {
                 int err;
 
-                err = _drbd_send_page(mdev, bvec->bv_page,
-                                      bvec->bv_offset, bvec->bv_len,
-                                      i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+                err = _drbd_send_page(mdev, bvec.bv_page,
+                                      bvec.bv_offset, bvec.bv_len,
+                                      bio_iter_last(bio, iter) ? 0 : MSG_MORE);
                 if (err)
                         return err;
         }
......
@@ -1595,9 +1595,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
                            sector_t sector, int data_size)
 {
-        struct bio_vec *bvec;
+        struct bio_vec bvec;
+        struct bvec_iter iter;
         struct bio *bio;
-        int dgs, err, i, expect;
+        int dgs, err, expect;
         void *dig_in = mdev->tconn->int_dig_in;
         void *dig_vv = mdev->tconn->int_dig_vv;
@@ -1617,11 +1618,11 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
         bio = req->master_bio;
         D_ASSERT(sector == bio->bi_iter.bi_sector);
 
-        bio_for_each_segment(bvec, bio, i) {
-                void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
-                expect = min_t(int, data_size, bvec->bv_len);
+        bio_for_each_segment(bvec, bio, iter) {
+                void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
+                expect = min_t(int, data_size, bvec.bv_len);
                 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
-                kunmap(bvec->bv_page);
+                kunmap(bvec.bv_page);
                 if (err)
                         return err;
                 data_size -= expect;
......
@@ -313,8 +313,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
 {
         struct hash_desc desc;
         struct scatterlist sg;
-        struct bio_vec *bvec;
-        int i;
+        struct bio_vec bvec;
+        struct bvec_iter iter;
 
         desc.tfm = tfm;
         desc.flags = 0;
@@ -322,8 +322,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
         sg_init_table(&sg, 1);
         crypto_hash_init(&desc);
 
-        bio_for_each_segment(bvec, bio, i) {
-                sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
+        bio_for_each_segment(bvec, bio, iter) {
+                sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
                 crypto_hash_update(&desc, &sg, sg.length);
         }
         crypto_hash_final(&desc, digest);
......
@@ -2351,7 +2351,7 @@ static void rw_interrupt(void)
 /* Compute maximal contiguous buffer size. */
 static int buffer_chain_size(void)
 {
-        struct bio_vec *bv;
+        struct bio_vec bv;
         int size;
         struct req_iterator iter;
         char *base;
@@ -2360,10 +2360,10 @@ static int buffer_chain_size(void)
         size = 0;
 
         rq_for_each_segment(bv, current_req, iter) {
-                if (page_address(bv->bv_page) + bv->bv_offset != base + size)
+                if (page_address(bv.bv_page) + bv.bv_offset != base + size)
                         break;
 
-                size += bv->bv_len;
+                size += bv.bv_len;
         }
 
         return size >> 9;
@@ -2389,7 +2389,7 @@ static int transfer_size(int ssize, int max_sector, int max_size)
 static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 {
         int remaining;          /* number of transferred 512-byte sectors */
-        struct bio_vec *bv;
+        struct bio_vec bv;
         char *buffer;
         char *dma_buffer;
         int size;
@@ -2427,10 +2427,10 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
                 if (!remaining)
                         break;
 
-                size = bv->bv_len;
+                size = bv.bv_len;
                 SUPBOUND(size, remaining);
 
-                buffer = page_address(bv->bv_page) + bv->bv_offset;
+                buffer = page_address(bv.bv_page) + bv.bv_offset;
                 if (dma_buffer + size >
                     floppy_track_buffer + (max_buffer_sectors << 10) ||
                     dma_buffer < floppy_track_buffer) {
......
@@ -288,9 +288,10 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 {
         int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
                         struct page *page);
-        struct bio_vec *bvec;
+        struct bio_vec bvec;
+        struct bvec_iter iter;
         struct page *page = NULL;
-        int i, ret = 0;
+        int ret = 0;
 
         if (lo->transfer != transfer_none) {
                 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
@@ -302,11 +303,11 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
                 do_lo_send = do_lo_send_direct_write;
         }
 
-        bio_for_each_segment(bvec, bio, i) {
-                ret = do_lo_send(lo, bvec, pos, page);
+        bio_for_each_segment(bvec, bio, iter) {
+                ret = do_lo_send(lo, &bvec, pos, page);
                 if (ret < 0)
                         break;
-                pos += bvec->bv_len;
+                pos += bvec.bv_len;
         }
         if (page) {
                 kunmap(page);
@@ -392,20 +393,20 @@ do_lo_receive(struct loop_device *lo,
 static int
 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
 {
-        struct bio_vec *bvec;
+        struct bio_vec bvec;
+        struct bvec_iter iter;
         ssize_t s;
-        int i;
 
-        bio_for_each_segment(bvec, bio, i) {
-                s = do_lo_receive(lo, bvec, bsize, pos);
+        bio_for_each_segment(bvec, bio, iter) {
+                s = do_lo_receive(lo, &bvec, bsize, pos);
                 if (s < 0)
                         return s;
 
-                if (s != bvec->bv_len) {
+                if (s != bvec.bv_len) {
                         zero_fill_bio(bio);
                         break;
                 }
-                pos += bvec->bv_len;
+                pos += bvec.bv_len;
         }
         return 0;
 }
......
@@ -3962,8 +3962,9 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 {
         struct driver_data *dd = queue->queuedata;
         struct scatterlist *sg;
-        struct bio_vec *bvec;
-        int i, nents = 0;
+        struct bio_vec bvec;
+        struct bvec_iter iter;
+        int nents = 0;
         int tag = 0, unaligned = 0;
 
         if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -4026,11 +4027,11 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
         }
 
         /* Create the scatter list for this bio. */
-        bio_for_each_segment(bvec, bio, i) {
+        bio_for_each_segment(bvec, bio, iter) {
                 sg_set_page(&sg[nents],
-                                bvec->bv_page,
-                                bvec->bv_len,
-                                bvec->bv_offset);
+                                bvec.bv_page,
+                                bvec.bv_len,
+                                bvec.bv_offset);
                 nents++;
         }
 
......
@@ -271,7 +271,7 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 
         if (nbd_cmd(req) == NBD_CMD_WRITE) {
                 struct req_iterator iter;
-                struct bio_vec *bvec;
+                struct bio_vec bvec;
                 /*
                  * we are really probing at internals to determine
                  * whether to set MSG_MORE or not...
@@ -281,8 +281,8 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
                         if (!rq_iter_last(req, iter))
                                 flags = MSG_MORE;
                         dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
-                                        nbd->disk->disk_name, req, bvec->bv_len);
-                        result = sock_send_bvec(nbd, bvec, flags);
+                                        nbd->disk->disk_name, req, bvec.bv_len);
+                        result = sock_send_bvec(nbd, &bvec, flags);
                         if (result <= 0) {
                                 dev_err(disk_to_dev(nbd->disk),
                                         "Send data failed (result %d)\n",
@@ -378,10 +378,10 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
                         nbd->disk->disk_name, req);
         if (nbd_cmd(req) == NBD_CMD_READ) {
                 struct req_iterator iter;
-                struct bio_vec *bvec;
+                struct bio_vec bvec;
 
                 rq_for_each_segment(bvec, req, iter) {
-                        result = sock_recv_bvec(nbd, bvec);
+                        result = sock_recv_bvec(nbd, &bvec);
                         if (result <= 0) {
                                 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                         result);
@@ -389,7 +389,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
                                 return req;
                         }
                         dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-                                nbd->disk->disk_name, req, bvec->bv_len);
+                                nbd->disk->disk_name, req, bvec.bv_len);
                 }
         }
         return req;
......
@@ -550,9 +550,11 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
 static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
                 struct bio *bio, enum dma_data_direction dma_dir, int psegs)
 {
-        struct bio_vec *bvec, *bvprv = NULL;
+        struct bio_vec bvec, bvprv;
+        struct bvec_iter iter;
         struct scatterlist *sg = NULL;
-        int i, length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+        int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+        int first = 1;
 
         if (nvmeq->dev->stripe_size)
                 split_len = nvmeq->dev->stripe_size -
@@ -560,25 +562,28 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
                         (nvmeq->dev->stripe_size - 1));
 
         sg_init_table(iod->sg, psegs);
-        bio_for_each_segment(bvec, bio, i) {
-                if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
-                        sg->length += bvec->bv_len;
+        bio_for_each_segment(bvec, bio, iter) {
+                if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
+                        sg->length += bvec.bv_len;
                 } else {
-                        if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
-                                return nvme_split_and_submit(bio, nvmeq, i,
-                                                             length, 0);
+                        if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
+                                return nvme_split_and_submit(bio, nvmeq,
+                                                             iter.bi_idx,
+                                                             length, 0);
 
                         sg = sg ? sg + 1 : iod->sg;
-                        sg_set_page(sg, bvec->bv_page, bvec->bv_len,
-                                    bvec->bv_offset);
+                        sg_set_page(sg, bvec.bv_page,
+                                    bvec.bv_len, bvec.bv_offset);
                         nsegs++;
                 }
 
-                if (split_len - length < bvec->bv_len)
-                        return nvme_split_and_submit(bio, nvmeq, i, split_len,
-                                                     split_len - length);
-                length += bvec->bv_len;
+                if (split_len - length < bvec.bv_len)
+                        return nvme_split_and_submit(bio, nvmeq, iter.bi_idx,
+                                                     split_len,
+                                                     split_len - length);
+                length += bvec.bv_len;
                 bvprv = bvec;
+                first = 0;
         }
         iod->nents = nsegs;
         sg_mark_end(sg);
......
@@ -94,7 +94,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 {
         unsigned int offset = 0;
         struct req_iterator iter;
-        struct bio_vec *bvec;
+        struct bio_vec bvec;
         unsigned int i = 0;
         size_t size;
         void *buf;
@@ -106,14 +106,14 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
                         __func__, __LINE__, i, bio_segments(iter.bio),
                         bio_sectors(iter.bio), iter.bio->bi_iter.bi_sector);
 
-                size = bvec->bv_len;
-                buf = bvec_kmap_irq(bvec, &flags);
+                size = bvec.bv_len;
+                buf = bvec_kmap_irq(&bvec, &flags);
                 if (gather)
                         memcpy(dev->bounce_buf+offset, buf, size);
                 else
                         memcpy(buf, dev->bounce_buf+offset, size);
                 offset += size;
-                flush_kernel_dcache_page(bvec->bv_page);
+                flush_kernel_dcache_page(bvec.bv_page);
                 bvec_kunmap_irq(buf, &flags);
                 i++;
         }
@@ -130,7 +130,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
 #ifdef DEBUG
         unsigned int n = 0;
-        struct bio_vec *bv;
+        struct bio_vec bv;
         struct req_iterator iter;
 
         rq_for_each_segment(bv, req, iter)
......
@@ -555,14 +555,14 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
         const char *op = write ? "write" : "read";
         loff_t offset = bio->bi_iter.bi_sector << 9;
         int error = 0;
-        struct bio_vec *bvec;
-        unsigned int i;
+        struct bio_vec bvec;
+        struct bvec_iter iter;
         struct bio *next;
 
-        bio_for_each_segment(bvec, bio, i) {
+        bio_for_each_segment(bvec, bio, iter) {
                 /* PS3 is ppc64, so we don't handle highmem */
-                char *ptr = page_address(bvec->bv_page) + bvec->bv_offset;
-                size_t len = bvec->bv_len, retlen;
+                char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
+                size_t len = bvec.bv_len, retlen;
 
                 dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op,
                         len, offset);
......
@@ -1109,23 +1109,23 @@ static void bio_chain_put(struct bio *chain)
  */
 static void zero_bio_chain(struct bio *chain, int start_ofs)
 {
-        struct bio_vec *bv;
+        struct bio_vec bv;
+        struct bvec_iter iter;
         unsigned long flags;
         void *buf;
-        int i;
         int pos = 0;
 
         while (chain) {
-                bio_for_each_segment(bv, chain, i) {
-                        if (pos + bv->bv_len > start_ofs) {
+                bio_for_each_segment(bv, chain, iter) {
+                        if (pos + bv.bv_len > start_ofs) {
                                 int remainder = max(start_ofs - pos, 0);
-                                buf = bvec_kmap_irq(bv, &flags);
+                                buf = bvec_kmap_irq(&bv, &flags);
                                 memset(buf + remainder, 0,
-                                       bv->bv_len - remainder);
-                                flush_dcache_page(bv->bv_page);
+                                       bv.bv_len - remainder);
+                                flush_dcache_page(bv.bv_page);
                                 bvec_kunmap_irq(buf, &flags);
                         }
-                        pos += bv->bv_len;
+                        pos += bv.bv_len;
                 }
                 chain = chain->bi_next;
@@ -1173,11 +1173,11 @@ static struct bio *bio_clone_range(struct bio *bio_src,
                                 unsigned int len,
                                 gfp_t gfpmask)
 {
-        struct bio_vec *bv;
+        struct bio_vec bv;
+        struct bvec_iter