block: reuse __blk_bvec_map_sg() for mapping page sized bvec

Inside __blk_segment_map_sg(), mapping of a page-sized bvec is open-coded
in one standalone branch as a small optimization.

Reuse __blk_bvec_map_sg() to do that mapping instead.

Cc: Omar Sandoval <osandov@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Ming Lei 2019-03-17 18:01:11 +08:00 committed by Jens Axboe
parent cae6c2e54c
commit 16e3e41877
1 changed file with 9 additions and 11 deletions

block/blk-merge.c

@@ -493,6 +493,14 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
 	return nsegs;
 }
 
+static inline int __blk_bvec_map_sg(struct bio_vec bv,
+		struct scatterlist *sglist, struct scatterlist **sg)
+{
+	*sg = blk_next_sg(sg, sglist);
+	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
+	return 1;
+}
+
 static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 		     struct scatterlist *sglist, struct bio_vec *bvprv,
@@ -511,23 +519,13 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 	} else {
 new_segment:
 		if (bvec->bv_offset + bvec->bv_len <= PAGE_SIZE) {
-			*sg = blk_next_sg(sg, sglist);
-			sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
-			(*nsegs) += 1;
+			(*nsegs) += __blk_bvec_map_sg(*bvec, sglist, sg);
 		} else
 			(*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
 	}
 	*bvprv = *bvec;
 }
 
-static inline int __blk_bvec_map_sg(struct bio_vec bv,
-		struct scatterlist *sglist, struct scatterlist **sg)
-{
-	*sg = sglist;
-	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
-	return 1;
-}
-
 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 			     struct scatterlist *sglist,
 			     struct scatterlist **sg)
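
For illustration, below is a standalone userspace sketch of the pattern this
patch consolidates: filling one scatterlist entry per bvec that fits in a
page. The struct layouts and the sg_set_page()/blk_next_sg() stand-ins are
simplified assumptions for demonstration only, not the kernel's real
definitions; only __blk_bvec_map_sg() mirrors the helper as added above.

/* build: cc -std=c99 -o sg_sketch sg_sketch.c (hypothetical file name) */
#include <stdio.h>

#define PAGE_SIZE 4096u

struct page { unsigned long pfn; };	/* stand-in for struct page */

struct bio_vec {			/* same field names as the kernel */
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};

struct scatterlist {			/* simplified: no chaining, no flags */
	struct page	*page;
	unsigned int	length;
	unsigned int	offset;
};

/* stand-in for the kernel's sg_set_page() */
static void sg_set_page(struct scatterlist *sg, struct page *page,
			unsigned int len, unsigned int offset)
{
	sg->page = page;
	sg->length = len;
	sg->offset = offset;
}

/* stand-in for blk_next_sg(): return the next unused entry */
static struct scatterlist *blk_next_sg(struct scatterlist **sg,
				       struct scatterlist *sglist)
{
	return *sg ? *sg + 1 : sglist;
}

/* mirrors the helper added by this patch: one sg entry per bvec */
static int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

int main(void)
{
	struct page p0 = { 0 }, p1 = { 1 };
	struct bio_vec bvecs[2] = {
		{ &p0, PAGE_SIZE, 0 },	/* a full page */
		{ &p1, 512, 1024 },	/* a partial page */
	};
	struct scatterlist sglist[2], *sg = NULL;
	int nsegs = 0, i;

	for (i = 0; i < 2; i++)
		nsegs += __blk_bvec_map_sg(bvecs[i], sglist, &sg);

	printf("%d segments\n", nsegs);
	for (i = 0; i < nsegs; i++)
		printf("sg[%d]: pfn=%lu len=%u off=%u\n", i,
		       sglist[i].page->pfn, sglist[i].length,
		       sglist[i].offset);
	return 0;
}

Running the sketch prints two segments, one per bvec, which is the same
result the page-sized branch in __blk_segment_map_sg() now gets by calling
the shared __blk_bvec_map_sg() helper.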