mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 22:21:40 +00:00
block: enable multipage bvecs
This patch pulls the trigger for multi-page bvecs. Reviewed-by: Omar Sandoval <osandov@fb.com> Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
6dc4f100c1
commit
07173c3ec2
22
block/bio.c
22
block/bio.c
@@ -753,6 +753,8 @@ EXPORT_SYMBOL(bio_add_pc_page);
|
|||||||
* @page: page to add
|
* @page: page to add
|
||||||
* @len: length of the data to add
|
* @len: length of the data to add
|
||||||
* @off: offset of the data in @page
|
* @off: offset of the data in @page
|
||||||
|
* @same_page: if %true only merge if the new data is in the same physical
|
||||||
|
* page as the last segment of the bio.
|
||||||
*
|
*
|
||||||
* Try to add the data at @page + @off to the last bvec of @bio. This is a
|
* Try to add the data at @page + @off to the last bvec of @bio. This is a
|
||||||
* a useful optimisation for file systems with a block size smaller than the
|
* a useful optimisation for file systems with a block size smaller than the
|
||||||
@@ -761,19 +763,25 @@ EXPORT_SYMBOL(bio_add_pc_page);
|
|||||||
* Return %true on success or %false on failure.
|
* Return %true on success or %false on failure.
|
||||||
*/
|
*/
|
||||||
/*
 * Try to merge [page + off, len) into the last bvec of @bio.
 *
 * @same_page: if %true only merge if the new data is in the same physical
 *	page as the last segment of the bio.
 *
 * Returns %true if the merge succeeded (bv_len and bi_iter.bi_size were
 * grown by @len), %false otherwise.  Cloned bios share their bvec array
 * and must never be modified here, hence the WARN_ON_ONCE guard.
 */
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
		/* physical address of the last byte currently in the bvec */
		phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
			bv->bv_offset + bv->bv_len - 1;
		phys_addr_t page_addr = page_to_phys(page);

		/* new data must be physically contiguous with the bvec */
		if (vec_end_addr + 1 != page_addr + off)
			return false;
		/* caller requires the merge to stay within one physical page */
		if (same_page && (vec_end_addr & PAGE_MASK) != page_addr)
			return false;

		bv->bv_len += len;
		bio->bi_iter.bi_size += len;
		return true;
	}
	return false;
}
|
||||||
@@ -819,7 +827,7 @@ EXPORT_SYMBOL_GPL(__bio_add_page);
|
|||||||
int bio_add_page(struct bio *bio, struct page *page,
|
int bio_add_page(struct bio *bio, struct page *page,
|
||||||
unsigned int len, unsigned int offset)
|
unsigned int len, unsigned int offset)
|
||||||
{
|
{
|
||||||
if (!__bio_try_merge_page(bio, page, len, offset)) {
|
if (!__bio_try_merge_page(bio, page, len, offset, false)) {
|
||||||
if (bio_full(bio))
|
if (bio_full(bio))
|
||||||
return 0;
|
return 0;
|
||||||
__bio_add_page(bio, page, len, offset);
|
__bio_add_page(bio, page, len, offset);
|
||||||
|
@@ -318,7 +318,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
|||||||
*/
|
*/
|
||||||
sector = iomap_sector(iomap, pos);
|
sector = iomap_sector(iomap, pos);
|
||||||
if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
|
if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
|
||||||
if (__bio_try_merge_page(ctx->bio, page, plen, poff))
|
if (__bio_try_merge_page(ctx->bio, page, plen, poff, true))
|
||||||
goto done;
|
goto done;
|
||||||
is_contig = true;
|
is_contig = true;
|
||||||
}
|
}
|
||||||
@@ -349,7 +349,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
|||||||
ctx->bio->bi_end_io = iomap_read_end_io;
|
ctx->bio->bi_end_io = iomap_read_end_io;
|
||||||
}
|
}
|
||||||
|
|
||||||
__bio_add_page(ctx->bio, page, plen, poff);
|
bio_add_page(ctx->bio, page, plen, poff);
|
||||||
done:
|
done:
|
||||||
/*
|
/*
|
||||||
* Move the caller beyond our range so that it keeps making progress.
|
* Move the caller beyond our range so that it keeps making progress.
|
||||||
|
@@ -616,12 +616,12 @@ xfs_add_to_ioend(
|
|||||||
bdev, sector);
|
bdev, sector);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff)) {
|
if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff, true)) {
|
||||||
if (iop)
|
if (iop)
|
||||||
atomic_inc(&iop->write_count);
|
atomic_inc(&iop->write_count);
|
||||||
if (bio_full(wpc->ioend->io_bio))
|
if (bio_full(wpc->ioend->io_bio))
|
||||||
xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
|
xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
|
||||||
__bio_add_page(wpc->ioend->io_bio, page, len, poff);
|
bio_add_page(wpc->ioend->io_bio, page, len, poff);
|
||||||
}
|
}
|
||||||
|
|
||||||
wpc->ioend->io_size += len;
|
wpc->ioend->io_size += len;
|
||||||
|
@@ -441,7 +441,7 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
|
|||||||
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
|
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
|
||||||
unsigned int, unsigned int);
|
unsigned int, unsigned int);
|
||||||
bool __bio_try_merge_page(struct bio *bio, struct page *page,
|
bool __bio_try_merge_page(struct bio *bio, struct page *page,
|
||||||
unsigned int len, unsigned int off);
|
unsigned int len, unsigned int off, bool same_page);
|
||||||
void __bio_add_page(struct bio *bio, struct page *page,
|
void __bio_add_page(struct bio *bio, struct page *page,
|
||||||
unsigned int len, unsigned int off);
|
unsigned int len, unsigned int off);
|
||||||
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
|
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
|
||||||
|
Loading…
Reference in New Issue
Block a user