fs/direct-io: Reduce the size of struct dio

Reduce the size of struct dio by combining the 'op' and 'op_flags' into
the new 'opf' member. Use the new blk_opf_t type to improve static type
checking. This patch does not change any functionality.

Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-49-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Bart Van Assche 2022-07-14 11:07:14 -07:00 committed by Jens Axboe
parent 1420c4a549
commit c6293eacfc

View File

@@ -117,8 +117,7 @@ struct dio_submit {
/* dio_state communicated between submission path and end_io */ /* dio_state communicated between submission path and end_io */
struct dio { struct dio {
int flags; /* doesn't change */ int flags; /* doesn't change */
int op; blk_opf_t opf; /* request operation type and flags */
int op_flags;
struct gendisk *bio_disk; struct gendisk *bio_disk;
struct inode *inode; struct inode *inode;
loff_t i_size; /* i_size when submitted */ loff_t i_size; /* i_size when submitted */
@@ -167,12 +166,13 @@ static inline unsigned dio_pages_present(struct dio_submit *sdio)
*/ */
static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{ {
const enum req_op dio_op = dio->opf & REQ_OP_MASK;
ssize_t ret; ssize_t ret;
ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
&sdio->from); &sdio->from);
if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) { if (ret < 0 && sdio->blocks_available && dio_op == REQ_OP_WRITE) {
struct page *page = ZERO_PAGE(0); struct page *page = ZERO_PAGE(0);
/* /*
* A memory fault, but the filesystem has some outstanding * A memory fault, but the filesystem has some outstanding
@@ -234,6 +234,7 @@ static inline struct page *dio_get_page(struct dio *dio,
*/ */
static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags) static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
{ {
const enum req_op dio_op = dio->opf & REQ_OP_MASK;
loff_t offset = dio->iocb->ki_pos; loff_t offset = dio->iocb->ki_pos;
ssize_t transferred = 0; ssize_t transferred = 0;
int err; int err;
@@ -251,7 +252,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
transferred = dio->result; transferred = dio->result;
/* Check for short read case */ /* Check for short read case */
if ((dio->op == REQ_OP_READ) && if (dio_op == REQ_OP_READ &&
((offset + transferred) > dio->i_size)) ((offset + transferred) > dio->i_size))
transferred = dio->i_size - offset; transferred = dio->i_size - offset;
/* ignore EFAULT if some IO has been done */ /* ignore EFAULT if some IO has been done */
@@ -286,7 +287,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
* zeros from unwritten extents. * zeros from unwritten extents.
*/ */
if (flags & DIO_COMPLETE_INVALIDATE && if (flags & DIO_COMPLETE_INVALIDATE &&
ret > 0 && dio->op == REQ_OP_WRITE && ret > 0 && dio_op == REQ_OP_WRITE &&
dio->inode->i_mapping->nrpages) { dio->inode->i_mapping->nrpages) {
err = invalidate_inode_pages2_range(dio->inode->i_mapping, err = invalidate_inode_pages2_range(dio->inode->i_mapping,
offset >> PAGE_SHIFT, offset >> PAGE_SHIFT,
@@ -305,7 +306,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
*/ */
dio->iocb->ki_pos += transferred; dio->iocb->ki_pos += transferred;
if (ret > 0 && dio->op == REQ_OP_WRITE) if (ret > 0 && dio_op == REQ_OP_WRITE)
ret = generic_write_sync(dio->iocb, ret); ret = generic_write_sync(dio->iocb, ret);
dio->iocb->ki_complete(dio->iocb, ret); dio->iocb->ki_complete(dio->iocb, ret);
} }
@@ -329,6 +330,7 @@ static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
static void dio_bio_end_aio(struct bio *bio) static void dio_bio_end_aio(struct bio *bio)
{ {
struct dio *dio = bio->bi_private; struct dio *dio = bio->bi_private;
const enum req_op dio_op = dio->opf & REQ_OP_MASK;
unsigned long remaining; unsigned long remaining;
unsigned long flags; unsigned long flags;
bool defer_completion = false; bool defer_completion = false;
@@ -353,7 +355,7 @@ static void dio_bio_end_aio(struct bio *bio)
*/ */
if (dio->result) if (dio->result)
defer_completion = dio->defer_completion || defer_completion = dio->defer_completion ||
(dio->op == REQ_OP_WRITE && (dio_op == REQ_OP_WRITE &&
dio->inode->i_mapping->nrpages); dio->inode->i_mapping->nrpages);
if (defer_completion) { if (defer_completion) {
INIT_WORK(&dio->complete_work, dio_aio_complete_work); INIT_WORK(&dio->complete_work, dio_aio_complete_work);
@@ -396,7 +398,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
* bio_alloc() is guaranteed to return a bio when allowed to sleep and * bio_alloc() is guaranteed to return a bio when allowed to sleep and
* we request a valid number of vectors. * we request a valid number of vectors.
*/ */
bio = bio_alloc(bdev, nr_vecs, dio->op | dio->op_flags, GFP_KERNEL); bio = bio_alloc(bdev, nr_vecs, dio->opf, GFP_KERNEL);
bio->bi_iter.bi_sector = first_sector; bio->bi_iter.bi_sector = first_sector;
if (dio->is_async) if (dio->is_async)
bio->bi_end_io = dio_bio_end_aio; bio->bi_end_io = dio_bio_end_aio;
@@ -415,6 +417,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
*/ */
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{ {
const enum req_op dio_op = dio->opf & REQ_OP_MASK;
struct bio *bio = sdio->bio; struct bio *bio = sdio->bio;
unsigned long flags; unsigned long flags;
@@ -426,7 +429,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
dio->refcount++; dio->refcount++;
spin_unlock_irqrestore(&dio->bio_lock, flags); spin_unlock_irqrestore(&dio->bio_lock, flags);
if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) if (dio->is_async && dio_op == REQ_OP_READ && dio->should_dirty)
bio_set_pages_dirty(bio); bio_set_pages_dirty(bio);
dio->bio_disk = bio->bi_bdev->bd_disk; dio->bio_disk = bio->bi_bdev->bd_disk;
@@ -492,7 +495,8 @@ static struct bio *dio_await_one(struct dio *dio)
static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio) static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
{ {
blk_status_t err = bio->bi_status; blk_status_t err = bio->bi_status;
bool should_dirty = dio->op == REQ_OP_READ && dio->should_dirty; const enum req_op dio_op = dio->opf & REQ_OP_MASK;
bool should_dirty = dio_op == REQ_OP_READ && dio->should_dirty;
if (err) { if (err) {
if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT)) if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
@@ -619,6 +623,7 @@ static int dio_set_defer_completion(struct dio *dio)
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
struct buffer_head *map_bh) struct buffer_head *map_bh)
{ {
const enum req_op dio_op = dio->opf & REQ_OP_MASK;
int ret; int ret;
sector_t fs_startblk; /* Into file, in filesystem-sized blocks */ sector_t fs_startblk; /* Into file, in filesystem-sized blocks */
sector_t fs_endblk; /* Into file, in filesystem-sized blocks */ sector_t fs_endblk; /* Into file, in filesystem-sized blocks */
@@ -653,7 +658,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
* which may decide to handle it or also return an unmapped * which may decide to handle it or also return an unmapped
* buffer head. * buffer head.
*/ */
create = dio->op == REQ_OP_WRITE; create = dio_op == REQ_OP_WRITE;
if (dio->flags & DIO_SKIP_HOLES) { if (dio->flags & DIO_SKIP_HOLES) {
i_size = i_size_read(dio->inode); i_size = i_size_read(dio->inode);
if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits) if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
@@ -801,10 +806,11 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
unsigned offset, unsigned len, sector_t blocknr, unsigned offset, unsigned len, sector_t blocknr,
struct buffer_head *map_bh) struct buffer_head *map_bh)
{ {
const enum req_op dio_op = dio->opf & REQ_OP_MASK;
int ret = 0; int ret = 0;
int boundary = sdio->boundary; /* dio_send_cur_page may clear it */ int boundary = sdio->boundary; /* dio_send_cur_page may clear it */
if (dio->op == REQ_OP_WRITE) { if (dio_op == REQ_OP_WRITE) {
/* /*
* Read accounting is performed in submit_bio() * Read accounting is performed in submit_bio()
*/ */
@@ -917,6 +923,7 @@ static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
static int do_direct_IO(struct dio *dio, struct dio_submit *sdio, static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
struct buffer_head *map_bh) struct buffer_head *map_bh)
{ {
const enum req_op dio_op = dio->opf & REQ_OP_MASK;
const unsigned blkbits = sdio->blkbits; const unsigned blkbits = sdio->blkbits;
const unsigned i_blkbits = blkbits + sdio->blkfactor; const unsigned i_blkbits = blkbits + sdio->blkfactor;
int ret = 0; int ret = 0;
@@ -992,7 +999,7 @@ do_holes:
loff_t i_size_aligned; loff_t i_size_aligned;
/* AKPM: eargh, -ENOTBLK is a hack */ /* AKPM: eargh, -ENOTBLK is a hack */
if (dio->op == REQ_OP_WRITE) { if (dio_op == REQ_OP_WRITE) {
put_page(page); put_page(page);
return -ENOTBLK; return -ENOTBLK;
} }
@@ -1196,12 +1203,11 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
dio->inode = inode; dio->inode = inode;
if (iov_iter_rw(iter) == WRITE) { if (iov_iter_rw(iter) == WRITE) {
dio->op = REQ_OP_WRITE; dio->opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
dio->op_flags = REQ_SYNC | REQ_IDLE;
if (iocb->ki_flags & IOCB_NOWAIT) if (iocb->ki_flags & IOCB_NOWAIT)
dio->op_flags |= REQ_NOWAIT; dio->opf |= REQ_NOWAIT;
} else { } else {
dio->op = REQ_OP_READ; dio->opf = REQ_OP_READ;
} }
/* /*