block: get rid of bio_rw and READA

These two are confusing leftovers of the old world order, combining
values of the REQ_OP_ and REQ_ namespaces.  For callers that don't
special-case read-ahead we mostly just replace bio_rw() with bio_data_dir()
or op_is_write(), except for the few cases where a switch over the
REQ_OP_ values makes more sense.  Any check for READA is replaced with an
explicit check for REQ_RAHEAD.  Also remove the READA alias for
REQ_RAHEAD.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 70246286e9
parent 3f40bf2c89
Author:    Christoph Hellwig <hch@lst.de>, 2016-07-19 11:28:41 +02:00
Committer: Jens Axboe <axboe@fb.com>
26 changed files with 95 additions and 96 deletions
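
For most callers the conversion is mechanical. As a rough before/after sketch
(illustrative only, not part of the patch; the handle_*() names are made up):

	/*
	 * Sketch: the three answers the old bio_rw() could give, expressed
	 * with the interfaces this series settles on.
	 */
	static void classify_bio(struct bio *bio)
	{
		if (op_is_write(bio_op(bio)))
			handle_write(bio);	/* was: bio_rw(bio) == WRITE */
		else if (bio->bi_rw & REQ_RAHEAD)
			handle_readahead(bio);	/* was: bio_rw(bio) == READA */
		else
			handle_read(bio);	/* was: bio_rw(bio) == READ */
	}

Callers that only care about the data direction keep using bio_data_dir(),
which treats read-ahead like any other read.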

View File

@@ -347,9 +347,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 		goto out;
 	}
 
-	rw = bio_rw(bio);
-	if (rw == READA)
-		rw = READ;
+	rw = bio_data_dir(bio);
 
 	bio_for_each_segment(bvec, bio, iter) {
 		unsigned int len = bvec.bv_len;

View File

@@ -219,7 +219,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 {
 	const unsigned s = req->rq_state;
 	struct drbd_device *device = req->device;
-	int rw;
 	int error, ok;
 
 	/* we must not complete the master bio, while it is
@@ -243,8 +242,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 		return;
 	}
 
-	rw = bio_rw(req->master_bio);
-
 	/*
 	 * figure out whether to report success or failure.
 	 *
@@ -268,7 +265,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 	 * epoch number.  If they match, increase the current_tle_nr,
 	 * and reset the transfer log epoch write_cnt.
 	 */
-	if (rw == WRITE &&
+	if (op_is_write(bio_op(req->master_bio)) &&
 	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
 		start_new_tl_epoch(first_peer_device(device)->connection);
 
@@ -285,11 +282,14 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 	 * because no path was available, in which case
 	 * it was not even added to the transfer_log.
 	 *
-	 * READA may fail, and will not be retried.
+	 * read-ahead may fail, and will not be retried.
 	 *
 	 * WRITE should have used all available paths already.
 	 */
-	if (!ok && rw == READ && !list_empty(&req->tl_requests))
+	if (!ok &&
+	    bio_op(req->master_bio) == REQ_OP_READ &&
+	    !(req->master_bio->bi_rw & REQ_RAHEAD) &&
+	    !list_empty(&req->tl_requests))
 		req->rq_state |= RQ_POSTPONED;
 
 	if (!(req->rq_state & RQ_POSTPONED)) {
@@ -645,7 +645,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		__drbd_chk_io_error(device, DRBD_READ_ERROR);
 		/* fall through. */
 	case READ_AHEAD_COMPLETED_WITH_ERROR:
-		/* it is legal to fail READA, no __drbd_chk_io_error in that case. */
+		/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
 		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
 		break;
 
@@ -657,7 +657,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		break;
 
 	case QUEUE_FOR_NET_READ:
-		/* READ or READA, and
+		/* READ, and
 		 * no local disk,
 		 * or target area marked as invalid,
 		 * or just got an io-error. */
@@ -1172,7 +1172,14 @@ drbd_submit_req_private_bio(struct drbd_request *req)
 {
 	struct drbd_device *device = req->device;
 	struct bio *bio = req->private_bio;
-	const int rw = bio_rw(bio);
+	unsigned int type;
+
+	if (bio_op(bio) != REQ_OP_READ)
+		type = DRBD_FAULT_DT_WR;
+	else if (bio->bi_rw & REQ_RAHEAD)
+		type = DRBD_FAULT_DT_RA;
+	else
+		type = DRBD_FAULT_DT_RD;
 
 	bio->bi_bdev = device->ldev->backing_bdev;
 
@@ -1182,10 +1189,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
 	 * stable storage, and this is a WRITE, we may not even submit
 	 * this bio. */
 	if (get_ldev(device)) {
-		if (drbd_insert_fault(device,
-				      rw == WRITE ? DRBD_FAULT_DT_WR
-				    : rw == READ ? DRBD_FAULT_DT_RD
-				    : DRBD_FAULT_DT_RA))
+		if (drbd_insert_fault(device, type))
 			bio_io_error(bio);
 		else if (bio_op(bio) == REQ_OP_DISCARD)
 			drbd_process_discard_req(req);
@@ -1278,7 +1282,7 @@ static bool may_do_writes(struct drbd_device *device)
 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
 {
 	struct drbd_resource *resource = device->resource;
-	const int rw = bio_rw(req->master_bio);
+	const int rw = bio_data_dir(req->master_bio);
 	struct bio_and_error m = { NULL, };
 	bool no_remote = false;
 	bool submit_private_bio = false;
@@ -1308,7 +1312,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
 		goto out;
 	}
 
-	/* We fail READ/READA early, if we can not serve it.
+	/* We fail READ early, if we can not serve it.
 	 * We must do this before req is registered on any lists.
 	 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
 	if (rw != WRITE) {

View File

@@ -248,18 +248,26 @@ void drbd_request_endio(struct bio *bio)
 
 	/* to avoid recursion in __req_mod */
 	if (unlikely(bio->bi_error)) {
-		if (bio_op(bio) == REQ_OP_DISCARD)
-			what = (bio->bi_error == -EOPNOTSUPP)
-				? DISCARD_COMPLETED_NOTSUPP
-				: DISCARD_COMPLETED_WITH_ERROR;
-		else
-			what = (bio_data_dir(bio) == WRITE)
-			? WRITE_COMPLETED_WITH_ERROR
-			: (bio_rw(bio) == READ)
-			? READ_COMPLETED_WITH_ERROR
-			: READ_AHEAD_COMPLETED_WITH_ERROR;
-	} else
+		switch (bio_op(bio)) {
+		case REQ_OP_DISCARD:
+			if (bio->bi_error == -EOPNOTSUPP)
+				what = DISCARD_COMPLETED_NOTSUPP;
+			else
+				what = DISCARD_COMPLETED_WITH_ERROR;
+			break;
+		case REQ_OP_READ:
+			if (bio->bi_rw & REQ_RAHEAD)
+				what = READ_AHEAD_COMPLETED_WITH_ERROR;
+			else
+				what = READ_COMPLETED_WITH_ERROR;
+			break;
+		default:
+			what = WRITE_COMPLETED_WITH_ERROR;
+			break;
+		}
+	} else {
 		what = COMPLETED_OK;
+	}
 
 	bio_put(req->private_bio);
 	req->private_bio = ERR_PTR(bio->bi_error);

View File

@@ -448,7 +448,7 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	struct request *rq;
 	struct bio *bio = rqd->bio;
 
-	rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
+	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
 	if (IS_ERR(rq))
 		return -ENOMEM;
 

View File

@@ -344,7 +344,6 @@ static int add_bio(struct cardinfo *card)
 	int offset;
 	struct bio *bio;
 	struct bio_vec vec;
-	int rw;
 
 	bio = card->currentbio;
 	if (!bio && card->bio) {
@@ -359,7 +358,6 @@ static int add_bio(struct cardinfo *card)
 	if (!bio)
 		return 0;
 
-	rw = bio_rw(bio);
 	if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
 		return 0;
 
@@ -369,7 +367,7 @@ static int add_bio(struct cardinfo *card)
 				  vec.bv_page,
 				  vec.bv_offset,
 				  vec.bv_len,
-				  (rw == READ) ?
+				  bio_op(bio) == REQ_OP_READ ?
 				  PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
 
 	p = &card->mm_pages[card->Ready];
@@ -398,7 +396,7 @@ static int add_bio(struct cardinfo *card)
 				      DMASCR_CHAIN_EN |
 				      DMASCR_SEM_EN |
 				      pci_cmds);
-	if (rw == WRITE)
+	if (bio_op(bio) == REQ_OP_WRITE)
 		desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
 	desc->sem_control_bits = desc->control_bits;
 

View File

@@ -853,14 +853,14 @@ static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
 			return NVM_IO_ERR;
 		}
 
-		if (bio_rw(bio) == WRITE)
+		if (bio_op(bio) == REQ_OP_WRITE)
 			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
 								npages);
 
 		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
 	}
 
-	if (bio_rw(bio) == WRITE)
+	if (bio_op(bio) == REQ_OP_WRITE)
 		return rrpc_write_rq(rrpc, bio, rqd, flags);
 
 	return rrpc_read_rq(rrpc, bio, rqd, flags);

View File

@@ -528,7 +528,7 @@ static void read_callback(unsigned long error, void *context)
 		DMWARN_LIMIT("Read failure on mirror device %s. "
 			     "Trying alternative device.",
 			     m->dev->name);
-		queue_bio(m->ms, bio, bio_rw(bio));
+		queue_bio(m->ms, bio, bio_data_dir(bio));
 		return;
 	}
 
@@ -1193,7 +1193,7 @@ static void mirror_dtr(struct dm_target *ti)
  */
 static int mirror_map(struct dm_target *ti, struct bio *bio)
 {
-	int r, rw = bio_rw(bio);
+	int r, rw = bio_data_dir(bio);
 	struct mirror *m;
 	struct mirror_set *ms = ti->private;
 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
@@ -1217,7 +1217,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 	 * If region is not in-sync queue the bio.
 	 */
 	if (!r || (r == -EWOULDBLOCK)) {
-		if (rw == READA)
+		if (bio->bi_rw & REQ_RAHEAD)
 			return -EWOULDBLOCK;
 
 		queue_bio(ms, bio, rw);
@@ -1242,7 +1242,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 
 static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 {
-	int rw = bio_rw(bio);
+	int rw = bio_data_dir(bio);
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
 	struct mirror *m = NULL;
 	struct dm_bio_details *bd = NULL;

View File

@@ -1696,7 +1696,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 	 * to copy an exception */
 	down_write(&s->lock);
 
-	if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
+	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
+	    bio_data_dir(bio) == WRITE)) {
 		r = -EIO;
 		goto out_unlock;
 	}
@@ -1713,7 +1714,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 		 * flags so we should only get this if we are
 		 * writeable.
 		 */
-		if (bio_rw(bio) == WRITE) {
+		if (bio_data_dir(bio) == WRITE) {
 			pe = __lookup_pending_exception(s, chunk);
 			if (!pe) {
 				up_write(&s->lock);
@@ -1819,7 +1820,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 	e = dm_lookup_exception(&s->complete, chunk);
 	if (e) {
 		/* Queue writes overlapping with chunks being merged */
-		if (bio_rw(bio) == WRITE &&
+		if (bio_data_dir(bio) == WRITE &&
 		    chunk >= s->first_merging_chunk &&
 		    chunk < (s->first_merging_chunk +
 			     s->num_merging_chunks)) {
@@ -1831,7 +1832,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 		remap_exception(s, e, bio, chunk);
 
-		if (bio_rw(bio) == WRITE)
+		if (bio_data_dir(bio) == WRITE)
 			track_chunk(s, bio, chunk);
 		goto out_unlock;
 	}
 
@@ -1839,7 +1840,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 redirect_to_origin:
 	bio->bi_bdev = s->origin->bdev;
 
-	if (bio_rw(bio) == WRITE) {
+	if (bio_data_dir(bio) == WRITE) {
 		up_write(&s->lock);
 		return do_origin(s->origin, bio);
 	}
@@ -2288,7 +2289,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
 	if (unlikely(bio->bi_rw & REQ_PREFLUSH))
 		return DM_MAPIO_REMAPPED;
 
-	if (bio_rw(bio) != WRITE)
+	if (bio_data_dir(bio) != WRITE)
 		return DM_MAPIO_REMAPPED;
 
 	available_sectors = o->split_boundary -

View File

@@ -35,16 +35,19 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
  */
 static int zero_map(struct dm_target *ti, struct bio *bio)
 {
-	switch(bio_rw(bio)) {
-	case READ:
-		zero_fill_bio(bio);
-		break;
-	case READA:
+	switch (bio_op(bio)) {
+	case REQ_OP_READ:
+		if (bio->bi_rw & REQ_RAHEAD) {
 		/* readahead of null bytes only wastes buffer cache */
 		return -EIO;
-	case WRITE:
+		}
+		zero_fill_bio(bio);
+		break;
+	case REQ_OP_WRITE:
 		/* writes get silently dropped */
 		break;
+	default:
+		return -EIO;
 	}
 
 	bio_endio(bio);

View File

@@ -1833,7 +1833,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
 		dm_put_live_table(md, srcu_idx);
 
-		if (bio_rw(bio) != READA)
+		if (!(bio->bi_rw & REQ_RAHEAD))
 			queue_io(md, bio);
 		else
 			bio_io_error(bio);

View File

@@ -1105,7 +1105,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 	bitmap = mddev->bitmap;
 
 	/*
-	 * make_request() can abort the operation when READA is being
+	 * make_request() can abort the operation when read-ahead is being
 	 * used and no empty request is available.
 	 *
 	 */

View File

@@ -5233,7 +5233,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 			 (unsigned long long)logical_sector);
 
 		sh = raid5_get_active_stripe(conf, new_sector, previous,
-				       (bi->bi_rw&RWA_MASK), 0);
+				       (bi->bi_rw & REQ_RAHEAD), 0);
 		if (sh) {
 			if (unlikely(previous)) {
 				/* expansion might have moved on while waiting for a

View File

@@ -500,7 +500,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	struct bio *bio = rqd->bio;
 	struct nvme_nvm_command *cmd;
 
-	rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
+	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
 	if (IS_ERR(rq))
 		return -ENOMEM;
 

View File

@@ -336,7 +336,6 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
 static blk_qc_t loop_make_request(struct request_queue *q, struct bio *old_bio)
 {
 	struct lloop_device *lo = q->queuedata;
-	int rw = bio_rw(old_bio);
 	int inactive;
 
 	blk_queue_split(q, &old_bio, q->bio_split);
@@ -354,13 +353,15 @@ static blk_qc_t loop_make_request(struct request_queue *q, struct bio *old_bio)
 	if (inactive)
 		goto err;
 
-	if (rw == WRITE) {
+	switch (bio_op(old_bio)) {
+	case REQ_OP_WRITE:
 		if (lo->lo_flags & LO_FLAGS_READ_ONLY)
 			goto err;
-	} else if (rw == READA) {
-		rw = READ;
-	} else if (rw != READ) {
-		CERROR("lloop: unknown command (%x)\n", rw);
+		break;
+	case REQ_OP_READ:
+		break;
+	default:
+		CERROR("lloop: unknown command (%x)\n", bio_op(old_bio));
 		goto err;
 	}
 	loop_add_bio(lo, old_bio);

View File

@@ -153,7 +153,7 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
 	if (uptodate) {
 		set_buffer_uptodate(bh);
 	} else {
-		/* This happens, due to failed READA attempts. */
+		/* This happens, due to failed read-ahead attempts. */
 		clear_buffer_uptodate(bh);
 	}
 	unlock_buffer(bh);
@@ -1395,7 +1395,7 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
 {
 	struct buffer_head *bh = __getblk(bdev, block, size);
 	if (likely(bh)) {
-		ll_rw_block(REQ_OP_READ, READA, 1, &bh);
+		ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
 		brelse(bh);
 	}
 }
@@ -3052,14 +3052,14 @@ EXPORT_SYMBOL(submit_bh);
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
  * @op: whether to %READ or %WRITE
- * @op_flags: rq_flag_bits or %READA (readahead)
+ * @op_flags: rq_flag_bits
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
- * requests an I/O operation on them, either a %READ or a %WRITE.  The third
- * %READA option is described in the documentation for generic_make_request()
- * which ll_rw_block() calls.
+ * requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE.
+ * @op_flags contains flags modifying the detailed I/O behavior, most notably
+ * %REQ_RAHEAD.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit), any buffer that appears to be clean when doing a write
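
The updated kernel-doc matches how the remaining in-tree users issue
read-ahead through this interface; a typical call (sketch only, assuming
bhs[] holds nr already-mapped buffer heads) is simply:

	/* Optional read-ahead; lower layers may fail REQ_RAHEAD I/O without retry. */
	ll_rw_block(REQ_OP_READ, REQ_RAHEAD, nr, bhs);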

View File

@@ -159,7 +159,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
 		.sbi = sbi,
 		.type = META,
 		.op = REQ_OP_READ,
-		.op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
+		.op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : REQ_RAHEAD,
 		.encrypted_page = NULL,
 	};
 	struct blk_plug plug;

View File

@@ -733,7 +733,8 @@ next_step:
 
 			start_bidx = start_bidx_of_node(nofs, inode);
 			data_page = get_read_data_page(inode,
-					start_bidx + ofs_in_node, READA, true);
+					start_bidx + ofs_in_node, REQ_RAHEAD,
+					true);
 			if (IS_ERR(data_page)) {
 				iput(inode);
 				continue;

View File

@@ -1119,7 +1119,7 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
 	if (!apage)
 		return;
 
-	err = read_node_page(apage, READA);
+	err = read_node_page(apage, REQ_RAHEAD);
 	f2fs_put_page(apage, err ? 1 : 0);
 }
 

View File

@@ -285,7 +285,8 @@ static void gfs2_metapath_ra(struct gfs2_glock *gl,
 		if (trylock_buffer(rabh)) {
 			if (!buffer_uptodate(rabh)) {
 				rabh->b_end_io = end_buffer_read_sync;
-				submit_bh(REQ_OP_READ, READA | REQ_META, rabh);
+				submit_bh(REQ_OP_READ, REQ_RAHEAD | REQ_META,
+					  rabh);
 				continue;
 			}
 			unlock_buffer(rabh);

View File

@@ -1513,7 +1513,7 @@ static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
 				continue;
 			}
 			bh->b_end_io = end_buffer_read_sync;
-			submit_bh(REQ_OP_READ, READA | REQ_META, bh);
+			submit_bh(REQ_OP_READ, REQ_RAHEAD | REQ_META, bh);
 			continue;
 		}
 		brelse(bh);

View File

@@ -459,7 +459,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 		bh = gfs2_getbuf(gl, dblock, CREATE);
 
 		if (!buffer_uptodate(bh) && !buffer_locked(bh))
-			ll_rw_block(REQ_OP_READ, READA | REQ_META, 1, &bh);
+			ll_rw_block(REQ_OP_READ, REQ_RAHEAD | REQ_META, 1, &bh);
 		brelse(bh);
 		dblock++;
 		extlen--;

View File

@@ -551,7 +551,7 @@ static int search_by_key_reada(struct super_block *s,
 		if (!buffer_uptodate(bh[j])) {
 			if (depth == -1)
 				depth = reiserfs_write_unlock_nested(s);
-			ll_rw_block(REQ_OP_READ, READA, 1, bh + j);
+			ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, bh + j);
 		}
 		brelse(bh[j]);
 	}

View File

@@ -113,7 +113,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
 					brelse(tmp);
 			}
 			if (num) {
-				ll_rw_block(REQ_OP_READ, READA, num, bha);
+				ll_rw_block(REQ_OP_READ, REQ_RAHEAD, num, bha);
 				for (i = 0; i < num; i++)
 					brelse(bha[i]);
 			}

View File

@@ -87,7 +87,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
 					brelse(tmp);
 			}
 			if (num) {
-				ll_rw_block(REQ_OP_READ, READA, num, bha);
+				ll_rw_block(REQ_OP_READ, REQ_RAHEAD, num, bha);
 				for (i = 0; i < num; i++)
 					brelse(bha[i]);
 			}

View File

@@ -178,9 +178,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
  * READ_SYNC		A synchronous read. Device is not plugged, caller can
  *			immediately wait on this read without caring about
  *			unplugging.
- * READA		Used for read-ahead operations. Lower priority, and the
- *			block layer could (in theory) choose to ignore this
- *			request if it runs into resource problems.
  * WRITE		A normal async write. Device will be plugged.
 * WRITE_SYNC		Synchronous write. Identical to WRITE, but passes down
 *			the hint that someone will be waiting on this IO
@@ -195,11 +192,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
  *
  */
 #define RW_MASK			REQ_OP_WRITE
-#define RWA_MASK		REQ_RAHEAD
 
 #define READ			REQ_OP_READ
-#define WRITE			RW_MASK
-#define READA			RWA_MASK
+#define WRITE			REQ_OP_WRITE
 
 #define READ_SYNC		REQ_SYNC
 #define WRITE_SYNC		(REQ_SYNC | REQ_NOIDLE)
@@ -2470,17 +2465,6 @@ static inline bool op_is_write(unsigned int op)
 	return op == REQ_OP_READ ? false : true;
 }
 
-/*
- * return READ, READA, or WRITE
- */
-static inline int bio_rw(struct bio *bio)
-{
-	if (op_is_write(bio_op(bio)))
-		return WRITE;
-
-	return bio->bi_rw & RWA_MASK;
-}
-
 /*
  * return data direction, READ or WRITE
  */
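
If some out-of-tree caller still wants the old three-way answer, the same
information remains available through the helpers that stay; a minimal
sketch (the bio_rw_legacy() name is made up, nothing in-tree needs it):

	/* Mirrors the body of the removed bio_rw(), with RWA_MASK spelled as
	 * REQ_RAHEAD: returns WRITE for writes, otherwise 0 for a plain read
	 * or REQ_RAHEAD for read-ahead. */
	static inline int bio_rw_legacy(struct bio *bio)
	{
		if (op_is_write(bio_op(bio)))
			return WRITE;

		return bio->bi_rw & REQ_RAHEAD;
	}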

View File

@@ -55,7 +55,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
 		{ IPU,		"IN-PLACE" },				\
 		{ OPU,		"OUT-OF-PLACE" })
 
-#define F2FS_BIO_FLAG_MASK(t)	(t & (READA | WRITE_FLUSH_FUA))
+#define F2FS_BIO_FLAG_MASK(t)	(t & (REQ_RAHEAD | WRITE_FLUSH_FUA))
 #define F2FS_BIO_EXTRA_MASK(t)	(t & (REQ_META | REQ_PRIO))
 
 #define show_bio_type(op, op_flags)	show_bio_op(op),		\
@@ -68,7 +68,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
 
 #define show_bio_op_flags(flags)					\
 	__print_symbolic(F2FS_BIO_FLAG_MASK(flags),			\
-		{ READA,	"READAHEAD" },				\
+		{ REQ_RAHEAD,	"READAHEAD" },				\
 		{ READ_SYNC,	"READ_SYNC" },				\
 		{ WRITE_SYNC,	"WRITE_SYNC" },				\
 		{ WRITE_FLUSH,	"WRITE_FLUSH" },			\