[PATCH] Split struct request ->flags into two parts

Right now ->flags is a bit of a mess: some bits are request types, and
others are just modifiers. Clean this up by splitting it into
->cmd_type and ->cmd_flags. This allows the introduction of generic
Linux block message types, useful for sending generic Linux commands
to block devices.

Signed-off-by: Jens Axboe <axboe@suse.de>
This commit is contained in:
Jens Axboe 2006-08-10 08:44:47 +02:00 committed by Jens Axboe
parent 77ed74da26
commit 4aff5e2333
39 changed files with 295 additions and 301 deletions

View File

@ -1335,7 +1335,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
arq->state = AS_RQ_NEW; arq->state = AS_RQ_NEW;
if (rq_data_dir(arq->request) == READ if (rq_data_dir(arq->request) == READ
|| (arq->request->flags & REQ_RW_SYNC)) || (arq->request->cmd_flags & REQ_RW_SYNC))
arq->is_sync = 1; arq->is_sync = 1;
else else
arq->is_sync = 0; arq->is_sync = 0;

View File

@ -242,7 +242,7 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
list_for_each_prev(entry, &q->queue_head) { list_for_each_prev(entry, &q->queue_head) {
struct request *pos = list_entry_rq(entry); struct request *pos = list_entry_rq(entry);
if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED)) if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
break; break;
if (rq->sector >= boundary) { if (rq->sector >= boundary) {
if (pos->sector < boundary) if (pos->sector < boundary)
@ -313,7 +313,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
e->ops->elevator_deactivate_req_fn(q, rq); e->ops->elevator_deactivate_req_fn(q, rq);
} }
rq->flags &= ~REQ_STARTED; rq->cmd_flags &= ~REQ_STARTED;
elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
} }
@ -344,13 +344,13 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
switch (where) { switch (where) {
case ELEVATOR_INSERT_FRONT: case ELEVATOR_INSERT_FRONT:
rq->flags |= REQ_SOFTBARRIER; rq->cmd_flags |= REQ_SOFTBARRIER;
list_add(&rq->queuelist, &q->queue_head); list_add(&rq->queuelist, &q->queue_head);
break; break;
case ELEVATOR_INSERT_BACK: case ELEVATOR_INSERT_BACK:
rq->flags |= REQ_SOFTBARRIER; rq->cmd_flags |= REQ_SOFTBARRIER;
elv_drain_elevator(q); elv_drain_elevator(q);
list_add_tail(&rq->queuelist, &q->queue_head); list_add_tail(&rq->queuelist, &q->queue_head);
/* /*
@ -369,7 +369,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
case ELEVATOR_INSERT_SORT: case ELEVATOR_INSERT_SORT:
BUG_ON(!blk_fs_request(rq)); BUG_ON(!blk_fs_request(rq));
rq->flags |= REQ_SORTED; rq->cmd_flags |= REQ_SORTED;
q->nr_sorted++; q->nr_sorted++;
if (q->last_merge == NULL && rq_mergeable(rq)) if (q->last_merge == NULL && rq_mergeable(rq))
q->last_merge = rq; q->last_merge = rq;
@ -387,7 +387,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
* insertion; otherwise, requests should be requeued * insertion; otherwise, requests should be requeued
* in ordseq order. * in ordseq order.
*/ */
rq->flags |= REQ_SOFTBARRIER; rq->cmd_flags |= REQ_SOFTBARRIER;
if (q->ordseq == 0) { if (q->ordseq == 0) {
list_add(&rq->queuelist, &q->queue_head); list_add(&rq->queuelist, &q->queue_head);
@ -429,9 +429,9 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
int plug) int plug)
{ {
if (q->ordcolor) if (q->ordcolor)
rq->flags |= REQ_ORDERED_COLOR; rq->cmd_flags |= REQ_ORDERED_COLOR;
if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
/* /*
* toggle ordered color * toggle ordered color
*/ */
@ -452,7 +452,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
q->end_sector = rq_end_sector(rq); q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq; q->boundary_rq = rq;
} }
} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT) } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
where = ELEVATOR_INSERT_BACK; where = ELEVATOR_INSERT_BACK;
if (plug) if (plug)
@ -493,7 +493,7 @@ struct request *elv_next_request(request_queue_t *q)
int ret; int ret;
while ((rq = __elv_next_request(q)) != NULL) { while ((rq = __elv_next_request(q)) != NULL) {
if (!(rq->flags & REQ_STARTED)) { if (!(rq->cmd_flags & REQ_STARTED)) {
elevator_t *e = q->elevator; elevator_t *e = q->elevator;
/* /*
@ -510,7 +510,7 @@ struct request *elv_next_request(request_queue_t *q)
* it, a request that has been delayed should * it, a request that has been delayed should
* not be passed by new incoming requests * not be passed by new incoming requests
*/ */
rq->flags |= REQ_STARTED; rq->cmd_flags |= REQ_STARTED;
blk_add_trace_rq(q, rq, BLK_TA_ISSUE); blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
} }
@ -519,7 +519,7 @@ struct request *elv_next_request(request_queue_t *q)
q->boundary_rq = NULL; q->boundary_rq = NULL;
} }
if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn) if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
break; break;
ret = q->prep_rq_fn(q, rq); ret = q->prep_rq_fn(q, rq);
@ -541,7 +541,7 @@ struct request *elv_next_request(request_queue_t *q)
nr_bytes = rq->data_len; nr_bytes = rq->data_len;
blkdev_dequeue_request(rq); blkdev_dequeue_request(rq);
rq->flags |= REQ_QUIET; rq->cmd_flags |= REQ_QUIET;
end_that_request_chunk(rq, 0, nr_bytes); end_that_request_chunk(rq, 0, nr_bytes);
end_that_request_last(rq, 0); end_that_request_last(rq, 0);
} else { } else {

View File

@ -382,8 +382,8 @@ unsigned blk_ordered_req_seq(struct request *rq)
if (rq == &q->post_flush_rq) if (rq == &q->post_flush_rq)
return QUEUE_ORDSEQ_POSTFLUSH; return QUEUE_ORDSEQ_POSTFLUSH;
if ((rq->flags & REQ_ORDERED_COLOR) == if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
(q->orig_bar_rq->flags & REQ_ORDERED_COLOR)) (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
return QUEUE_ORDSEQ_DRAIN; return QUEUE_ORDSEQ_DRAIN;
else else
return QUEUE_ORDSEQ_DONE; return QUEUE_ORDSEQ_DONE;
@ -446,8 +446,8 @@ static void queue_flush(request_queue_t *q, unsigned which)
end_io = post_flush_end_io; end_io = post_flush_end_io;
} }
rq->cmd_flags = REQ_HARDBARRIER;
rq_init(q, rq); rq_init(q, rq);
rq->flags = REQ_HARDBARRIER;
rq->elevator_private = NULL; rq->elevator_private = NULL;
rq->rq_disk = q->bar_rq.rq_disk; rq->rq_disk = q->bar_rq.rq_disk;
rq->rl = NULL; rq->rl = NULL;
@ -471,9 +471,11 @@ static inline struct request *start_ordered(request_queue_t *q,
blkdev_dequeue_request(rq); blkdev_dequeue_request(rq);
q->orig_bar_rq = rq; q->orig_bar_rq = rq;
rq = &q->bar_rq; rq = &q->bar_rq;
rq->cmd_flags = 0;
rq_init(q, rq); rq_init(q, rq);
rq->flags = bio_data_dir(q->orig_bar_rq->bio); if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0; rq->cmd_flags |= REQ_RW;
rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
rq->elevator_private = NULL; rq->elevator_private = NULL;
rq->rl = NULL; rq->rl = NULL;
init_request_from_bio(rq, q->orig_bar_rq->bio); init_request_from_bio(rq, q->orig_bar_rq->bio);
@ -1124,7 +1126,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
} }
list_del_init(&rq->queuelist); list_del_init(&rq->queuelist);
rq->flags &= ~REQ_QUEUED; rq->cmd_flags &= ~REQ_QUEUED;
rq->tag = -1; rq->tag = -1;
if (unlikely(bqt->tag_index[tag] == NULL)) if (unlikely(bqt->tag_index[tag] == NULL))
@ -1160,7 +1162,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
struct blk_queue_tag *bqt = q->queue_tags; struct blk_queue_tag *bqt = q->queue_tags;
int tag; int tag;
if (unlikely((rq->flags & REQ_QUEUED))) { if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
printk(KERN_ERR printk(KERN_ERR
"%s: request %p for device [%s] already tagged %d", "%s: request %p for device [%s] already tagged %d",
__FUNCTION__, rq, __FUNCTION__, rq,
@ -1174,7 +1176,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
__set_bit(tag, bqt->tag_map); __set_bit(tag, bqt->tag_map);
rq->flags |= REQ_QUEUED; rq->cmd_flags |= REQ_QUEUED;
rq->tag = tag; rq->tag = tag;
bqt->tag_index[tag] = rq; bqt->tag_index[tag] = rq;
blkdev_dequeue_request(rq); blkdev_dequeue_request(rq);
@ -1210,65 +1212,31 @@ void blk_queue_invalidate_tags(request_queue_t *q)
printk(KERN_ERR printk(KERN_ERR
"%s: bad tag found on list\n", __FUNCTION__); "%s: bad tag found on list\n", __FUNCTION__);
list_del_init(&rq->queuelist); list_del_init(&rq->queuelist);
rq->flags &= ~REQ_QUEUED; rq->cmd_flags &= ~REQ_QUEUED;
} else } else
blk_queue_end_tag(q, rq); blk_queue_end_tag(q, rq);
rq->flags &= ~REQ_STARTED; rq->cmd_flags &= ~REQ_STARTED;
__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0); __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
} }
} }
EXPORT_SYMBOL(blk_queue_invalidate_tags); EXPORT_SYMBOL(blk_queue_invalidate_tags);
static const char * const rq_flags[] = {
"REQ_RW",
"REQ_FAILFAST",
"REQ_SORTED",
"REQ_SOFTBARRIER",
"REQ_HARDBARRIER",
"REQ_FUA",
"REQ_CMD",
"REQ_NOMERGE",
"REQ_STARTED",
"REQ_DONTPREP",
"REQ_QUEUED",
"REQ_ELVPRIV",
"REQ_PC",
"REQ_BLOCK_PC",
"REQ_SENSE",
"REQ_FAILED",
"REQ_QUIET",
"REQ_SPECIAL",
"REQ_DRIVE_CMD",
"REQ_DRIVE_TASK",
"REQ_DRIVE_TASKFILE",
"REQ_PREEMPT",
"REQ_PM_SUSPEND",
"REQ_PM_RESUME",
"REQ_PM_SHUTDOWN",
"REQ_ORDERED_COLOR",
};
void blk_dump_rq_flags(struct request *rq, char *msg) void blk_dump_rq_flags(struct request *rq, char *msg)
{ {
int bit; int bit;
printk("%s: dev %s: flags = ", msg, printk("%s: dev %s: type=%x, flags=%x\n", msg,
rq->rq_disk ? rq->rq_disk->disk_name : "?"); rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
bit = 0; rq->cmd_flags);
do {
if (rq->flags & (1 << bit))
printk("%s ", rq_flags[bit]);
bit++;
} while (bit < __REQ_NR_BITS);
printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector, printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
rq->nr_sectors, rq->nr_sectors,
rq->current_nr_sectors); rq->current_nr_sectors);
printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len); printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) { if (blk_pc_request(rq)) {
printk("cdb: "); printk("cdb: ");
for (bit = 0; bit < sizeof(rq->cmd); bit++) for (bit = 0; bit < sizeof(rq->cmd); bit++)
printk("%02x ", rq->cmd[bit]); printk("%02x ", rq->cmd[bit]);
@ -1441,7 +1409,7 @@ static inline int ll_new_mergeable(request_queue_t *q,
int nr_phys_segs = bio_phys_segments(q, bio); int nr_phys_segs = bio_phys_segments(q, bio);
if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
req->flags |= REQ_NOMERGE; req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge) if (req == q->last_merge)
q->last_merge = NULL; q->last_merge = NULL;
return 0; return 0;
@ -1464,7 +1432,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
|| req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
req->flags |= REQ_NOMERGE; req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge) if (req == q->last_merge)
q->last_merge = NULL; q->last_merge = NULL;
return 0; return 0;
@ -1491,7 +1459,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
max_sectors = q->max_sectors; max_sectors = q->max_sectors;
if (req->nr_sectors + bio_sectors(bio) > max_sectors) { if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
req->flags |= REQ_NOMERGE; req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge) if (req == q->last_merge)
q->last_merge = NULL; q->last_merge = NULL;
return 0; return 0;
@ -1530,7 +1498,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
if (req->nr_sectors + bio_sectors(bio) > max_sectors) { if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
req->flags |= REQ_NOMERGE; req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge) if (req == q->last_merge)
q->last_merge = NULL; q->last_merge = NULL;
return 0; return 0;
@ -2029,7 +1997,7 @@ EXPORT_SYMBOL(blk_get_queue);
static inline void blk_free_request(request_queue_t *q, struct request *rq) static inline void blk_free_request(request_queue_t *q, struct request *rq)
{ {
if (rq->flags & REQ_ELVPRIV) if (rq->cmd_flags & REQ_ELVPRIV)
elv_put_request(q, rq); elv_put_request(q, rq);
mempool_free(rq, q->rq.rq_pool); mempool_free(rq, q->rq.rq_pool);
} }
@ -2044,17 +2012,17 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
return NULL; return NULL;
/* /*
* first three bits are identical in rq->flags and bio->bi_rw, * first three bits are identical in rq->cmd_flags and bio->bi_rw,
* see bio.h and blkdev.h * see bio.h and blkdev.h
*/ */
rq->flags = rw; rq->cmd_flags = rw;
if (priv) { if (priv) {
if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) { if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
mempool_free(rq, q->rq.rq_pool); mempool_free(rq, q->rq.rq_pool);
return NULL; return NULL;
} }
rq->flags |= REQ_ELVPRIV; rq->cmd_flags |= REQ_ELVPRIV;
} }
return rq; return rq;
@ -2351,7 +2319,8 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
* must not attempt merges on this) and that it acts as a soft * must not attempt merges on this) and that it acts as a soft
* barrier * barrier
*/ */
rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER; rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd_flags |= REQ_SOFTBARRIER;
rq->special = data; rq->special = data;
@ -2558,7 +2527,7 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
rq->rq_disk = bd_disk; rq->rq_disk = bd_disk;
rq->flags |= REQ_NOMERGE; rq->cmd_flags |= REQ_NOMERGE;
rq->end_io = done; rq->end_io = done;
WARN_ON(irqs_disabled()); WARN_ON(irqs_disabled());
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
@ -2728,7 +2697,7 @@ void __blk_put_request(request_queue_t *q, struct request *req)
*/ */
if (rl) { if (rl) {
int rw = rq_data_dir(req); int rw = rq_data_dir(req);
int priv = req->flags & REQ_ELVPRIV; int priv = req->cmd_flags & REQ_ELVPRIV;
BUG_ON(!list_empty(&req->queuelist)); BUG_ON(!list_empty(&req->queuelist));
@ -2890,22 +2859,22 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
static void init_request_from_bio(struct request *req, struct bio *bio) static void init_request_from_bio(struct request *req, struct bio *bio)
{ {
req->flags |= REQ_CMD; req->cmd_type = REQ_TYPE_FS;
/* /*
* inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST) * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
*/ */
if (bio_rw_ahead(bio) || bio_failfast(bio)) if (bio_rw_ahead(bio) || bio_failfast(bio))
req->flags |= REQ_FAILFAST; req->cmd_flags |= REQ_FAILFAST;
/* /*
* REQ_BARRIER implies no merging, but lets make it explicit * REQ_BARRIER implies no merging, but lets make it explicit
*/ */
if (unlikely(bio_barrier(bio))) if (unlikely(bio_barrier(bio)))
req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE); req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
if (bio_sync(bio)) if (bio_sync(bio))
req->flags |= REQ_RW_SYNC; req->cmd_flags |= REQ_RW_SYNC;
req->errors = 0; req->errors = 0;
req->hard_sector = req->sector = bio->bi_sector; req->hard_sector = req->sector = bio->bi_sector;
@ -3306,7 +3275,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
req->errors = 0; req->errors = 0;
if (!uptodate) { if (!uptodate) {
if (blk_fs_request(req) && !(req->flags & REQ_QUIET)) if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
printk("end_request: I/O error, dev %s, sector %llu\n", printk("end_request: I/O error, dev %s, sector %llu\n",
req->rq_disk ? req->rq_disk->disk_name : "?", req->rq_disk ? req->rq_disk->disk_name : "?",
(unsigned long long)req->sector); (unsigned long long)req->sector);
@ -3569,8 +3538,8 @@ EXPORT_SYMBOL(end_request);
void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
{ {
/* first two bits are identical in rq->flags and bio->bi_rw */ /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
rq->flags |= (bio->bi_rw & 3); rq->cmd_flags |= (bio->bi_rw & 3);
rq->nr_phys_segments = bio_phys_segments(q, bio); rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->nr_hw_segments = bio_hw_segments(q, bio); rq->nr_hw_segments = bio_hw_segments(q, bio);

View File

@ -294,7 +294,7 @@ static int sg_io(struct file *file, request_queue_t *q,
rq->sense = sense; rq->sense = sense;
rq->sense_len = 0; rq->sense_len = 0;
rq->flags |= REQ_BLOCK_PC; rq->cmd_type = REQ_TYPE_BLOCK_PC;
bio = rq->bio; bio = rq->bio;
/* /*
@ -470,7 +470,7 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
memset(sense, 0, sizeof(sense)); memset(sense, 0, sizeof(sense));
rq->sense = sense; rq->sense = sense;
rq->sense_len = 0; rq->sense_len = 0;
rq->flags |= REQ_BLOCK_PC; rq->cmd_type = REQ_TYPE_BLOCK_PC;
blk_execute_rq(q, disk, rq, 0); blk_execute_rq(q, disk, rq, 0);
@ -502,7 +502,7 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c
int err; int err;
rq = blk_get_request(q, WRITE, __GFP_WAIT); rq = blk_get_request(q, WRITE, __GFP_WAIT);
rq->flags |= REQ_BLOCK_PC; rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->data = NULL; rq->data = NULL;
rq->data_len = 0; rq->data_len = 0;
rq->timeout = BLK_DEFAULT_TIMEOUT; rq->timeout = BLK_DEFAULT_TIMEOUT;

View File

@ -2991,8 +2991,8 @@ static void do_fd_request(request_queue_t * q)
if (usage_count == 0) { if (usage_count == 0) {
printk("warning: usage count=0, current_req=%p exiting\n", printk("warning: usage count=0, current_req=%p exiting\n",
current_req); current_req);
printk("sect=%ld flags=%lx\n", (long)current_req->sector, printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector,
current_req->flags); current_req->cmd_type, current_req->cmd_flags);
return; return;
} }
if (test_bit(0, &fdc_busy)) { if (test_bit(0, &fdc_busy)) {

View File

@ -407,10 +407,10 @@ static void do_nbd_request(request_queue_t * q)
struct nbd_device *lo; struct nbd_device *lo;
blkdev_dequeue_request(req); blkdev_dequeue_request(req);
dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%lx)\n", dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
req->rq_disk->disk_name, req, req->flags); req->rq_disk->disk_name, req, req->cmd_type);
if (!(req->flags & REQ_CMD)) if (!blk_fs_request(req))
goto error_out; goto error_out;
lo = req->rq_disk->private_data; lo = req->rq_disk->private_data;
@ -489,7 +489,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
switch (cmd) { switch (cmd) {
case NBD_DISCONNECT: case NBD_DISCONNECT:
printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name); printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
sreq.flags = REQ_SPECIAL; sreq.cmd_type = REQ_TYPE_SPECIAL;
nbd_cmd(&sreq) = NBD_CMD_DISC; nbd_cmd(&sreq) = NBD_CMD_DISC;
/* /*
* Set these to sane values in case server implementation * Set these to sane values in case server implementation

View File

@ -437,7 +437,7 @@ static char *pd_buf; /* buffer for request in progress */
static enum action do_pd_io_start(void) static enum action do_pd_io_start(void)
{ {
if (pd_req->flags & REQ_SPECIAL) { if (blk_special_request(pd_req)) {
phase = pd_special; phase = pd_special;
return pd_special(); return pd_special();
} }

View File

@ -365,16 +365,16 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
rq->sense = sense; rq->sense = sense;
memset(sense, 0, sizeof(sense)); memset(sense, 0, sizeof(sense));
rq->sense_len = 0; rq->sense_len = 0;
rq->flags |= REQ_BLOCK_PC | REQ_HARDBARRIER; rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_HARDBARRIER;
if (cgc->quiet) if (cgc->quiet)
rq->flags |= REQ_QUIET; rq->cmd_flags |= REQ_QUIET;
memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE); memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
if (sizeof(rq->cmd) > CDROM_PACKET_SIZE) if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE); memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
rq->ref_count++; rq->ref_count++;
rq->flags |= REQ_NOMERGE;
rq->waiting = &wait; rq->waiting = &wait;
rq->end_io = blk_end_sync_rq; rq->end_io = blk_end_sync_rq;
elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1); elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);

View File

@ -313,7 +313,7 @@ static void do_xd_request (request_queue_t * q)
int res = 0; int res = 0;
int retry; int retry;
if (!(req->flags & REQ_CMD)) { if (!blk_fs_request(req)) {
end_request(req, 0); end_request(req, 0);
continue; continue;
} }

View File

@ -2129,7 +2129,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
rq->cmd[9] = 0xf8; rq->cmd[9] = 0xf8;
rq->cmd_len = 12; rq->cmd_len = 12;
rq->flags |= REQ_BLOCK_PC; rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->timeout = 60 * HZ; rq->timeout = 60 * HZ;
bio = rq->bio; bio = rq->bio;

View File

@ -1338,8 +1338,10 @@ static void do_cdu31a_request(request_queue_t * q)
} }
/* WTF??? */ /* WTF??? */
if (!(req->flags & REQ_CMD)) if (!blk_fs_request(req)) {
end_request(req, 0);
continue; continue;
}
if (rq_data_dir(req) == WRITE) { if (rq_data_dir(req) == WRITE) {
end_request(req, 0); end_request(req, 0);
continue; continue;

View File

@ -372,7 +372,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
{ {
int log = 0; int log = 0;
if (!sense || !rq || (rq->flags & REQ_QUIET)) if (!sense || !rq || (rq->cmd_flags & REQ_QUIET))
return 0; return 0;
switch (sense->sense_key) { switch (sense->sense_key) {
@ -597,7 +597,7 @@ static void cdrom_prepare_request(ide_drive_t *drive, struct request *rq)
struct cdrom_info *cd = drive->driver_data; struct cdrom_info *cd = drive->driver_data;
ide_init_drive_cmd(rq); ide_init_drive_cmd(rq);
rq->flags = REQ_PC; rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->rq_disk = cd->disk; rq->rq_disk = cd->disk;
} }
@ -617,7 +617,7 @@ static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
rq->cmd[0] = GPCMD_REQUEST_SENSE; rq->cmd[0] = GPCMD_REQUEST_SENSE;
rq->cmd[4] = rq->data_len = 18; rq->cmd[4] = rq->data_len = 18;
rq->flags = REQ_SENSE; rq->cmd_type = REQ_TYPE_SENSE;
/* NOTE! Save the failed command in "rq->buffer" */ /* NOTE! Save the failed command in "rq->buffer" */
rq->buffer = (void *) failed_command; rq->buffer = (void *) failed_command;
@ -630,10 +630,10 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
struct request *rq = HWGROUP(drive)->rq; struct request *rq = HWGROUP(drive)->rq;
int nsectors = rq->hard_cur_sectors; int nsectors = rq->hard_cur_sectors;
if ((rq->flags & REQ_SENSE) && uptodate) { if (blk_sense_request(rq) && uptodate) {
/* /*
* For REQ_SENSE, "rq->buffer" points to the original failed * For REQ_TYPE_SENSE, "rq->buffer" points to the original
* request * failed request
*/ */
struct request *failed = (struct request *) rq->buffer; struct request *failed = (struct request *) rq->buffer;
struct cdrom_info *info = drive->driver_data; struct cdrom_info *info = drive->driver_data;
@ -706,17 +706,17 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
return 1; return 1;
} }
if (rq->flags & REQ_SENSE) { if (blk_sense_request(rq)) {
/* We got an error trying to get sense info /* We got an error trying to get sense info
from the drive (probably while trying from the drive (probably while trying
to recover from a former error). Just give up. */ to recover from a former error). Just give up. */
rq->flags |= REQ_FAILED; rq->cmd_flags |= REQ_FAILED;
cdrom_end_request(drive, 0); cdrom_end_request(drive, 0);
ide_error(drive, "request sense failure", stat); ide_error(drive, "request sense failure", stat);
return 1; return 1;
} else if (rq->flags & (REQ_PC | REQ_BLOCK_PC)) { } else if (blk_pc_request(rq)) {
/* All other functions, except for READ. */ /* All other functions, except for READ. */
unsigned long flags; unsigned long flags;
@ -724,7 +724,7 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
* if we have an error, pass back CHECK_CONDITION as the * if we have an error, pass back CHECK_CONDITION as the
* scsi status byte * scsi status byte
*/ */
if ((rq->flags & REQ_BLOCK_PC) && !rq->errors) if (!rq->errors)
rq->errors = SAM_STAT_CHECK_CONDITION; rq->errors = SAM_STAT_CHECK_CONDITION;
/* Check for tray open. */ /* Check for tray open. */
@ -735,12 +735,12 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
cdrom_saw_media_change (drive); cdrom_saw_media_change (drive);
/*printk("%s: media changed\n",drive->name);*/ /*printk("%s: media changed\n",drive->name);*/
return 0; return 0;
} else if (!(rq->flags & REQ_QUIET)) { } else if (!(rq->cmd_flags & REQ_QUIET)) {
/* Otherwise, print an error. */ /* Otherwise, print an error. */
ide_dump_status(drive, "packet command error", stat); ide_dump_status(drive, "packet command error", stat);
} }
rq->flags |= REQ_FAILED; rq->cmd_flags |= REQ_FAILED;
/* /*
* instead of playing games with moving completions around, * instead of playing games with moving completions around,
@ -881,7 +881,7 @@ static int cdrom_timer_expiry(ide_drive_t *drive)
wait = ATAPI_WAIT_PC; wait = ATAPI_WAIT_PC;
break; break;
default: default:
if (!(rq->flags & REQ_QUIET)) if (!(rq->cmd_flags & REQ_QUIET))
printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n", rq->cmd[0]); printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n", rq->cmd[0]);
wait = 0; wait = 0;
break; break;
@ -1124,7 +1124,7 @@ static ide_startstop_t cdrom_read_intr (ide_drive_t *drive)
if (rq->current_nr_sectors > 0) { if (rq->current_nr_sectors > 0) {
printk (KERN_ERR "%s: cdrom_read_intr: data underrun (%d blocks)\n", printk (KERN_ERR "%s: cdrom_read_intr: data underrun (%d blocks)\n",
drive->name, rq->current_nr_sectors); drive->name, rq->current_nr_sectors);
rq->flags |= REQ_FAILED; rq->cmd_flags |= REQ_FAILED;
cdrom_end_request(drive, 0); cdrom_end_request(drive, 0);
} else } else
cdrom_end_request(drive, 1); cdrom_end_request(drive, 1);
@ -1456,7 +1456,7 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
printk ("%s: cdrom_pc_intr: data underrun %d\n", printk ("%s: cdrom_pc_intr: data underrun %d\n",
drive->name, pc->buflen); drive->name, pc->buflen);
*/ */
rq->flags |= REQ_FAILED; rq->cmd_flags |= REQ_FAILED;
cdrom_end_request(drive, 0); cdrom_end_request(drive, 0);
} }
return ide_stopped; return ide_stopped;
@ -1509,7 +1509,7 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
rq->data += thislen; rq->data += thislen;
rq->data_len -= thislen; rq->data_len -= thislen;
if (rq->flags & REQ_SENSE) if (blk_sense_request(rq))
rq->sense_len += thislen; rq->sense_len += thislen;
} else { } else {
confused: confused:
@ -1517,7 +1517,7 @@ confused:
"appears confused (ireason = 0x%02x). " "appears confused (ireason = 0x%02x). "
"Trying to recover by ending request.\n", "Trying to recover by ending request.\n",
drive->name, ireason); drive->name, ireason);
rq->flags |= REQ_FAILED; rq->cmd_flags |= REQ_FAILED;
cdrom_end_request(drive, 0); cdrom_end_request(drive, 0);
return ide_stopped; return ide_stopped;
} }
@ -1546,7 +1546,7 @@ static ide_startstop_t cdrom_do_packet_command (ide_drive_t *drive)
struct cdrom_info *info = drive->driver_data; struct cdrom_info *info = drive->driver_data;
info->dma = 0; info->dma = 0;
rq->flags &= ~REQ_FAILED; rq->cmd_flags &= ~REQ_FAILED;
len = rq->data_len; len = rq->data_len;
/* Start sending the command to the drive. */ /* Start sending the command to the drive. */
@ -1558,7 +1558,7 @@ static int cdrom_queue_packet_command(ide_drive_t *drive, struct request *rq)
{ {
struct request_sense sense; struct request_sense sense;
int retries = 10; int retries = 10;
unsigned int flags = rq->flags; unsigned int flags = rq->cmd_flags;
if (rq->sense == NULL) if (rq->sense == NULL)
rq->sense = &sense; rq->sense = &sense;
@ -1567,14 +1567,14 @@ static int cdrom_queue_packet_command(ide_drive_t *drive, struct request *rq)
do { do {
int error; int error;
unsigned long time = jiffies; unsigned long time = jiffies;
rq->flags = flags; rq->cmd_flags = flags;
error = ide_do_drive_cmd(drive, rq, ide_wait); error = ide_do_drive_cmd(drive, rq, ide_wait);
time = jiffies - time; time = jiffies - time;
/* FIXME: we should probably abort/retry or something /* FIXME: we should probably abort/retry or something
* in case of failure */ * in case of failure */
if (rq->flags & REQ_FAILED) { if (rq->cmd_flags & REQ_FAILED) {
/* The request failed. Retry if it was due to a unit /* The request failed. Retry if it was due to a unit
attention status attention status
(usually means media was changed). */ (usually means media was changed). */
@ -1596,10 +1596,10 @@ static int cdrom_queue_packet_command(ide_drive_t *drive, struct request *rq)
} }
/* End of retry loop. */ /* End of retry loop. */
} while ((rq->flags & REQ_FAILED) && retries >= 0); } while ((rq->cmd_flags & REQ_FAILED) && retries >= 0);
/* Return an error if the command failed. */ /* Return an error if the command failed. */
return (rq->flags & REQ_FAILED) ? -EIO : 0; return (rq->cmd_flags & REQ_FAILED) ? -EIO : 0;
} }
/* /*
@ -1963,7 +1963,7 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
{ {
struct cdrom_info *info = drive->driver_data; struct cdrom_info *info = drive->driver_data;
rq->flags |= REQ_QUIET; rq->cmd_flags |= REQ_QUIET;
info->dma = 0; info->dma = 0;
@ -2023,11 +2023,11 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
} }
info->last_block = block; info->last_block = block;
return action; return action;
} else if (rq->flags & (REQ_PC | REQ_SENSE)) { } else if (rq->cmd_type == REQ_TYPE_SENSE) {
return cdrom_do_packet_command(drive); return cdrom_do_packet_command(drive);
} else if (rq->flags & REQ_BLOCK_PC) { } else if (blk_pc_request(rq)) {
return cdrom_do_block_pc(drive, rq); return cdrom_do_block_pc(drive, rq);
} else if (rq->flags & REQ_SPECIAL) { } else if (blk_special_request(rq)) {
/* /*
* right now this can only be a reset... * right now this can only be a reset...
*/ */
@ -2105,7 +2105,7 @@ static int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
req.sense = sense; req.sense = sense;
req.cmd[0] = GPCMD_TEST_UNIT_READY; req.cmd[0] = GPCMD_TEST_UNIT_READY;
req.flags |= REQ_QUIET; req.cmd_flags |= REQ_QUIET;
#if ! STANDARD_ATAPI #if ! STANDARD_ATAPI
/* the Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to /* the Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to
@ -2207,7 +2207,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
req.cmd[0] = GPCMD_READ_CDVD_CAPACITY; req.cmd[0] = GPCMD_READ_CDVD_CAPACITY;
req.data = (char *)&capbuf; req.data = (char *)&capbuf;
req.data_len = sizeof(capbuf); req.data_len = sizeof(capbuf);
req.flags |= REQ_QUIET; req.cmd_flags |= REQ_QUIET;
stat = cdrom_queue_packet_command(drive, &req); stat = cdrom_queue_packet_command(drive, &req);
if (stat == 0) { if (stat == 0) {
@ -2230,7 +2230,7 @@ static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
req.sense = sense; req.sense = sense;
req.data = buf; req.data = buf;
req.data_len = buflen; req.data_len = buflen;
req.flags |= REQ_QUIET; req.cmd_flags |= REQ_QUIET;
req.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; req.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
req.cmd[6] = trackno; req.cmd[6] = trackno;
req.cmd[7] = (buflen >> 8); req.cmd[7] = (buflen >> 8);
@ -2531,7 +2531,7 @@ static int ide_cdrom_packet(struct cdrom_device_info *cdi,
req.timeout = cgc->timeout; req.timeout = cgc->timeout;
if (cgc->quiet) if (cgc->quiet)
req.flags |= REQ_QUIET; req.cmd_flags |= REQ_QUIET;
req.sense = cgc->sense; req.sense = cgc->sense;
cgc->stat = cdrom_queue_packet_command(drive, &req); cgc->stat = cdrom_queue_packet_command(drive, &req);
@ -2629,7 +2629,8 @@ int ide_cdrom_reset (struct cdrom_device_info *cdi)
int ret; int ret;
cdrom_prepare_request(drive, &req); cdrom_prepare_request(drive, &req);
req.flags = REQ_SPECIAL | REQ_QUIET; req.cmd_type = REQ_TYPE_SPECIAL;
req.cmd_flags = REQ_QUIET;
ret = ide_do_drive_cmd(drive, &req, ide_wait); ret = ide_do_drive_cmd(drive, &req, ide_wait);
/* /*
@ -3116,9 +3117,9 @@ static int ide_cdrom_prep_pc(struct request *rq)
static int ide_cdrom_prep_fn(request_queue_t *q, struct request *rq) static int ide_cdrom_prep_fn(request_queue_t *q, struct request *rq)
{ {
if (rq->flags & REQ_CMD) if (blk_fs_request(rq))
return ide_cdrom_prep_fs(q, rq); return ide_cdrom_prep_fs(q, rq);
else if (rq->flags & REQ_BLOCK_PC) else if (blk_pc_request(rq))
return ide_cdrom_prep_pc(rq); return ide_cdrom_prep_pc(rq);
return 0; return 0;

View File

@ -699,7 +699,8 @@ static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
rq->cmd[0] = WIN_FLUSH_CACHE; rq->cmd[0] = WIN_FLUSH_CACHE;
rq->flags |= REQ_DRIVE_TASK; rq->cmd_type = REQ_TYPE_ATA_TASK;
rq->cmd_flags |= REQ_SOFTBARRIER;
rq->buffer = rq->cmd; rq->buffer = rq->cmd;
} }
@ -740,7 +741,7 @@ static int set_multcount(ide_drive_t *drive, int arg)
if (drive->special.b.set_multmode) if (drive->special.b.set_multmode)
return -EBUSY; return -EBUSY;
ide_init_drive_cmd (&rq); ide_init_drive_cmd (&rq);
rq.flags = REQ_DRIVE_CMD; rq.cmd_type = REQ_TYPE_ATA_CMD;
drive->mult_req = arg; drive->mult_req = arg;
drive->special.b.set_multmode = 1; drive->special.b.set_multmode = 1;
(void) ide_do_drive_cmd (drive, &rq, ide_wait); (void) ide_do_drive_cmd (drive, &rq, ide_wait);

View File

@ -205,7 +205,7 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
ide_hwif_t *hwif = HWIF(drive); ide_hwif_t *hwif = HWIF(drive);
struct scatterlist *sg = hwif->sg_table; struct scatterlist *sg = hwif->sg_table;
BUG_ON((rq->flags & REQ_DRIVE_TASKFILE) && rq->nr_sectors > 256); BUG_ON((rq->cmd_type == REQ_TYPE_ATA_TASKFILE) && rq->nr_sectors > 256);
ide_map_sg(drive, rq); ide_map_sg(drive, rq);

View File

@ -588,7 +588,7 @@ static int idefloppy_do_end_request(ide_drive_t *drive, int uptodate, int nsecs)
/* Why does this happen? */ /* Why does this happen? */
if (!rq) if (!rq)
return 0; return 0;
if (!(rq->flags & REQ_SPECIAL)) { //if (!IDEFLOPPY_RQ_CMD (rq->cmd)) { if (!blk_special_request(rq)) {
/* our real local end request function */ /* our real local end request function */
ide_end_request(drive, uptodate, nsecs); ide_end_request(drive, uptodate, nsecs);
return 0; return 0;
@ -689,7 +689,7 @@ static void idefloppy_queue_pc_head (ide_drive_t *drive,idefloppy_pc_t *pc,struc
ide_init_drive_cmd(rq); ide_init_drive_cmd(rq);
rq->buffer = (char *) pc; rq->buffer = (char *) pc;
rq->flags = REQ_SPECIAL; //rq->cmd = IDEFLOPPY_PC_RQ; rq->cmd_type = REQ_TYPE_SPECIAL;
rq->rq_disk = floppy->disk; rq->rq_disk = floppy->disk;
(void) ide_do_drive_cmd(drive, rq, ide_preempt); (void) ide_do_drive_cmd(drive, rq, ide_preempt);
} }
@ -1250,7 +1250,7 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t
pc->callback = &idefloppy_rw_callback; pc->callback = &idefloppy_rw_callback;
pc->rq = rq; pc->rq = rq;
pc->b_count = cmd == READ ? 0 : rq->bio->bi_size; pc->b_count = cmd == READ ? 0 : rq->bio->bi_size;
if (rq->flags & REQ_RW) if (rq->cmd_flags & REQ_RW)
set_bit(PC_WRITING, &pc->flags); set_bit(PC_WRITING, &pc->flags);
pc->buffer = NULL; pc->buffer = NULL;
pc->request_transfer = pc->buffer_size = blocks * floppy->block_size; pc->request_transfer = pc->buffer_size = blocks * floppy->block_size;
@ -1303,7 +1303,7 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
idefloppy_do_end_request(drive, 0, 0); idefloppy_do_end_request(drive, 0, 0);
return ide_stopped; return ide_stopped;
} }
if (rq->flags & REQ_CMD) { if (blk_fs_request(rq)) {
if (((long)rq->sector % floppy->bs_factor) || if (((long)rq->sector % floppy->bs_factor) ||
(rq->nr_sectors % floppy->bs_factor)) { (rq->nr_sectors % floppy->bs_factor)) {
printk("%s: unsupported r/w request size\n", printk("%s: unsupported r/w request size\n",
@ -1313,9 +1313,9 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
} }
pc = idefloppy_next_pc_storage(drive); pc = idefloppy_next_pc_storage(drive);
idefloppy_create_rw_cmd(floppy, pc, rq, block); idefloppy_create_rw_cmd(floppy, pc, rq, block);
} else if (rq->flags & REQ_SPECIAL) { } else if (blk_special_request(rq)) {
pc = (idefloppy_pc_t *) rq->buffer; pc = (idefloppy_pc_t *) rq->buffer;
} else if (rq->flags & REQ_BLOCK_PC) { } else if (blk_pc_request(rq)) {
pc = idefloppy_next_pc_storage(drive); pc = idefloppy_next_pc_storage(drive);
if (idefloppy_blockpc_cmd(floppy, pc, rq)) { if (idefloppy_blockpc_cmd(floppy, pc, rq)) {
idefloppy_do_end_request(drive, 0, 0); idefloppy_do_end_request(drive, 0, 0);
@ -1343,7 +1343,7 @@ static int idefloppy_queue_pc_tail (ide_drive_t *drive,idefloppy_pc_t *pc)
ide_init_drive_cmd (&rq); ide_init_drive_cmd (&rq);
rq.buffer = (char *) pc; rq.buffer = (char *) pc;
rq.flags = REQ_SPECIAL; // rq.cmd = IDEFLOPPY_PC_RQ; rq.cmd_type = REQ_TYPE_SPECIAL;
rq.rq_disk = floppy->disk; rq.rq_disk = floppy->disk;
return ide_do_drive_cmd(drive, &rq, ide_wait); return ide_do_drive_cmd(drive, &rq, ide_wait);

View File

@ -59,7 +59,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
{ {
int ret = 1; int ret = 1;
BUG_ON(!(rq->flags & REQ_STARTED)); BUG_ON(!blk_rq_started(rq));
/* /*
* if failfast is set on a request, override number of sectors and * if failfast is set on a request, override number of sectors and
@ -244,7 +244,7 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
spin_lock_irqsave(&ide_lock, flags); spin_lock_irqsave(&ide_lock, flags);
BUG_ON(!(rq->flags & REQ_STARTED)); BUG_ON(!blk_rq_started(rq));
/* /*
* if failfast is set on a request, override number of sectors and * if failfast is set on a request, override number of sectors and
@ -366,7 +366,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
rq = HWGROUP(drive)->rq; rq = HWGROUP(drive)->rq;
spin_unlock_irqrestore(&ide_lock, flags); spin_unlock_irqrestore(&ide_lock, flags);
if (rq->flags & REQ_DRIVE_CMD) { if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
u8 *args = (u8 *) rq->buffer; u8 *args = (u8 *) rq->buffer;
if (rq->errors == 0) if (rq->errors == 0)
rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
@ -376,7 +376,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
args[1] = err; args[1] = err;
args[2] = hwif->INB(IDE_NSECTOR_REG); args[2] = hwif->INB(IDE_NSECTOR_REG);
} }
} else if (rq->flags & REQ_DRIVE_TASK) { } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
u8 *args = (u8 *) rq->buffer; u8 *args = (u8 *) rq->buffer;
if (rq->errors == 0) if (rq->errors == 0)
rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
@ -390,7 +390,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
args[5] = hwif->INB(IDE_HCYL_REG); args[5] = hwif->INB(IDE_HCYL_REG);
args[6] = hwif->INB(IDE_SELECT_REG); args[6] = hwif->INB(IDE_SELECT_REG);
} }
} else if (rq->flags & REQ_DRIVE_TASKFILE) { } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
ide_task_t *args = (ide_task_t *) rq->special; ide_task_t *args = (ide_task_t *) rq->special;
if (rq->errors == 0) if (rq->errors == 0)
rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
@ -587,7 +587,7 @@ ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
return ide_stopped; return ide_stopped;
/* retry only "normal" I/O: */ /* retry only "normal" I/O: */
if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) { if (!blk_fs_request(rq)) {
rq->errors = 1; rq->errors = 1;
ide_end_drive_cmd(drive, stat, err); ide_end_drive_cmd(drive, stat, err);
return ide_stopped; return ide_stopped;
@ -638,7 +638,7 @@ ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
return ide_stopped; return ide_stopped;
/* retry only "normal" I/O: */ /* retry only "normal" I/O: */
if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) { if (!blk_fs_request(rq)) {
rq->errors = 1; rq->errors = 1;
ide_end_drive_cmd(drive, BUSY_STAT, 0); ide_end_drive_cmd(drive, BUSY_STAT, 0);
return ide_stopped; return ide_stopped;
@ -808,7 +808,7 @@ void ide_map_sg(ide_drive_t *drive, struct request *rq)
if (hwif->sg_mapped) /* needed by ide-scsi */ if (hwif->sg_mapped) /* needed by ide-scsi */
return; return;
if ((rq->flags & REQ_DRIVE_TASKFILE) == 0) { if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg); hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
} else { } else {
sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE); sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
@ -844,7 +844,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
struct request *rq) struct request *rq)
{ {
ide_hwif_t *hwif = HWIF(drive); ide_hwif_t *hwif = HWIF(drive);
if (rq->flags & REQ_DRIVE_TASKFILE) { if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
ide_task_t *args = rq->special; ide_task_t *args = rq->special;
if (!args) if (!args)
@ -866,7 +866,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
if (args->tf_out_flags.all != 0) if (args->tf_out_flags.all != 0)
return flagged_taskfile(drive, args); return flagged_taskfile(drive, args);
return do_rw_taskfile(drive, args); return do_rw_taskfile(drive, args);
} else if (rq->flags & REQ_DRIVE_TASK) { } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
u8 *args = rq->buffer; u8 *args = rq->buffer;
u8 sel; u8 sel;
@ -892,7 +892,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
hwif->OUTB(sel, IDE_SELECT_REG); hwif->OUTB(sel, IDE_SELECT_REG);
ide_cmd(drive, args[0], args[2], &drive_cmd_intr); ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
return ide_started; return ide_started;
} else if (rq->flags & REQ_DRIVE_CMD) { } else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
u8 *args = rq->buffer; u8 *args = rq->buffer;
if (!args) if (!args)
@ -980,7 +980,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
ide_startstop_t startstop; ide_startstop_t startstop;
sector_t block; sector_t block;
BUG_ON(!(rq->flags & REQ_STARTED)); BUG_ON(!blk_rq_started(rq));
#ifdef DEBUG #ifdef DEBUG
printk("%s: start_request: current=0x%08lx\n", printk("%s: start_request: current=0x%08lx\n",
@ -1013,9 +1013,9 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
if (!drive->special.all) { if (!drive->special.all) {
ide_driver_t *drv; ide_driver_t *drv;
if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK)) if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
return execute_drive_cmd(drive, rq); rq->cmd_type == REQ_TYPE_ATA_TASK ||
else if (rq->flags & REQ_DRIVE_TASKFILE) rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
return execute_drive_cmd(drive, rq); return execute_drive_cmd(drive, rq);
else if (blk_pm_request(rq)) { else if (blk_pm_request(rq)) {
struct request_pm_state *pm = rq->end_io_data; struct request_pm_state *pm = rq->end_io_data;
@ -1264,7 +1264,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
* We count how many times we loop here to make sure we service * We count how many times we loop here to make sure we service
* all drives in the hwgroup without looping for ever * all drives in the hwgroup without looping for ever
*/ */
if (drive->blocked && !blk_pm_request(rq) && !(rq->flags & REQ_PREEMPT)) { if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) {
drive = drive->next ? drive->next : hwgroup->drive; drive = drive->next ? drive->next : hwgroup->drive;
if (loops++ < 4 && !blk_queue_plugged(drive->queue)) if (loops++ < 4 && !blk_queue_plugged(drive->queue))
goto again; goto again;
@ -1670,7 +1670,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
void ide_init_drive_cmd (struct request *rq) void ide_init_drive_cmd (struct request *rq)
{ {
memset(rq, 0, sizeof(*rq)); memset(rq, 0, sizeof(*rq));
rq->flags = REQ_DRIVE_CMD; rq->cmd_type = REQ_TYPE_ATA_CMD;
rq->ref_count = 1; rq->ref_count = 1;
} }
@ -1727,7 +1727,7 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
hwgroup->rq = NULL; hwgroup->rq = NULL;
if (action == ide_preempt || action == ide_head_wait) { if (action == ide_preempt || action == ide_head_wait) {
where = ELEVATOR_INSERT_FRONT; where = ELEVATOR_INSERT_FRONT;
rq->flags |= REQ_PREEMPT; rq->cmd_flags |= REQ_PREEMPT;
} }
__elv_add_request(drive->queue, rq, where, 0); __elv_add_request(drive->queue, rq, where, 0);
ide_do_request(hwgroup, IDE_NO_IRQ); ide_do_request(hwgroup, IDE_NO_IRQ);

View File

@ -456,13 +456,14 @@ static void ide_dump_opcode(ide_drive_t *drive)
spin_unlock(&ide_lock); spin_unlock(&ide_lock);
if (!rq) if (!rq)
return; return;
if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK)) { if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
rq->cmd_type == REQ_TYPE_ATA_TASK) {
char *args = rq->buffer; char *args = rq->buffer;
if (args) { if (args) {
opcode = args[0]; opcode = args[0];
found = 1; found = 1;
} }
} else if (rq->flags & REQ_DRIVE_TASKFILE) { } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
ide_task_t *args = rq->special; ide_task_t *args = rq->special;
if (args) { if (args) {
task_struct_t *tf = (task_struct_t *) args->tfRegister; task_struct_t *tf = (task_struct_t *) args->tfRegister;

View File

@ -1776,7 +1776,7 @@ static void idetape_create_request_sense_cmd (idetape_pc_t *pc)
static void idetape_init_rq(struct request *rq, u8 cmd) static void idetape_init_rq(struct request *rq, u8 cmd)
{ {
memset(rq, 0, sizeof(*rq)); memset(rq, 0, sizeof(*rq));
rq->flags = REQ_SPECIAL; rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd[0] = cmd; rq->cmd[0] = cmd;
} }
@ -2433,12 +2433,12 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
rq->sector, rq->nr_sectors, rq->current_nr_sectors); rq->sector, rq->nr_sectors, rq->current_nr_sectors);
#endif /* IDETAPE_DEBUG_LOG */ #endif /* IDETAPE_DEBUG_LOG */
if ((rq->flags & REQ_SPECIAL) == 0) { if (!blk_special_request(rq)) {
/* /*
* We do not support buffer cache originated requests. * We do not support buffer cache originated requests.
*/ */
printk(KERN_NOTICE "ide-tape: %s: Unsupported request in " printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
"request queue (%ld)\n", drive->name, rq->flags); "request queue (%d)\n", drive->name, rq->cmd_type);
ide_end_request(drive, 0, 0); ide_end_request(drive, 0, 0);
return ide_stopped; return ide_stopped;
} }
@ -2768,7 +2768,7 @@ static void idetape_wait_for_request (ide_drive_t *drive, struct request *rq)
idetape_tape_t *tape = drive->driver_data; idetape_tape_t *tape = drive->driver_data;
#if IDETAPE_DEBUG_BUGS #if IDETAPE_DEBUG_BUGS
if (rq == NULL || (rq->flags & REQ_SPECIAL) == 0) { if (rq == NULL || !blk_special_request(rq)) {
printk (KERN_ERR "ide-tape: bug: Trying to sleep on non-valid request\n"); printk (KERN_ERR "ide-tape: bug: Trying to sleep on non-valid request\n");
return; return;
} }

View File

@ -363,7 +363,7 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat) static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{ {
if (rq->flags & REQ_DRIVE_TASKFILE) { if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
ide_task_t *task = rq->special; ide_task_t *task = rq->special;
if (task->tf_out_flags.all) { if (task->tf_out_flags.all) {
@ -474,7 +474,7 @@ static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long
struct request rq; struct request rq;
memset(&rq, 0, sizeof(rq)); memset(&rq, 0, sizeof(rq));
rq.flags = REQ_DRIVE_TASKFILE; rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
rq.buffer = buf; rq.buffer = buf;
/* /*
@ -499,7 +499,7 @@ static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long
rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors; rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors;
if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE) if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
rq.flags |= REQ_RW; rq.cmd_flags |= REQ_RW;
} }
rq.special = args; rq.special = args;
@ -737,7 +737,7 @@ static int ide_wait_cmd_task(ide_drive_t *drive, u8 *buf)
struct request rq; struct request rq;
ide_init_drive_cmd(&rq); ide_init_drive_cmd(&rq);
rq.flags = REQ_DRIVE_TASK; rq.cmd_type = REQ_TYPE_ATA_TASK;
rq.buffer = buf; rq.buffer = buf;
return ide_do_drive_cmd(drive, &rq, ide_wait); return ide_do_drive_cmd(drive, &rq, ide_wait);
} }

View File

@ -1217,7 +1217,7 @@ static int generic_ide_suspend(struct device *dev, pm_message_t mesg)
memset(&rq, 0, sizeof(rq)); memset(&rq, 0, sizeof(rq));
memset(&rqpm, 0, sizeof(rqpm)); memset(&rqpm, 0, sizeof(rqpm));
memset(&args, 0, sizeof(args)); memset(&args, 0, sizeof(args));
rq.flags = REQ_PM_SUSPEND; rq.cmd_type = REQ_TYPE_PM_SUSPEND;
rq.special = &args; rq.special = &args;
rq.end_io_data = &rqpm; rq.end_io_data = &rqpm;
rqpm.pm_step = ide_pm_state_start_suspend; rqpm.pm_step = ide_pm_state_start_suspend;
@ -1238,7 +1238,7 @@ static int generic_ide_resume(struct device *dev)
memset(&rq, 0, sizeof(rq)); memset(&rq, 0, sizeof(rq));
memset(&rqpm, 0, sizeof(rqpm)); memset(&rqpm, 0, sizeof(rqpm));
memset(&args, 0, sizeof(args)); memset(&args, 0, sizeof(args));
rq.flags = REQ_PM_RESUME; rq.cmd_type = REQ_TYPE_PM_RESUME;
rq.special = &args; rq.special = &args;
rq.end_io_data = &rqpm; rq.end_io_data = &rqpm;
rqpm.pm_step = ide_pm_state_start_resume; rqpm.pm_step = ide_pm_state_start_resume;

View File

@ -626,7 +626,7 @@ repeat:
req->rq_disk->disk_name, (req->cmd == READ)?"read":"writ", req->rq_disk->disk_name, (req->cmd == READ)?"read":"writ",
cyl, head, sec, nsect, req->buffer); cyl, head, sec, nsect, req->buffer);
#endif #endif
if (req->flags & REQ_CMD) { if (blk_fs_request(req)) {
switch (rq_data_dir(req)) { switch (rq_data_dir(req)) {
case READ: case READ:
hd_out(disk,nsect,sec,head,cyl,WIN_READ,&read_intr); hd_out(disk,nsect,sec,head,cyl,WIN_READ,&read_intr);

View File

@ -126,7 +126,8 @@ static struct request *get_failover_req(struct emc_handler *h,
memset(&rq->cmd, 0, BLK_MAX_CDB); memset(&rq->cmd, 0, BLK_MAX_CDB);
rq->timeout = EMC_FAILOVER_TIMEOUT; rq->timeout = EMC_FAILOVER_TIMEOUT;
rq->flags |= (REQ_BLOCK_PC | REQ_FAILFAST | REQ_NOMERGE); rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
return rq; return rq;
} }

View File

@ -390,9 +390,9 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
} }
/* request is already processed by us, so return */ /* request is already processed by us, so return */
if (req->flags & REQ_SPECIAL) { if (blk_special_request(req)) {
osm_debug("REQ_SPECIAL already set!\n"); osm_debug("REQ_SPECIAL already set!\n");
req->flags |= REQ_DONTPREP; req->cmd_flags |= REQ_DONTPREP;
return BLKPREP_OK; return BLKPREP_OK;
} }
@ -411,7 +411,8 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
ireq = req->special; ireq = req->special;
/* do not come back here */ /* do not come back here */
req->flags |= REQ_DONTPREP | REQ_SPECIAL; req->cmd_type = REQ_TYPE_SPECIAL;
req->cmd_flags |= REQ_DONTPREP;
return BLKPREP_OK; return BLKPREP_OK;
}; };

View File

@ -28,7 +28,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
struct mmc_queue *mq = q->queuedata; struct mmc_queue *mq = q->queuedata;
int ret = BLKPREP_KILL; int ret = BLKPREP_KILL;
if (req->flags & REQ_SPECIAL) { if (blk_special_request(req)) {
/* /*
* Special commands already have the command * Special commands already have the command
* blocks already setup in req->special. * blocks already setup in req->special.
@ -36,7 +36,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
BUG_ON(!req->special); BUG_ON(!req->special);
ret = BLKPREP_OK; ret = BLKPREP_OK;
} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { } else if (blk_fs_request(req) || blk_pc_request(req)) {
/* /*
* Block I/O requests need translating according * Block I/O requests need translating according
* to the protocol. * to the protocol.
@ -50,7 +50,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
} }
if (ret == BLKPREP_OK) if (ret == BLKPREP_OK)
req->flags |= REQ_DONTPREP; req->cmd_flags |= REQ_DONTPREP;
return ret; return ret;
} }

View File

@ -46,7 +46,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
nsect = req->current_nr_sectors; nsect = req->current_nr_sectors;
buf = req->buffer; buf = req->buffer;
if (!(req->flags & REQ_CMD)) if (!blk_fs_request(req))
return 0; return 0;
if (block + nsect > get_capacity(req->rq_disk)) if (block + nsect > get_capacity(req->rq_disk))

View File

@ -529,7 +529,7 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
} }
cqr->retries = DIAG_MAX_RETRIES; cqr->retries = DIAG_MAX_RETRIES;
cqr->buildclk = get_clock(); cqr->buildclk = get_clock();
if (req->flags & REQ_FAILFAST) if (req->cmd_flags & REQ_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->device = device; cqr->device = device;
cqr->expires = DIAG_TIMEOUT; cqr->expires = DIAG_TIMEOUT;

View File

@ -1266,7 +1266,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
recid++; recid++;
} }
} }
if (req->flags & REQ_FAILFAST) if (req->cmd_flags & REQ_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->device = device; cqr->device = device;
cqr->expires = 5 * 60 * HZ; /* 5 minutes */ cqr->expires = 5 * 60 * HZ; /* 5 minutes */

View File

@ -344,7 +344,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
recid++; recid++;
} }
} }
if (req->flags & REQ_FAILFAST) if (req->cmd_flags & REQ_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->device = device; cqr->device = device;
cqr->expires = 5 * 60 * HZ; /* 5 minutes */ cqr->expires = 5 * 60 * HZ; /* 5 minutes */

View File

@ -2862,7 +2862,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
aic_dev->r_total++; aic_dev->r_total++;
ptr = aic_dev->r_bins; ptr = aic_dev->r_bins;
} }
if(cmd->device->simple_tags && cmd->request->flags & REQ_HARDBARRIER) if(cmd->device->simple_tags && cmd->request->cmd_flags & REQ_HARDBARRIER)
{ {
aic_dev->barrier_total++; aic_dev->barrier_total++;
if(scb->tag_action == MSG_ORDERED_Q_TAG) if(scb->tag_action == MSG_ORDERED_Q_TAG)
@ -10158,7 +10158,7 @@ aic7xxx_buildscb(struct aic7xxx_host *p, Scsi_Cmnd *cmd,
/* We always force TEST_UNIT_READY to untagged */ /* We always force TEST_UNIT_READY to untagged */
if (cmd->cmnd[0] != TEST_UNIT_READY && sdptr->simple_tags) if (cmd->cmnd[0] != TEST_UNIT_READY && sdptr->simple_tags)
{ {
if (req->flags & REQ_HARDBARRIER) if (req->cmd_flags & REQ_HARDBARRIER)
{ {
if(sdptr->ordered_tags) if(sdptr->ordered_tags)
{ {

View File

@ -344,7 +344,7 @@ static int idescsi_check_condition(ide_drive_t *drive, struct request *failed_co
pc->buffer = buf; pc->buffer = buf;
pc->c[0] = REQUEST_SENSE; pc->c[0] = REQUEST_SENSE;
pc->c[4] = pc->request_transfer = pc->buffer_size = SCSI_SENSE_BUFFERSIZE; pc->c[4] = pc->request_transfer = pc->buffer_size = SCSI_SENSE_BUFFERSIZE;
rq->flags = REQ_SENSE; rq->cmd_type = REQ_TYPE_SENSE;
pc->timeout = jiffies + WAIT_READY; pc->timeout = jiffies + WAIT_READY;
/* NOTE! Save the failed packet command in "rq->buffer" */ /* NOTE! Save the failed packet command in "rq->buffer" */
rq->buffer = (void *) failed_command->special; rq->buffer = (void *) failed_command->special;
@ -398,12 +398,12 @@ static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs)
int errors = rq->errors; int errors = rq->errors;
unsigned long flags; unsigned long flags;
if (!(rq->flags & (REQ_SPECIAL|REQ_SENSE))) { if (!blk_special_request(rq) && !blk_sense_request(rq)) {
ide_end_request(drive, uptodate, nrsecs); ide_end_request(drive, uptodate, nrsecs);
return 0; return 0;
} }
ide_end_drive_cmd (drive, 0, 0); ide_end_drive_cmd (drive, 0, 0);
if (rq->flags & REQ_SENSE) { if (blk_sense_request(rq)) {
idescsi_pc_t *opc = (idescsi_pc_t *) rq->buffer; idescsi_pc_t *opc = (idescsi_pc_t *) rq->buffer;
if (log) { if (log) {
printk ("ide-scsi: %s: wrap up check %lu, rst = ", drive->name, opc->scsi_cmd->serial_number); printk ("ide-scsi: %s: wrap up check %lu, rst = ", drive->name, opc->scsi_cmd->serial_number);
@ -712,7 +712,7 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
printk (KERN_INFO "sector: %ld, nr_sectors: %ld, current_nr_sectors: %d\n",rq->sector,rq->nr_sectors,rq->current_nr_sectors); printk (KERN_INFO "sector: %ld, nr_sectors: %ld, current_nr_sectors: %d\n",rq->sector,rq->nr_sectors,rq->current_nr_sectors);
#endif /* IDESCSI_DEBUG_LOG */ #endif /* IDESCSI_DEBUG_LOG */
if (rq->flags & (REQ_SPECIAL|REQ_SENSE)) { if (blk_sense_request(rq) || blk_special_request(rq)) {
return idescsi_issue_pc (drive, (idescsi_pc_t *) rq->special); return idescsi_issue_pc (drive, (idescsi_pc_t *) rq->special);
} }
blk_dump_rq_flags(rq, "ide-scsi: unsup command"); blk_dump_rq_flags(rq, "ide-scsi: unsup command");
@ -938,7 +938,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
ide_init_drive_cmd (rq); ide_init_drive_cmd (rq);
rq->special = (char *) pc; rq->special = (char *) pc;
rq->flags = REQ_SPECIAL; rq->cmd_type = REQ_TYPE_SPECIAL;
spin_unlock_irq(host->host_lock); spin_unlock_irq(host->host_lock);
rq->rq_disk = scsi->disk; rq->rq_disk = scsi->disk;
(void) ide_do_drive_cmd (drive, rq, ide_end); (void) ide_do_drive_cmd (drive, rq, ide_end);
@ -992,7 +992,7 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
*/ */
printk (KERN_ERR "ide-scsi: cmd aborted!\n"); printk (KERN_ERR "ide-scsi: cmd aborted!\n");
if (scsi->pc->rq->flags & REQ_SENSE) if (blk_sense_request(scsi->pc->rq))
kfree(scsi->pc->buffer); kfree(scsi->pc->buffer);
kfree(scsi->pc->rq); kfree(scsi->pc->rq);
kfree(scsi->pc); kfree(scsi->pc);
@ -1042,7 +1042,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
/* kill current request */ /* kill current request */
blkdev_dequeue_request(req); blkdev_dequeue_request(req);
end_that_request_last(req, 0); end_that_request_last(req, 0);
if (req->flags & REQ_SENSE) if (blk_sense_request(req))
kfree(scsi->pc->buffer); kfree(scsi->pc->buffer);
kfree(scsi->pc); kfree(scsi->pc);
scsi->pc = NULL; scsi->pc = NULL;

View File

@ -67,7 +67,6 @@ static void __init pluto_detect_done(Scsi_Cmnd *SCpnt)
static void __init pluto_detect_scsi_done(Scsi_Cmnd *SCpnt) static void __init pluto_detect_scsi_done(Scsi_Cmnd *SCpnt)
{ {
SCpnt->request->rq_status = RQ_SCSI_DONE;
PLND(("Detect done %08lx\n", (long)SCpnt)) PLND(("Detect done %08lx\n", (long)SCpnt))
if (atomic_dec_and_test (&fcss)) if (atomic_dec_and_test (&fcss))
up(&fc_sem); up(&fc_sem);
@ -166,7 +165,7 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
SCpnt->cmd_len = COMMAND_SIZE(INQUIRY); SCpnt->cmd_len = COMMAND_SIZE(INQUIRY);
SCpnt->request->rq_status = RQ_SCSI_BUSY; SCpnt->request->cmd_flags &= ~REQ_STARTED;
SCpnt->done = pluto_detect_done; SCpnt->done = pluto_detect_done;
SCpnt->request_bufflen = 256; SCpnt->request_bufflen = 256;
@ -178,7 +177,8 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
for (retry = 0; retry < 5; retry++) { for (retry = 0; retry < 5; retry++) {
for (i = 0; i < fcscount; i++) { for (i = 0; i < fcscount; i++) {
if (!fcs[i].fc) break; if (!fcs[i].fc) break;
if (fcs[i].cmd.request->rq_status != RQ_SCSI_DONE) { if (!(fcs[i].cmd.request->cmd_flags & REQ_STARTED)) {
fcs[i].cmd.request->cmd_flags |= REQ_STARTED;
disable_irq(fcs[i].fc->irq); disable_irq(fcs[i].fc->irq);
PLND(("queuecommand %d %d\n", retry, i)) PLND(("queuecommand %d %d\n", retry, i))
fcp_scsi_queuecommand (&(fcs[i].cmd), fcp_scsi_queuecommand (&(fcs[i].cmd),

View File

@ -82,7 +82,7 @@ static void scsi_unprep_request(struct request *req)
{ {
struct scsi_cmnd *cmd = req->special; struct scsi_cmnd *cmd = req->special;
req->flags &= ~REQ_DONTPREP; req->cmd_flags &= ~REQ_DONTPREP;
req->special = NULL; req->special = NULL;
scsi_put_command(cmd); scsi_put_command(cmd);
@ -196,7 +196,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
req->sense_len = 0; req->sense_len = 0;
req->retries = retries; req->retries = retries;
req->timeout = timeout; req->timeout = timeout;
req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET; req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
/* /*
* head injection *required* here otherwise quiesce won't work * head injection *required* here otherwise quiesce won't work
@ -397,7 +398,8 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
req = blk_get_request(sdev->request_queue, write, gfp); req = blk_get_request(sdev->request_queue, write, gfp);
if (!req) if (!req)
goto free_sense; goto free_sense;
req->flags |= REQ_BLOCK_PC | REQ_QUIET; req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_QUIET;
if (use_sg) if (use_sg)
err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp); err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
@ -933,7 +935,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
break; break;
} }
} }
if (!(req->flags & REQ_QUIET)) { if (!(req->cmd_flags & REQ_QUIET)) {
scmd_printk(KERN_INFO, cmd, scmd_printk(KERN_INFO, cmd,
"Device not ready: "); "Device not ready: ");
scsi_print_sense_hdr("", &sshdr); scsi_print_sense_hdr("", &sshdr);
@ -941,7 +943,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
scsi_end_request(cmd, 0, this_count, 1); scsi_end_request(cmd, 0, this_count, 1);
return; return;
case VOLUME_OVERFLOW: case VOLUME_OVERFLOW:
if (!(req->flags & REQ_QUIET)) { if (!(req->cmd_flags & REQ_QUIET)) {
scmd_printk(KERN_INFO, cmd, scmd_printk(KERN_INFO, cmd,
"Volume overflow, CDB: "); "Volume overflow, CDB: ");
__scsi_print_command(cmd->cmnd); __scsi_print_command(cmd->cmnd);
@ -963,7 +965,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
return; return;
} }
if (result) { if (result) {
if (!(req->flags & REQ_QUIET)) { if (!(req->cmd_flags & REQ_QUIET)) {
scmd_printk(KERN_INFO, cmd, scmd_printk(KERN_INFO, cmd,
"SCSI error: return code = 0x%08x\n", "SCSI error: return code = 0x%08x\n",
result); result);
@ -995,7 +997,7 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
/* /*
* if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
*/ */
if ((req->flags & REQ_BLOCK_PC) && !req->bio) { if (blk_pc_request(req) && !req->bio) {
cmd->request_bufflen = req->data_len; cmd->request_bufflen = req->data_len;
cmd->request_buffer = req->data; cmd->request_buffer = req->data;
req->buffer = req->data; req->buffer = req->data;
@ -1139,13 +1141,12 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
* these two cases differently. We differentiate by looking * these two cases differently. We differentiate by looking
* at request->cmd, as this tells us the real story. * at request->cmd, as this tells us the real story.
*/ */
if (req->flags & REQ_SPECIAL && req->special) { if (blk_special_request(req) && req->special)
cmd = req->special; cmd = req->special;
} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { else if (blk_pc_request(req) || blk_fs_request(req)) {
if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)){
if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) { if (specials_only == SDEV_QUIESCE ||
if(specials_only == SDEV_QUIESCE || specials_only == SDEV_BLOCK)
specials_only == SDEV_BLOCK)
goto defer; goto defer;
sdev_printk(KERN_ERR, sdev, sdev_printk(KERN_ERR, sdev,
@ -1153,7 +1154,6 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
goto kill; goto kill;
} }
/* /*
* Now try and find a command block that we can use. * Now try and find a command block that we can use.
*/ */
@ -1184,7 +1184,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
* lock. We hope REQ_STARTED prevents anything untoward from * lock. We hope REQ_STARTED prevents anything untoward from
* happening now. * happening now.
*/ */
if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { if (blk_fs_request(req) || blk_pc_request(req)) {
int ret; int ret;
/* /*
@ -1216,7 +1216,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
/* /*
* Initialize the actual SCSI command for this request. * Initialize the actual SCSI command for this request.
*/ */
if (req->flags & REQ_BLOCK_PC) { if (blk_pc_request(req)) {
scsi_setup_blk_pc_cmnd(cmd); scsi_setup_blk_pc_cmnd(cmd);
} else if (req->rq_disk) { } else if (req->rq_disk) {
struct scsi_driver *drv; struct scsi_driver *drv;
@ -1233,7 +1233,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
/* /*
* The request is now prepped, no need to come back here * The request is now prepped, no need to come back here
*/ */
req->flags |= REQ_DONTPREP; req->cmd_flags |= REQ_DONTPREP;
return BLKPREP_OK; return BLKPREP_OK;
defer: defer:
@ -1454,8 +1454,9 @@ static void scsi_request_fn(struct request_queue *q)
if (unlikely(cmd == NULL)) { if (unlikely(cmd == NULL)) {
printk(KERN_CRIT "impossible request in %s.\n" printk(KERN_CRIT "impossible request in %s.\n"
"please mail a stack trace to " "please mail a stack trace to "
"linux-scsi@vger.kernel.org", "linux-scsi@vger.kernel.org\n",
__FUNCTION__); __FUNCTION__);
blk_dump_rq_flags(req, "foo");
BUG(); BUG();
} }
spin_lock(shost->host_lock); spin_lock(shost->host_lock);

View File

@ -443,8 +443,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
SCpnt->cmnd[0] = READ_6; SCpnt->cmnd[0] = READ_6;
SCpnt->sc_data_direction = DMA_FROM_DEVICE; SCpnt->sc_data_direction = DMA_FROM_DEVICE;
} else { } else {
printk(KERN_ERR "sd: Unknown command %lx\n", rq->flags); printk(KERN_ERR "sd: Unknown command %x\n", rq->cmd_flags);
/* overkill panic("Unknown sd command %lx\n", rq->flags); */
return 0; return 0;
} }
@ -840,7 +839,7 @@ static int sd_issue_flush(struct device *dev, sector_t *error_sector)
static void sd_prepare_flush(request_queue_t *q, struct request *rq) static void sd_prepare_flush(request_queue_t *q, struct request *rq)
{ {
memset(rq->cmd, 0, sizeof(rq->cmd)); memset(rq->cmd, 0, sizeof(rq->cmd));
rq->flags |= REQ_BLOCK_PC; rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->timeout = SD_TIMEOUT; rq->timeout = SD_TIMEOUT;
rq->cmd[0] = SYNCHRONIZE_CACHE; rq->cmd[0] = SYNCHRONIZE_CACHE;
rq->cmd_len = 10; rq->cmd_len = 10;

View File

@ -2017,7 +2017,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done
!= cmd)) != cmd))
{ {
if(cmd->request->flags & REQ_CMD) { if(blk_fs_request(cmd->request)) {
sun3scsi_dma_setup(d, count, sun3scsi_dma_setup(d, count,
rq_data_dir(cmd->request)); rq_data_dir(cmd->request));
sun3_dma_setup_done = cmd; sun3_dma_setup_done = cmd;

View File

@ -524,7 +524,7 @@ static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, Scsi_Cmnd *cmd, static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, Scsi_Cmnd *cmd,
int write_flag) int write_flag)
{ {
if(cmd->request->flags & REQ_CMD) if(blk_fs_request(cmd->request))
return wanted; return wanted;
else else
return 0; return 0;

View File

@ -458,7 +458,7 @@ static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, Scsi_Cmnd *cmd, static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, Scsi_Cmnd *cmd,
int write_flag) int write_flag)
{ {
if(cmd->request->flags & REQ_CMD) if(blk_fs_request(cmd->request))
return wanted; return wanted;
else else
return 0; return 0;

View File

@ -120,6 +120,86 @@ struct request_list {
wait_queue_head_t wait[2]; wait_queue_head_t wait[2];
}; };
/*
* request command types
*/
enum rq_cmd_type_bits {
REQ_TYPE_FS = 1, /* fs request */
REQ_TYPE_BLOCK_PC, /* scsi command */
REQ_TYPE_SENSE, /* sense request */
REQ_TYPE_PM_SUSPEND, /* suspend request */
REQ_TYPE_PM_RESUME, /* resume request */
REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
REQ_TYPE_FLUSH, /* flush request */
REQ_TYPE_SPECIAL, /* driver defined type */
REQ_TYPE_LINUX_BLOCK, /* generic block layer message */
/*
* for ATA/ATAPI devices. this really doesn't belong here, ide should
* use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
* private REQ_LB opcodes to differentiate what type of request this is
*/
REQ_TYPE_ATA_CMD,
REQ_TYPE_ATA_TASK,
REQ_TYPE_ATA_TASKFILE,
};
/*
* For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
* sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
* SCSI cdb.
*
* 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
* typically to differentiate REQ_TYPE_SPECIAL requests.
*
*/
enum {
/*
* just examples for now
*/
REQ_LB_OP_EJECT = 0x40, /* eject request */
REQ_LB_OP_FLUSH = 0x41, /* flush device */
};
/*
* request type modified bits. first three bits match BIO_RW* bits, important
*/
enum rq_flag_bits {
__REQ_RW, /* not set, read. set, write */
__REQ_FAILFAST, /* no low level driver retries */
__REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
__REQ_HARDBARRIER, /* may not be passed by drive either */
__REQ_FUA, /* forced unit access */
__REQ_NOMERGE, /* don't touch this for merging */
__REQ_STARTED, /* drive already may have started this one */
__REQ_DONTPREP, /* don't call prep for this one */
__REQ_QUEUED, /* uses queueing */
__REQ_ELVPRIV, /* elevator private data attached */
__REQ_FAILED, /* set if the request failed */
__REQ_QUIET, /* don't worry about errors */
__REQ_PREEMPT, /* set for "ide_preempt" requests */
__REQ_ORDERED_COLOR, /* is before or after barrier */
__REQ_RW_SYNC, /* request is sync (O_DIRECT) */
__REQ_NR_BITS, /* stops here */
};
#define REQ_RW (1 << __REQ_RW)
#define REQ_FAILFAST (1 << __REQ_FAILFAST)
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
#define REQ_FUA (1 << __REQ_FUA)
#define REQ_NOMERGE (1 << __REQ_NOMERGE)
#define REQ_STARTED (1 << __REQ_STARTED)
#define REQ_DONTPREP (1 << __REQ_DONTPREP)
#define REQ_QUEUED (1 << __REQ_QUEUED)
#define REQ_ELVPRIV (1 << __REQ_ELVPRIV)
#define REQ_FAILED (1 << __REQ_FAILED)
#define REQ_QUIET (1 << __REQ_QUIET)
#define REQ_PREEMPT (1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC (1 << __REQ_RW_SYNC)
#define BLK_MAX_CDB 16 #define BLK_MAX_CDB 16
/* /*
@ -129,7 +209,8 @@ struct request {
struct list_head queuelist; struct list_head queuelist;
struct list_head donelist; struct list_head donelist;
unsigned long flags; /* see REQ_ bits below */ unsigned int cmd_flags;
enum rq_cmd_type_bits cmd_type;
/* Maintain bio traversal state for part by part I/O submission. /* Maintain bio traversal state for part by part I/O submission.
* hard_* are block layer internals, no driver should touch them! * hard_* are block layer internals, no driver should touch them!
@ -202,73 +283,7 @@ struct request {
}; };
/* /*
* first three bits match BIO_RW* bits, important * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
*/
enum rq_flag_bits {
__REQ_RW, /* not set, read. set, write */
__REQ_FAILFAST, /* no low level driver retries */
__REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
__REQ_HARDBARRIER, /* may not be passed by drive either */
__REQ_FUA, /* forced unit access */
__REQ_CMD, /* is a regular fs rw request */
__REQ_NOMERGE, /* don't touch this for merging */
__REQ_STARTED, /* drive already may have started this one */
__REQ_DONTPREP, /* don't call prep for this one */
__REQ_QUEUED, /* uses queueing */
__REQ_ELVPRIV, /* elevator private data attached */
/*
* for ATA/ATAPI devices
*/
__REQ_PC, /* packet command (special) */
__REQ_BLOCK_PC, /* queued down pc from block layer */
__REQ_SENSE, /* sense retrival */
__REQ_FAILED, /* set if the request failed */
__REQ_QUIET, /* don't worry about errors */
__REQ_SPECIAL, /* driver suplied command */
__REQ_DRIVE_CMD,
__REQ_DRIVE_TASK,
__REQ_DRIVE_TASKFILE,
__REQ_PREEMPT, /* set for "ide_preempt" requests */
__REQ_PM_SUSPEND, /* suspend request */
__REQ_PM_RESUME, /* resume request */
__REQ_PM_SHUTDOWN, /* shutdown request */
__REQ_ORDERED_COLOR, /* is before or after barrier */
__REQ_RW_SYNC, /* request is sync (O_DIRECT) */
__REQ_NR_BITS, /* stops here */
};
#define REQ_RW (1 << __REQ_RW)
#define REQ_FAILFAST (1 << __REQ_FAILFAST)
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
#define REQ_FUA (1 << __REQ_FUA)
#define REQ_CMD (1 << __REQ_CMD)
#define REQ_NOMERGE (1 << __REQ_NOMERGE)
#define REQ_STARTED (1 << __REQ_STARTED)
#define REQ_DONTPREP (1 << __REQ_DONTPREP)
#define REQ_QUEUED (1 << __REQ_QUEUED)
#define REQ_ELVPRIV (1 << __REQ_ELVPRIV)
#define REQ_PC (1 << __REQ_PC)
#define REQ_BLOCK_PC (1 << __REQ_BLOCK_PC)
#define REQ_SENSE (1 << __REQ_SENSE)
#define REQ_FAILED (1 << __REQ_FAILED)
#define REQ_QUIET (1 << __REQ_QUIET)
#define REQ_SPECIAL (1 << __REQ_SPECIAL)
#define REQ_DRIVE_CMD (1 << __REQ_DRIVE_CMD)
#define REQ_DRIVE_TASK (1 << __REQ_DRIVE_TASK)
#define REQ_DRIVE_TASKFILE (1 << __REQ_DRIVE_TASKFILE)
#define REQ_PREEMPT (1 << __REQ_PREEMPT)
#define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND)
#define REQ_PM_RESUME (1 << __REQ_PM_RESUME)
#define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN)
#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC (1 << __REQ_RW_SYNC)
/*
* State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
* requests. Some step values could eventually be made generic. * requests. Some step values could eventually be made generic.
*/ */
struct request_pm_state struct request_pm_state
@ -490,25 +505,28 @@ enum {
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q) ((q)->ordseq) #define blk_queue_flushing(q) ((q)->ordseq)
#define blk_fs_request(rq) ((rq)->flags & REQ_CMD) #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC) #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_noretry_request(rq) ((rq)->flags & REQ_FAILFAST) #define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_rq_started(rq) ((rq)->flags & REQ_STARTED) #define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE)
#define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
#define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq)) #define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq))
#define blk_pm_suspend_request(rq) ((rq)->flags & REQ_PM_SUSPEND) #define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq) ((rq)->flags & REQ_PM_RESUME) #define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq) \ #define blk_pm_request(rq) \
((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME)) (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
#define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED) #define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER) #define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq) ((rq)->flags & REQ_FUA) #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
#define rq_data_dir(rq) ((rq)->flags & 1) #define rq_data_dir(rq) ((rq)->cmd_flags & 1)
static inline int blk_queue_full(struct request_queue *q, int rw) static inline int blk_queue_full(struct request_queue *q, int rw)
{ {
@ -541,7 +559,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
#define RQ_NOMERGE_FLAGS \ #define RQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq) \ #define rq_mergeable(rq) \
(!((rq)->flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq))) (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
/* /*
* noop, requests are automagically marked as active/inactive by I/O * noop, requests are automagically marked as active/inactive by I/O
@ -737,7 +755,7 @@ extern void blk_put_queue(request_queue_t *);
*/ */
#define blk_queue_tag_depth(q) ((q)->queue_tags->busy) #define blk_queue_tag_depth(q) ((q)->queue_tags->busy)
#define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth) #define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq) ((rq)->flags & REQ_QUEUED) #define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(request_queue_t *, struct request *); extern int blk_queue_start_tag(request_queue_t *, struct request *);
extern struct request *blk_queue_find_tag(request_queue_t *, int); extern struct request *blk_queue_find_tag(request_queue_t *, int);
extern void blk_queue_end_tag(request_queue_t *, struct request *); extern void blk_queue_end_tag(request_queue_t *, struct request *);

View File

@ -148,7 +148,7 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
u32 what) u32 what)
{ {
struct blk_trace *bt = q->blk_trace; struct blk_trace *bt = q->blk_trace;
int rw = rq->flags & 0x03; int rw = rq->cmd_flags & 0x03;
if (likely(!bt)) if (likely(!bt))
return; return;

View File

@ -100,7 +100,7 @@ static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg)
struct scsi_device *sdev = cmd->device; struct scsi_device *sdev = cmd->device;
if (blk_rq_tagged(req)) { if (blk_rq_tagged(req)) {
if (sdev->ordered_tags && req->flags & REQ_HARDBARRIER) if (sdev->ordered_tags && req->cmd_flags & REQ_HARDBARRIER)
*msg++ = MSG_ORDERED_TAG; *msg++ = MSG_ORDERED_TAG;
else else
*msg++ = MSG_SIMPLE_TAG; *msg++ = MSG_SIMPLE_TAG;