mirror of https://github.com/torvalds/linux.git
blk-mq: centralise related handling into blk_mq_get_driver_tag
Move the .nr_active update and the request assignment into blk_mq_get_driver_tag(); both naturally belong to the act of getting a driver tag. As a result, the blk-flush code is simplified: the flush request no longer needs to update the tag-to-request table (hctx->tags->rqs[]) manually.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7bf137298c
commit 568f270065
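The consolidated helper is easiest to see as a standalone model before reading the hunks below. The sketch is illustrative only: the types, the flag names F_TAG_SHARED and RQF_INFLIGHT, and the trivial alloc_tag() counter are invented stand-ins for the kernel's BLK_MQ_F_TAG_SHARED, RQF_MQ_INFLIGHT, and sbitmap allocator. It shows the pattern the patch converges on: tag allocation, shared-queue accounting, and the rqs[] table update all in one function, with the inflight flag keeping the nr_active increment idempotent.

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel structures; everything here
     * is a model for illustration, not kernel code. */
    #define NO_TAG        (-1)
    #define F_TAG_SHARED  (1u << 0)   /* models BLK_MQ_F_TAG_SHARED */
    #define RQF_INFLIGHT  (1u << 1)   /* models RQF_MQ_INFLIGHT */

    struct hw_ctx {
            unsigned int flags;
            atomic_int nr_active;
            struct request *rqs[16];  /* models hctx->tags->rqs[] */
    };

    struct request {
            int tag;
            unsigned int rq_flags;
            struct hw_ctx *hctx;
    };

    /* Models __blk_mq_get_driver_tag(): tag allocation only.  A
     * counter stands in for the kernel's sbitmap allocator. */
    static bool alloc_tag(struct request *rq)
    {
            static int next_tag;
            rq->tag = next_tag++;
            return true;
    }

    /* Models the new blk_mq_get_driver_tag(): allocation, shared-queue
     * accounting, and the rqs[] update live in one place.  The
     * inflight-flag test keeps the nr_active increment idempotent. */
    static bool get_driver_tag(struct request *rq)
    {
            struct hw_ctx *hctx = rq->hctx;

            if (rq->tag == NO_TAG && !alloc_tag(rq))
                    return false;

            if ((hctx->flags & F_TAG_SHARED) &&
                            !(rq->rq_flags & RQF_INFLIGHT)) {
                    rq->rq_flags |= RQF_INFLIGHT;
                    atomic_fetch_add(&hctx->nr_active, 1);
            }
            hctx->rqs[rq->tag] = rq;
            return true;
    }

    int main(void)
    {
            struct hw_ctx hctx = { .flags = F_TAG_SHARED };
            struct request rq = { .tag = NO_TAG, .hctx = &hctx };

            get_driver_tag(&rq);
            get_driver_tag(&rq);   /* e.g. after a requeue */
            assert(atomic_load(&hctx.nr_active) == 1);
            printf("nr_active = %d\n", atomic_load(&hctx.nr_active));
            return 0;
    }

Calling get_driver_tag() twice on the same request bumps nr_active only once; that is the property the real helper relies on when a request already holds a tag.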
block/blk-flush.c
@@ -219,7 +219,6 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	struct request *rq, *n;
 	unsigned long flags = 0;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
-	struct blk_mq_hw_ctx *hctx;

 	blk_account_io_flush(flush_rq);

@@ -235,13 +234,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	if (fq->rq_status != BLK_STS_OK)
 		error = fq->rq_status;

-	hctx = flush_rq->mq_hctx;
 	if (!q->elevator) {
-		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
-		flush_rq->tag = -1;
+		flush_rq->tag = BLK_MQ_NO_TAG;
 	} else {
 		blk_mq_put_driver_tag(flush_rq);
-		flush_rq->internal_tag = -1;
+		flush_rq->internal_tag = BLK_MQ_NO_TAG;
 	}

 	running = &fq->flush_queue[fq->flush_running_idx];
@@ -316,13 +313,10 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	flush_rq->mq_ctx = first_rq->mq_ctx;
 	flush_rq->mq_hctx = first_rq->mq_hctx;

-	if (!q->elevator) {
-		fq->orig_rq = first_rq;
-		flush_rq->tag = first_rq->tag;
-		blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
-	} else {
-		flush_rq->internal_tag = first_rq->internal_tag;
-	}
+	if (!q->elevator)
+		flush_rq->tag = first_rq->tag;
+	else
+		flush_rq->internal_tag = first_rq->internal_tag;

 	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
 	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
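With the table update owned by blk_mq_get_driver_tag(), the flush path above reduces to borrow-and-reset. A minimal sketch of that lifecycle, using invented stand-in types and a NO_TAG sentinel in place of BLK_MQ_NO_TAG:

    #include <assert.h>
    #include <stdio.h>

    #define NO_TAG (-1)

    struct request {
            int tag;
    };

    /* Models blk_kick_flush() on a queue without an elevator: the
     * flush request simply borrows the first data request's driver
     * tag.  The old blk_mq_tag_set_rq() call is gone because the
     * tags->rqs[] assignment is now owned by blk_mq_get_driver_tag(). */
    static void kick_flush(struct request *flush_rq, struct request *first_rq)
    {
            flush_rq->tag = first_rq->tag;
    }

    /* Models flush_end_io(): returning the borrowed tag is a reset. */
    static void flush_end_io(struct request *flush_rq)
    {
            flush_rq->tag = NO_TAG;
    }

    int main(void)
    {
            struct request first_rq = { .tag = 3 };
            struct request flush_rq = { .tag = NO_TAG };

            kick_flush(&flush_rq, &first_rq);
            assert(flush_rq.tag == first_rq.tag);
            flush_end_io(&flush_rq);
            assert(flush_rq.tag == NO_TAG);
            puts("flush tag borrowed and returned");
            return 0;
    }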
block/blk-mq-tag.h
@@ -101,18 +101,6 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	return atomic_read(&hctx->nr_active) < depth;
 }

-/*
- * This helper should only be used for flush request to share tag
- * with the request cloned from, and both the two requests can't be
- * in flight at the same time. The caller has to make sure the tag
- * can't be freed.
- */
-static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
-		unsigned int tag, struct request *rq)
-{
-	hctx->tags->rqs[tag] = rq;
-}
-
 static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
 					  unsigned int tag)
 {
block/blk-mq.c
@@ -277,26 +277,20 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 {
 	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
 	struct request *rq = tags->static_rqs[tag];
-	req_flags_t rq_flags = 0;

 	if (data->q->elevator) {
 		rq->tag = BLK_MQ_NO_TAG;
 		rq->internal_tag = tag;
 	} else {
-		if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
-			rq_flags = RQF_MQ_INFLIGHT;
-			atomic_inc(&data->hctx->nr_active);
-		}
 		rq->tag = tag;
 		rq->internal_tag = BLK_MQ_NO_TAG;
-		data->hctx->tags->rqs[rq->tag] = rq;
 	}

 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = data->q;
 	rq->mq_ctx = data->ctx;
 	rq->mq_hctx = data->hctx;
-	rq->rq_flags = rq_flags;
+	rq->rq_flags = 0;
 	rq->cmd_flags = data->cmd_flags;
 	if (data->flags & BLK_MQ_REQ_PREEMPT)
 		rq->rq_flags |= RQF_PREEMPT;
@@ -1105,9 +1099,10 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
 {
 	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
 	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
-	bool shared = blk_mq_tag_busy(rq->mq_hctx);
 	int tag;

+	blk_mq_tag_busy(rq->mq_hctx);
+
 	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
 		bt = &rq->mq_hctx->tags->breserved_tags;
 		tag_offset = 0;
@@ -1120,19 +1115,23 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
 		return false;

 	rq->tag = tag + tag_offset;
-	if (shared) {
-		rq->rq_flags |= RQF_MQ_INFLIGHT;
-		atomic_inc(&rq->mq_hctx->nr_active);
-	}
-	rq->mq_hctx->tags->rqs[rq->tag] = rq;
 	return true;
 }

 static bool blk_mq_get_driver_tag(struct request *rq)
 {
-	if (rq->tag != BLK_MQ_NO_TAG)
-		return true;
-	return __blk_mq_get_driver_tag(rq);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
+	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
+		return false;
+
+	if ((hctx->flags & BLK_MQ_F_TAG_SHARED) &&
+			!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
+		rq->rq_flags |= RQF_MQ_INFLIGHT;
+		atomic_inc(&hctx->nr_active);
+	}
+	hctx->tags->rqs[rq->tag] = rq;
+	return true;
 }

 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
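The RQF_MQ_INFLIGHT test in the new blk_mq_get_driver_tag() is what makes the accounting idempotent: a request that already holds a driver tag (such as the flush request that borrowed one in blk_kick_flush(), or a request being redispatched) skips allocation but still has its tags->rqs[] slot refreshed, and is never added to nr_active twice. That property is what allows blk_mq_rq_ctx_init() and the flush code to drop their manual bookkeeping.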
block/blk.h
@@ -25,11 +25,6 @@ struct blk_flush_queue {
 	struct list_head	flush_data_in_flight;
 	struct request		*flush_rq;

-	/*
-	 * flush_rq shares tag with this rq, both can't be active
-	 * at the same time
-	 */
-	struct request		*orig_rq;
 	struct lock_class_key	key;
 	spinlock_t		mq_flush_lock;
 };