blk-mq: pass obtained budget count to blk_mq_dispatch_rq_list
Pass the obtained budget count to blk_mq_dispatch_rq_list(), preparing for fully batched submission. With the obtained budget count it is easier to put the extra budgets back in case of a .queue_rq() failure. Also remove the old 'got_budget' parameter.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Tested-by: Baolin Wang <baolin.wang7@gmail.com>
Reviewed-by: Christoph Hellwig <hch@infradead.org>
Cc: Sagi Grimberg <sagi@grimberg.me>
Cc: Baolin Wang <baolin.wang7@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent bbdb3c5d94
commit 1fd40b5ea7
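Before reading the hunks, it may help to see the budget accounting in isolation. The sketch below is a minimal userspace model, not kernel code: the toy_* names, the QUEUE_DEPTH constant, and the simplified handling of the fake ->queue_rq() failure are all invented for illustration. What it mirrors from the patch is the contract of the new nr_budgets parameter: the caller reports how many dispatch budgets it already obtained for the requests it passes in, each request that reaches the driver consumes one of them, and whatever is left over when dispatch stops early is put back in one place, like blk_mq_release_budgets() does.

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_DEPTH	2	/* pretend the device exposes two dispatch budgets */

static unsigned int budgets_in_use;

/* grab one dispatch budget, in the spirit of blk_mq_get_dispatch_budget() */
static bool toy_get_budget(void)
{
	if (budgets_in_use >= QUEUE_DEPTH)
		return false;
	budgets_in_use++;
	return true;
}

/* return one budget, in the spirit of blk_mq_put_dispatch_budget() */
static void toy_put_budget(void)
{
	budgets_in_use--;
}

/* stand-in for ->queue_rq(); this fake driver rejects everything after rq 0 */
static bool toy_queue_rq(int rq)
{
	if (rq >= 1) {
		printf("rq %d: BLK_STS_RESOURCE\n", rq);
		return false;
	}
	printf("rq %d: queued\n", rq);
	return true;
}

/*
 * Analogue of blk_mq_dispatch_rq_list(..., nr_budgets): the caller reports
 * how many budgets it already obtained for the requests it passes in.
 */
static void toy_dispatch_list(int nr_rqs, unsigned int nr_budgets)
{
	int rq;

	for (rq = 0; rq < nr_rqs; rq++) {
		/* grab a budget here only if the caller did not pre-obtain one */
		if (!nr_budgets && !toy_get_budget())
			break;

		if (!toy_queue_rq(rq)) {
			/* driver busy: a freshly grabbed budget goes straight back */
			if (!nr_budgets)
				toy_put_budget();
			break;
		}

		/* the queued request now owns one of the pre-obtained budgets */
		if (nr_budgets)
			nr_budgets--;
	}

	/* partial dispatch: release the pre-obtained budgets nobody consumed */
	while (nr_budgets) {
		toy_put_budget();
		nr_budgets--;
	}
}

int main(void)
{
	unsigned int got = 0;
	int i;

	/* the caller obtains budgets up front, then passes the count down */
	for (i = 0; i < 2; i++)
		if (toy_get_budget())
			got++;

	toy_dispatch_list(2, got);
	printf("budgets still held: %u\n", budgets_in_use);
	return 0;
}

With two pre-obtained budgets and a driver that rejects the second request, this prints one queued request, one BLK_STS_RESOURCE, and "budgets still held: 1": the single budget owned by the request that actually reached the driver, while the other one is returned by the cleanup loop that plays the role of blk_mq_release_budgets(). The kernel's real error handling around ->queue_rq() (requeueing the failed request, driver tags, restart logic) is deliberately left out of this sketch.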
block/blk-mq-sched.c

@@ -130,7 +130,7 @@ static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
 		 * in blk_mq_dispatch_rq_list().
 		 */
 		list_add(&rq->queuelist, &rq_list);
-	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, true));
+	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));
 
 	return ret;
 }
@@ -198,7 +198,7 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
 		/* round robin for fair dispatch */
 		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
 
-	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, true));
+	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));
 
 	WRITE_ONCE(hctx->dispatch_from, ctx);
 	return ret;
@@ -238,7 +238,7 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	 */
 	if (!list_empty(&rq_list)) {
 		blk_mq_sched_mark_restart_hctx(hctx);
-		if (blk_mq_dispatch_rq_list(hctx, &rq_list, false)) {
+		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
 			if (has_sched_dispatch)
 				ret = blk_mq_do_dispatch_sched(hctx);
 			else
@@ -251,7 +251,7 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 		ret = blk_mq_do_dispatch_ctx(hctx);
 	} else {
 		blk_mq_flush_busy_ctxs(hctx, &rq_list);
-		blk_mq_dispatch_rq_list(hctx, &rq_list, false);
+		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
 	}
 
 	return ret;

block/blk-mq.c

@@ -1280,6 +1280,11 @@ static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
 		 * we'll re-run it below.
 		 */
 		if (!blk_mq_mark_tag_wait(hctx, rq)) {
+			/*
+			 * All budgets not got from this function will be put
+			 * together during handling partial dispatch
+			 */
+			if (need_budget)
 				blk_mq_put_dispatch_budget(rq->q);
 			return PREP_DISPATCH_NO_TAG;
 		}
@@ -1288,11 +1293,21 @@ static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
 	return PREP_DISPATCH_OK;
 }
 
+/* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
+static void blk_mq_release_budgets(struct request_queue *q,
+		unsigned int nr_budgets)
+{
+	int i;
+
+	for (i = 0; i < nr_budgets; i++)
+		blk_mq_put_dispatch_budget(q);
+}
+
 /*
  * Returns true if we did some work AND can potentially do more.
  */
 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
-			     bool got_budget)
+			     unsigned int nr_budgets)
 {
 	enum prep_dispatch prep;
 	struct request_queue *q = hctx->queue;
@@ -1304,7 +1319,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 	if (list_empty(list))
 		return false;
 
-	WARN_ON(!list_is_singular(list) && got_budget);
+	WARN_ON(!list_is_singular(list) && nr_budgets);
 
 	/*
 	 * Now process all the entries, sending them to the driver.
@@ -1316,7 +1331,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 		rq = list_first_entry(list, struct request, queuelist);
 
 		WARN_ON_ONCE(hctx != rq->mq_hctx);
-		prep = blk_mq_prep_dispatch_rq(rq, !got_budget);
+		prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
 		if (prep != PREP_DISPATCH_OK)
 			break;
 
@@ -1335,6 +1350,12 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 			bd.last = !blk_mq_get_driver_tag(nxt);
 		}
 
+		/*
+		 * once the request is queued to lld, no need to cover the
+		 * budget any more
+		 */
+		if (nr_budgets)
+			nr_budgets--;
 		ret = q->mq_ops->queue_rq(hctx, &bd);
 		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
 			blk_mq_handle_dev_resource(rq, list);
@@ -1376,6 +1397,8 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 			(hctx->flags & BLK_MQ_F_TAG_SHARED);
 		bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET;
 
+		blk_mq_release_budgets(q, nr_budgets);
+
 		/*
 		 * If we didn't flush the entire list, we could have told
 		 * the driver there was more coming, but that turned out to

block/blk-mq.h

@@ -40,7 +40,8 @@ struct blk_mq_ctx {
 void blk_mq_exit_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *, bool);
+bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
+				unsigned int);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
 		bool kick_requeue_list);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);