mirror of https://github.com/torvalds/linux.git
blk-mq: change blk_mq_queue_busy() to blk_mq_queue_inflight()
There's a single user of this function, dm, and dm just wants to check if IO is inflight, not that it's just allocated. This fixes a hang with srp/002 in blktests with dm, where it tries to suspend but waits for inflight IO to finish first. As it checks for just allocated requests, this fails.

Tested-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 3c94d83cb3
parent e5edd5f298
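For context: dm's suspend path essentially polls md_in_flight() until the device reports no outstanding IO before the suspend can complete. Below is a minimal sketch of that wait loop; it is not the actual dm code (the real logic lives in dm_wait_for_completion() in drivers/md/dm.c and handles more cases), but it shows why a predicate that also counts merely-allocated requests never lets the loop terminate.

/*
 * Simplified sketch, not the kernel's dm_wait_for_completion():
 * suspend waits here until md_in_flight() reports no outstanding IO.
 * With the old blk_mq_queue_busy(), a request that was allocated but
 * never started keeps md_in_flight() true, so this loop can hang.
 */
static int dm_wait_sketch(struct mapped_device *md, long task_state)
{
	int r = 0;

	for (;;) {
		set_current_state(task_state);

		if (!md_in_flight(md))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	return r;
}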
block/blk-mq.c
@@ -805,14 +805,14 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 }
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
-static bool blk_mq_check_busy(struct blk_mq_hw_ctx *hctx, struct request *rq,
-			      void *priv, bool reserved)
+static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
+			       void *priv, bool reserved)
 {
 	/*
-	 * If we find a request, we know the queue is busy. Return false
-	 * to stop the iteration.
+	 * If we find a request that is inflight and the queue matches,
+	 * we know the queue is busy. Return false to stop the iteration.
 	 */
-	if (rq->q == hctx->queue) {
+	if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
 		bool *busy = priv;
 
 		*busy = true;
@@ -822,14 +822,14 @@ static bool blk_mq_check_busy(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	return true;
 }
 
-bool blk_mq_queue_busy(struct request_queue *q)
+bool blk_mq_queue_inflight(struct request_queue *q)
 {
 	bool busy = false;
 
-	blk_mq_queue_tag_busy_iter(q, blk_mq_check_busy, &busy);
+	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
 	return busy;
 }
-EXPORT_SYMBOL_GPL(blk_mq_queue_busy);
+EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
 
 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 {
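The renamed helper keys off the blk-mq request state machine rather than mere tag allocation. At the time of this commit the states in include/linux/blk-mq.h look roughly like the block below (the comments here are paraphrased, not the kernel's); only a request that has actually been started is MQ_RQ_IN_FLIGHT, which is what dm needs to wait on.

enum mq_rq_state {
	MQ_RQ_IDLE		= 0,	/* tag allocated, request not started yet */
	MQ_RQ_IN_FLIGHT		= 1,	/* started and handed to the driver */
	MQ_RQ_COMPLETE		= 2,	/* completed, not yet freed */
};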
drivers/md/dm.c
@@ -663,7 +663,7 @@ static bool md_in_flight_bios(struct mapped_device *md)
 static bool md_in_flight(struct mapped_device *md)
 {
 	if (queue_is_mq(md->queue))
-		return blk_mq_queue_busy(md->queue);
+		return blk_mq_queue_inflight(md->queue);
 	else
 		return md_in_flight_bios(md);
 }
include/linux/blk-mq.h
@@ -257,7 +257,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
 
-bool blk_mq_queue_busy(struct request_queue *q);
+bool blk_mq_queue_inflight(struct request_queue *q);
 
 enum {
 	/* return when out of requests */