forked from Minki/linux
blk-mq: don't leak preempt counter/q_usage_counter when allocating rq failed
When blk_mq_get_request() fails, the preempt counter isn't released, and blk_mq_make_request() doesn't release the counter either. This patch fixes the issue, and makes sure that the preempt counter is only held if the rq is allocated successfully. The same policy is applied to .q_usage_counter too. Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
46d556e6aa
commit
1ad43c0078
@ -301,11 +301,12 @@ static struct request *blk_mq_get_request(struct request_queue *q,
|
|||||||
struct elevator_queue *e = q->elevator;
|
struct elevator_queue *e = q->elevator;
|
||||||
struct request *rq;
|
struct request *rq;
|
||||||
unsigned int tag;
|
unsigned int tag;
|
||||||
|
struct blk_mq_ctx *local_ctx = NULL;
|
||||||
|
|
||||||
blk_queue_enter_live(q);
|
blk_queue_enter_live(q);
|
||||||
data->q = q;
|
data->q = q;
|
||||||
if (likely(!data->ctx))
|
if (likely(!data->ctx))
|
||||||
data->ctx = blk_mq_get_ctx(q);
|
data->ctx = local_ctx = blk_mq_get_ctx(q);
|
||||||
if (likely(!data->hctx))
|
if (likely(!data->hctx))
|
||||||
data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
|
data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
|
||||||
if (op & REQ_NOWAIT)
|
if (op & REQ_NOWAIT)
|
||||||
@ -324,6 +325,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,
|
|||||||
|
|
||||||
tag = blk_mq_get_tag(data);
|
tag = blk_mq_get_tag(data);
|
||||||
if (tag == BLK_MQ_TAG_FAIL) {
|
if (tag == BLK_MQ_TAG_FAIL) {
|
||||||
|
if (local_ctx) {
|
||||||
|
blk_mq_put_ctx(local_ctx);
|
||||||
|
data->ctx = NULL;
|
||||||
|
}
|
||||||
blk_queue_exit(q);
|
blk_queue_exit(q);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
@ -356,12 +361,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
|
|||||||
|
|
||||||
rq = blk_mq_get_request(q, NULL, op, &alloc_data);
|
rq = blk_mq_get_request(q, NULL, op, &alloc_data);
|
||||||
|
|
||||||
blk_mq_put_ctx(alloc_data.ctx);
|
|
||||||
blk_queue_exit(q);
|
|
||||||
|
|
||||||
if (!rq)
|
if (!rq)
|
||||||
return ERR_PTR(-EWOULDBLOCK);
|
return ERR_PTR(-EWOULDBLOCK);
|
||||||
|
|
||||||
|
blk_mq_put_ctx(alloc_data.ctx);
|
||||||
|
blk_queue_exit(q);
|
||||||
|
|
||||||
rq->__data_len = 0;
|
rq->__data_len = 0;
|
||||||
rq->__sector = (sector_t) -1;
|
rq->__sector = (sector_t) -1;
|
||||||
rq->bio = rq->biotail = NULL;
|
rq->bio = rq->biotail = NULL;
|
||||||
@ -407,11 +412,11 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
|
|||||||
|
|
||||||
rq = blk_mq_get_request(q, NULL, op, &alloc_data);
|
rq = blk_mq_get_request(q, NULL, op, &alloc_data);
|
||||||
|
|
||||||
blk_queue_exit(q);
|
|
||||||
|
|
||||||
if (!rq)
|
if (!rq)
|
||||||
return ERR_PTR(-EWOULDBLOCK);
|
return ERR_PTR(-EWOULDBLOCK);
|
||||||
|
|
||||||
|
blk_queue_exit(q);
|
||||||
|
|
||||||
return rq;
|
return rq;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
|
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
|
||||||
|
Loading…
Reference in New Issue
Block a user