mirror of https://github.com/torvalds/linux.git
blk-mq: remove blk_mq_delay_queue()
No driver uses this interface any more, so remove it.

Cc: Stefan Haberland <sth@linux.vnet.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent f82ddf1923
commit 15fe8a90bb
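For a driver that still wants a delayed queue run, the remaining blk_mq_delay_run_hw_queue() helper (its declaration appears as context in the blk-mq.h hunk below) covers the same use case. A minimal, hypothetical caller-side sketch of the migration (mydrv_requeue_later() and MYDRV_RETRY_DELAY_MS are illustrative names, not part of this commit):

/* Hypothetical driver snippet: replacing the removed blk_mq_delay_queue(). */
#include <linux/blk-mq.h>

#define MYDRV_RETRY_DELAY_MS	100	/* illustrative retry delay */

static void mydrv_requeue_later(struct blk_mq_hw_ctx *hctx)
{
	/*
	 * Before this commit:
	 *	blk_mq_delay_queue(hctx, MYDRV_RETRY_DELAY_MS);
	 * stopped the hw queue and auto-restarted it when the delayed
	 * work fired.
	 *
	 * After this commit, schedule a delayed run of the hw queue
	 * without marking it stopped:
	 */
	blk_mq_delay_run_hw_queue(hctx, MYDRV_RETRY_DELAY_MS);
}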
block/blk-mq-debugfs.c
@@ -235,7 +235,6 @@ static const char *const hctx_state_name[] = {
 	HCTX_STATE_NAME(STOPPED),
 	HCTX_STATE_NAME(TAG_ACTIVE),
 	HCTX_STATE_NAME(SCHED_RESTART),
-	HCTX_STATE_NAME(START_ON_RUN),
 };
 #undef HCTX_STATE_NAME
 
block/blk-mq.c
@@ -1562,40 +1562,14 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
 
 	/*
-	 * If we are stopped, don't run the queue. The exception is if
-	 * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
-	 * the STOPPED bit and run it.
+	 * If we are stopped, don't run the queue.
 	 */
-	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
-		if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
-			return;
-
-		clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
+	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
-	}
 
 	__blk_mq_run_hw_queue(hctx);
 }
 
-
-void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
-{
-	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
-		return;
-
-	/*
-	 * Stop the hw queue, then modify currently delayed work.
-	 * This should prevent us from running the queue prematurely.
-	 * Mark the queue as auto-clearing STOPPED when it runs.
-	 */
-	blk_mq_stop_hw_queue(hctx);
-	set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
-	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-					&hctx->run_work,
-					msecs_to_jiffies(msecs));
-}
-EXPORT_SYMBOL(blk_mq_delay_queue);
-
 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
 					    struct request *rq,
 					    bool at_head)
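With BLK_MQ_S_START_ON_RUN gone, a delayed queue run no longer needs to stop the queue and undo that in the work handler; the surviving helper only has to (re)arm the per-hctx run work. A rough sketch of that idea, reusing the helpers visible in the removed code above (an approximation, not the verbatim blk_mq_delay_run_hw_queue() implementation):

/*
 * Approximate sketch of the remaining delayed-run path: arm hctx->run_work
 * on a mapped CPU, with none of the stop_hw_queue()/START_ON_RUN
 * bookkeeping that blk_mq_delay_queue() needed.
 */
static void sketch_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx,
				      unsigned long msecs)
{
	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
				    &hctx->run_work,
				    msecs_to_jiffies(msecs));
}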
include/linux/blk-mq.h
@@ -183,7 +183,6 @@ enum {
 	BLK_MQ_S_STOPPED	= 0,
 	BLK_MQ_S_TAG_ACTIVE	= 1,
 	BLK_MQ_S_SCHED_RESTART	= 2,
-	BLK_MQ_S_START_ON_RUN	= 3,
 
 	BLK_MQ_MAX_DEPTH	= 10240,
 
@@ -270,7 +269,6 @@ void blk_mq_unquiesce_queue(struct request_queue *q);
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
-void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);