blk-mq: unify hctx delayed_run_work and run_work
They serve the exact same purpose. Get rid of the non-delayed work
variant, and just run it without delay for the normal case.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <Bart.VanAssche@sandisk.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 9f99373790
parent 60ae36ad03
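The unification leans on an existing workqueue property: queue_delayed_work_on(), which kblockd_schedule_delayed_work_on() wraps, queues the work immediately and never arms a timer when the delay is zero, so a delayed_work scheduled with msecs_to_jiffies(0) behaves like a plain work_struct. A minimal sketch of that idea, using hypothetical my_hctx/my_kick names and the generic system_wq rather than anything from this patch:

#include <linux/workqueue.h>

/* Hypothetical stand-in for struct blk_mq_hw_ctx, not from the patch. */
struct my_hctx {
	struct delayed_work	run_work;
};

/* One scheduling path covers both the "run now" and "run later" cases. */
static void my_kick(struct my_hctx *h, unsigned long msecs)
{
	/*
	 * msecs == 0 maps to a zero jiffies delay; the workqueue core
	 * short-circuits that case and queues the work directly, so no
	 * separate non-delayed work item is needed.
	 */
	queue_delayed_work(system_wq, &h->run_work,
			   msecs_to_jiffies(msecs));
}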
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -269,7 +269,7 @@ void blk_sync_queue(struct request_queue *q)
 		int i;
 
 		queue_for_each_hw_ctx(q, hctx, i) {
-			cancel_work_sync(&hctx->run_work);
+			cancel_delayed_work_sync(&hctx->run_work);
 			cancel_delayed_work_sync(&hctx->delay_work);
 		}
 	} else {
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1166,13 +1166,9 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
 		put_cpu();
 	}
 
-	if (msecs == 0)
-		kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
-					 &hctx->run_work);
-	else
-		kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-						 &hctx->delayed_run_work,
-						 msecs_to_jiffies(msecs));
+	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+					 &hctx->run_work,
+					 msecs_to_jiffies(msecs));
 }
 
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
@@ -1224,7 +1220,7 @@ EXPORT_SYMBOL(blk_mq_queue_stopped);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
-	cancel_work(&hctx->run_work);
+	cancel_delayed_work_sync(&hctx->run_work);
 	cancel_delayed_work(&hctx->delay_work);
 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
@@ -1282,17 +1278,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 {
 	struct blk_mq_hw_ctx *hctx;
 
-	hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
-
-	__blk_mq_run_hw_queue(hctx);
-}
-
-static void blk_mq_delayed_run_work_fn(struct work_struct *work)
-{
-	struct blk_mq_hw_ctx *hctx;
-
-	hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
+	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
 
 	__blk_mq_run_hw_queue(hctx);
 }
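One detail worth noting in the hunk above: the callback prototype still takes a struct work_struct *, and for a delayed_work that pointer refers to the embedded .work member, so container_of() has to name run_work.work rather than the delayed_work itself. A small sketch of the pattern, again with a hypothetical my_hctx that is not from the patch:

#include <linux/workqueue.h>

/* Hypothetical container mirroring the run_work change above. */
struct my_hctx {
	struct delayed_work	run_work;
};

static void my_run_fn(struct work_struct *work)
{
	/*
	 * The workqueue hands the callback &my_hctx.run_work.work, so
	 * container_of() must go through the embedded work_struct;
	 * naming the delayed_work member would mismatch the pointer
	 * type that container_of() checks.
	 */
	struct my_hctx *h = container_of(work, struct my_hctx,
					 run_work.work);
	(void)h;	/* ... run the hardware queue ... */
}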
@@ -1898,8 +1884,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	if (node == NUMA_NO_NODE)
 		node = hctx->numa_node = set->numa_node;
 
-	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
-	INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
+	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -15,7 +15,7 @@ struct blk_mq_hw_ctx {
 		unsigned long		state;		/* BLK_MQ_S_* flags */
 	} ____cacheline_aligned_in_smp;
 
-	struct work_struct	run_work;
+	struct delayed_work	run_work;
 	cpumask_var_t		cpumask;
 	int			next_cpu;
 	int			next_cpu_batch;
@@ -51,7 +51,6 @@ struct blk_mq_hw_ctx {
 
 	atomic_t		nr_active;
 
-	struct delayed_work	delayed_run_work;
 	struct delayed_work	delay_work;
 
 	struct hlist_node	cpuhp_dead;