kyber: use sbitmap add_wait_queue/list_del wait helpers
sbq_wake_ptr() checks sbq->ws_active to decide whether it needs to loop
over the wait indexes at all. That count is only maintained when waiters
are added and removed through the sbitmap waitqueue wrappers, but kyber
doesn't use those for its domain token waitqueue handling.

Convert kyber to use the helpers. This fixes a hang when waiting for
domain tokens.
Fixes: 5d2ee7122c ("sbitmap: optimize wakeup check")
Tested-by: Ming Lei <ming.lei@redhat.com>
Reported-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 00203ba40d (parent 9f6b7ef6c3)
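For context, the Fixes commit added an early-out on sbq->ws_active before
scanning the wait queues, which is what makes the accounting mandatory. A
condensed sketch of sbq_wake_ptr() from lib/sbitmap.c around this point in
history (paraphrased; the in-tree version may differ in detail):

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Added by the Fixes commit: no accounted waiters, skip the scan.
	 * Waiters queued with bare add_wait_queue() never bump ws_active,
	 * so they are invisible here -- hence the kyber hang.
	 */
	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}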
@@ -195,7 +195,7 @@ struct kyber_hctx_data {
 	unsigned int batching;
 	struct kyber_ctx_queue *kcqs;
 	struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
-	wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
+	struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
 	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
 	atomic_t wait_index[KYBER_NUM_DOMAINS];
 };
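The new domain_wait type comes from include/linux/sbitmap.h, added
alongside the add/del helpers (the parent commit 9f6b7ef6c3): it wraps the
plain wait entry together with the back-pointer used for the ws_active
accounting:

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};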
@@ -501,10 +501,11 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 
 	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
 		INIT_LIST_HEAD(&khd->rqs[i]);
-		init_waitqueue_func_entry(&khd->domain_wait[i],
+		khd->domain_wait[i].sbq = NULL;
+		init_waitqueue_func_entry(&khd->domain_wait[i].wait,
 					  kyber_domain_wake);
-		khd->domain_wait[i].private = hctx;
-		INIT_LIST_HEAD(&khd->domain_wait[i].entry);
+		khd->domain_wait[i].wait.private = hctx;
+		INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
 		atomic_set(&khd->wait_index[i], 0);
 	}
 
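The explicit khd->domain_wait[i].sbq = NULL matters because the add helper
uses that pointer as its "already accounted" flag. Roughly how it behaves,
paraphrased from lib/sbitmap.c as of this commit:

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	/* first add: remember the queue and count the waiter */
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
	}
	add_wait_queue(&ws->wait, &sbq_wait->wait);
}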
@@ -698,12 +699,13 @@ static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
 				  flush_busy_kcq, &data);
 }
 
-static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
+static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
 			     void *key)
 {
-	struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);
+	struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
+	struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);
 
-	list_del_init(&wait->entry);
+	sbitmap_del_wait_queue(wait);
 	blk_mq_run_hw_queue(hctx, true);
 	return 1;
 }
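sbitmap_del_wait_queue() is the counterpart: it does the list_del_init()
the old code did by hand, and additionally drops the ws_active reference
taken at add time (again paraphrased from lib/sbitmap.c):

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	/* drop the accounting taken in sbitmap_add_wait_queue() */
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}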
@@ -714,7 +716,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 {
 	unsigned int sched_domain = khd->cur_domain;
 	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
-	wait_queue_entry_t *wait = &khd->domain_wait[sched_domain];
+	struct sbq_wait *wait = &khd->domain_wait[sched_domain];
 	struct sbq_wait_state *ws;
 	int nr;
 
@@ -725,11 +727,11 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 	 * run when one becomes available. Note that this is serialized on
 	 * khd->lock, but we still need to be careful about the waker.
 	 */
-	if (nr < 0 && list_empty_careful(&wait->entry)) {
+	if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
 		ws = sbq_wait_ptr(domain_tokens,
 				  &khd->wait_index[sched_domain]);
 		khd->domain_ws[sched_domain] = ws;
-		add_wait_queue(&ws->wait, wait);
+		sbitmap_add_wait_queue(domain_tokens, ws, wait);
 
 		/*
 		 * Try again in case a token was freed before we got on the wait
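Putting this hunk together with the one above, the waiter side of
kyber_get_domain_token() is the usual lost-wakeup dance, now routed
through the accounting helpers. In outline (condensed from the function;
the surrounding code is omitted):

	nr = __sbitmap_queue_get(domain_tokens);
	if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
		ws = sbq_wait_ptr(domain_tokens,
				  &khd->wait_index[sched_domain]);
		khd->domain_ws[sched_domain] = ws;
		sbitmap_add_wait_queue(domain_tokens, ws, wait);

		/* retry: a token may have been freed before we queued */
		nr = __sbitmap_queue_get(domain_tokens);
	}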
@@ -745,10 +747,10 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 	 * between the !list_empty_careful() check and us grabbing the lock, but
 	 * list_del_init() is okay with that.
 	 */
-	if (nr >= 0 && !list_empty_careful(&wait->entry)) {
+	if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
 		ws = khd->domain_ws[sched_domain];
 		spin_lock_irq(&ws->wait.lock);
-		list_del_init(&wait->entry);
+		sbitmap_del_wait_queue(wait);
 		spin_unlock_irq(&ws->wait.lock);
 	}
 
@@ -951,7 +953,7 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
 { \
 	struct blk_mq_hw_ctx *hctx = data; \
 	struct kyber_hctx_data *khd = hctx->sched_data; \
-	wait_queue_entry_t *wait = &khd->domain_wait[domain]; \
+	wait_queue_entry_t *wait = &khd->domain_wait[domain].wait; \
 \
 	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry)); \
 	return 0; \