mirror of https://github.com/torvalds/linux.git
synced 2024-11-17 01:22:07 +00:00
d94ecfc399
Allocation of the driver tag in the case of using a scheduler shares very
little code with the "normal" tag allocation. Split out a new helper to
streamline this path, and untangle it from the complex normal tag
allocation.
This way also avoids failing driver tag allocation because of an inactive hctx
during CPU hotplug, and fixes a potential hang risk.
Fixes: bf0beec060 ("blk-mq: drain I/O when all CPUs in a hctx are offline")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: John Garry <john.garry@huawei.com>
Cc: Dongli Zhang <dongli.zhang@oracle.com>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
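
To make the split concrete, here is a minimal sketch of what the out-of-line
slow path could look like, inferred from the declarations in the header below
rather than copied from the patch; hctx_may_queue() is assumed from the
surrounding blk-mq code, and the shared-tag accounting is omitted:

/*
 * Sketch only (not the patch body): allocate a driver tag straight from
 * the hctx's own tag bitmaps, bypassing the scheduler-aware
 * blk_mq_get_tag() path and its hctx-inactive checks.
 */
bool __blk_mq_get_driver_tag(struct request *rq)
{
	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
	int tag;

	/* a reserved scheduler tag maps to a reserved driver tag */
	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
		bt = &rq->mq_hctx->tags->breserved_tags;
		tag_offset = 0;
	}

	if (!hctx_may_queue(rq->mq_hctx, bt))	/* assumed fair-share gate */
		return false;
	tag = __sbitmap_queue_get(bt);
	if (tag == BLK_MQ_NO_TAG)
		return false;

	rq->tag = tag + tag_offset;
	return true;
}
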
100 lines
2.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H

#include "blk-mq.h"

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;		/* total tag depth, including reserved */
	unsigned int nr_reserved_tags;	/* tags set aside for reserved allocations */

	atomic_t active_queues;		/* queues actively sharing this tag set */

	struct sbitmap_queue bitmap_tags;	/* allocator for normal tags */
	struct sbitmap_queue breserved_tags;	/* allocator for reserved tags */

	struct request **rqs;		/* tag -> currently assigned request */
	struct request **static_rqs;	/* tag -> preallocated request */
	struct list_head page_list;	/* pages backing static_rqs */
};
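
As a usage sketch of the rqs[] table (modeled on the in-tree
blk_mq_tag_to_rq(); example_tag_to_rq is a hypothetical name), resolving a
driver tag back to its request is a bounds-checked array lookup:

/* Sketch: map a driver tag back to the request occupying it. */
static inline struct request *example_tag_to_rq(struct blk_mq_tags *tags,
						unsigned int tag)
{
	if (tag < tags->nr_tags)
		return tags->rqs[tag];
	return NULL;
}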

extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);

extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
			   unsigned int tag);
extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
				   struct blk_mq_tags **tags,
				   unsigned int depth, bool can_grow);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
				void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
			 void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}
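
A sketch of how a tag waiter might use bt_wait_ptr(); the
sbitmap_prepare_to_wait()/sbitmap_finish_wait() pairing follows the sbitmap
API, but example_wait_for_tag and its single-shot form are illustrative
assumptions, not the blk_mq_get_tag() implementation:

/* Sketch: park on a per-hctx wait queue until a tag is freed. */
static inline void example_wait_for_tag(struct sbitmap_queue *bt,
					struct blk_mq_hw_ctx *hctx)
{
	struct sbq_wait_state *ws = bt_wait_ptr(bt, hctx);
	DEFINE_SBQ_WAIT(wait);

	sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
	io_schedule();			/* woken by a tag release */
	sbitmap_finish_wait(bt, ws, &wait);
}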

enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};

bool __blk_mq_get_driver_tag(struct request *rq);
static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	if (rq->tag != BLK_MQ_NO_TAG)
		return true;
	return __blk_mq_get_driver_tag(rq);
}
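
For context, a hedged sketch of a dispatch-side caller; in-tree dispatchers
such as blk_mq_dispatch_rq_list() follow roughly this pattern, though
example_dispatch_one and its requeue handling are simplified assumptions:

/* Sketch: a dispatcher must hold a driver tag before issuing to the LLD. */
static blk_status_t example_dispatch_one(struct blk_mq_hw_ctx *hctx,
					 struct request *rq)
{
	struct blk_mq_queue_data bd = { .rq = rq, .last = true };

	if (!blk_mq_get_driver_tag(rq))
		return BLK_STS_RESOURCE;	/* out of tags: requeue, retry later */

	return hctx->queue->mq_ops->queue_rq(hctx, &bd);
}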

extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return false;

	return __blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return;

	__blk_mq_tag_idle(hctx);
}
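
The active_queues count maintained by these wrappers feeds the fair-share
check on shared tag sets. A rough sketch of that computation, modeled on the
in-tree hctx_may_queue() but not a verbatim copy (example_may_queue is a
hypothetical name):

/* Sketch: each active queue gets ~depth/users tags, with a small floor. */
static inline bool example_may_queue(struct blk_mq_hw_ctx *hctx,
				     struct sbitmap_queue *bt)
{
	unsigned int users, depth;

	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}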

/*
 * This helper should only be used by the flush request to share the tag
 * with the request it was cloned from; the two requests can't be in
 * flight at the same time. The caller has to make sure the tag can't
 * be freed.
 */
static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
		unsigned int tag, struct request *rq)
{
	hctx->tags->rqs[tag] = rq;
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

#endif