mirror of https://github.com/torvalds/linux.git
568f270065
Move the .nr_active update and the request assignment into blk_mq_get_driver_tag(); both are natural to do while acquiring the driver tag. Meanwhile, the blk-flush code is simplified: a flush request no longer needs to update the request table (tags->rqs[]) manually.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
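For context, after this change blk_mq_get_driver_tag() in blk-mq.c takes roughly the following shape. This is a sketch reconstructed from the commit description, not the verbatim function; the __blk_mq_get_driver_tag() helper and the BLK_MQ_F_TAG_SHARED/RQF_MQ_INFLIGHT flags are as found in blk-mq of this era:

static bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	/* Acquire a driver tag unless the request already holds one. */
	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
		return false;

	/* The .nr_active accounting now happens here, once per request. */
	if ((hctx->flags & BLK_MQ_F_TAG_SHARED) &&
	    !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
		rq->rq_flags |= RQF_MQ_INFLIGHT;
		atomic_inc(&hctx->nr_active);
	}

	/*
	 * ... as does the request-table assignment, so blk-flush no
	 * longer has to maintain tags->rqs[] by hand.
	 */
	hctx->tags->rqs[rq->tag] = rq;
	return true;
}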
111 lines
2.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H

#include "blk-mq.h"
/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;		/* total number of tags */
	unsigned int nr_reserved_tags;	/* tags set aside for reserved allocations */

	atomic_t active_queues;		/* hw queues actively using a shared tag set */

	struct sbitmap_queue bitmap_tags;	/* allocation state of normal tags */
	struct sbitmap_queue breserved_tags;	/* allocation state of reserved tags */

	struct request **rqs;		/* driver tag -> in-flight request */
	struct request **static_rqs;	/* preallocated requests, indexed by tag */
	struct list_head page_list;	/* pages backing static_rqs */
};
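The rqs[] table is what lets the core map a driver tag back to its request; blk_mq_tag_to_rq() in blk-mq.c is essentially this bounds-checked lookup (a minimal sketch, omitting the prefetch the real function does):

static struct request *tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	/* Tags at or beyond nr_tags map to no request. */
	if (tag < tags->nr_tags)
		return tags->rqs[tag];
	return NULL;
}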
extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
					    unsigned int reserved_tags,
					    int node, int alloc_policy);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);

extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
			   unsigned int tag);
extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
				   struct blk_mq_tags **tags,
				   unsigned int depth, bool can_grow);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
				void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
			 void *priv);
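As a usage sketch for the iterators (the callback signature follows busy_iter_fn from include/linux/blk-mq.h of this kernel era; count_started is a hypothetical name), blk_mq_queue_tag_busy_iter() visits every in-flight request on a queue:

/* Hypothetical callback: count requests that have actually started. */
static bool count_started(struct blk_mq_hw_ctx *hctx, struct request *rq,
			  void *priv, bool reserved)
{
	unsigned int *started = priv;

	if (blk_mq_request_started(rq))
		(*started)++;
	return true;	/* returning true keeps the iteration going */
}

A caller holding a live request_queue *q would then do:

	unsigned int started = 0;

	blk_mq_queue_tag_busy_iter(q, count_started, &started);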
/* Pick the sbitmap wait queue to sleep on when tag allocation must block. */
static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}
enum {
	BLK_MQ_NO_TAG		= -1U,	/* "no driver tag assigned" sentinel */
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};
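A request that does not yet own a driver tag (for example, one still held by the I/O scheduler) carries BLK_MQ_NO_TAG in rq->tag, so "does this request hold a driver tag yet?" reduces to a comparison against the sentinel. rq_has_driver_tag below is a hypothetical helper for illustration, not part of this header:

static inline bool rq_has_driver_tag(struct request *rq)
{
	return rq->tag != BLK_MQ_NO_TAG;	/* hypothetical helper */
}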
extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
/* Account this hw queue as an active user of a shared tag set. */
static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return false;

	return __blk_mq_tag_busy(hctx);
}

/* Drop this hw queue's claim on a shared tag set once it has gone idle. */
static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return;

	__blk_mq_tag_idle(hctx);
}
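The intended pairing, sketched below in comment form (the real call sites live in blk-mq.c), is that a hardware queue marks the shared tag set busy when it starts dispatching and idle once it has been quiet for a while, keeping active_queues in step with the number of competing queues:

/*
 *	blk_mq_tag_busy(hctx);	<- on dispatch: count this hctx in
 *				   tags->active_queues (only once, gated
 *				   by the BLK_MQ_S_TAG_ACTIVE state bit)
 *	...
 *	blk_mq_tag_idle(hctx);	<- later, once the hctx has gone idle:
 *				   drop it from active_queues again
 */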
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
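A worked example of the fair-share computation, with illustrative numbers: for bt->sb.depth = 128 and users = 3 active queues, the ceiling division gives (128 + 3 - 1) / 3 = 43, and max(43, 4U) leaves the floor of four tags untouched, so this hctx may keep queueing only while nr_active < 43:

/*
 *	depth = max((128 + 3 - 1) / 3, 4U) = max(43, 4) = 43
 */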
/* Reserved tags occupy the low range [0, nr_reserved_tags). */
static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}
#endif