blk-mq: Relocate hctx_may_queue()
blk-mq.h and blk-mq-tag.h include each other, which is less than ideal.

Move hctx_may_queue() to blk-mq.h, as it is not really tag-specific code. This way, we can drop the blk-mq-tag.h include of blk-mq.h.

Signed-off-by: John Garry <john.garry@huawei.com>
Tested-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit a0235d230f
parent 32bc15afed
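For context, the header cycle this commit breaks looks roughly like the sketch below. It is an illustration of the mutual-include relationship described in the commit message, not the literal file contents:

        /* block/blk-mq-tag.h (before this commit) */
        #include "blk-mq.h"     /* needed only because hctx_may_queue() lived here */

        /* block/blk-mq.h */
        #include "blk-mq-tag.h" /* the other half of the cycle */

Once hctx_may_queue() moves into blk-mq.h, blk-mq-tag.h no longer needs blk-mq.h and the cycle disappears.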
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -2,8 +2,6 @@
 #ifndef INT_BLK_MQ_TAG_H
 #define INT_BLK_MQ_TAG_H
 
-#include "blk-mq.h"
-
 /*
  * Tag address space map.
  */
@@ -81,37 +79,6 @@ static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 		__blk_mq_tag_idle(hctx);
 }
 
-/*
- * For shared tag users, we track the number of currently active users
- * and attempt to provide a fair share of the tag depth for each of them.
- */
-static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
-				  struct sbitmap_queue *bt)
-{
-	unsigned int depth, users;
-
-	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
-		return true;
-	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-		return true;
-
-	/*
-	 * Don't try dividing an ant
-	 */
-	if (bt->sb.depth == 1)
-		return true;
-
-	users = atomic_read(&hctx->tags->active_queues);
-	if (!users)
-		return true;
-
-	/*
-	 * Allow at least some tags
-	 */
-	depth = max((bt->sb.depth + users - 1) / users, 4U);
-	return atomic_read(&hctx->nr_active) < depth;
-}
-
 static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
 					  unsigned int tag)
 {
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -259,4 +259,36 @@ static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
 	return NULL;
 }
 
+/*
+ * For shared tag users, we track the number of currently active users
+ * and attempt to provide a fair share of the tag depth for each of them.
+ */
+static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+				  struct sbitmap_queue *bt)
+{
+	unsigned int depth, users;
+
+	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
+		return true;
+	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		return true;
+
+	/*
+	 * Don't try dividing an ant
+	 */
+	if (bt->sb.depth == 1)
+		return true;
+
+	users = atomic_read(&hctx->tags->active_queues);
+	if (!users)
+		return true;
+
+	/*
+	 * Allow at least some tags
+	 */
+	depth = max((bt->sb.depth + users - 1) / users, 4U);
+	return atomic_read(&hctx->nr_active) < depth;
+}
+
+
 #endif
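A note on the relocated logic: the depth calculation is a round-up division of the shared tag depth across the active users, floored at four tags so every queue keeps making forward progress. A minimal user-space sketch of just that calculation follows; fair_share_depth and the sample values are illustrative, not kernel API:

        #include <stdio.h>

        /* Mirrors the fair-share math in hctx_may_queue():
         * ceil(depth / users), but never less than 4. */
        static unsigned int fair_share_depth(unsigned int depth, unsigned int users)
        {
                unsigned int share = (depth + users - 1) / users;

                return share > 4U ? share : 4U;
        }

        int main(void)
        {
                /* 256 shared tags across 8 active queues -> 32 tags per hctx */
                printf("%u\n", fair_share_depth(256, 8));
                /* 16 tags across 8 queues would be 2 each; the floor keeps it at 4 */
                printf("%u\n", fair_share_depth(16, 8));
                return 0;
        }

A request may be queued only while the hctx's nr_active count stays below this per-queue share.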