mirror of https://github.com/torvalds/linux.git
blk-throttle: set default latency baseline for harddisk
Hard disk IO latency varies a lot depending on spindle movement; it can range from several microseconds to several milliseconds, so it is hard to pick the baseline latency that io.low needs. We use a different strategy here: only IO that involves spindle movement is used to decide whether a cgroup's IO is in a good state. For HD, if the IO latency is small (< 1ms), we ignore the IO; such IO is likely sequential and tells us nothing about whether a cgroup's IO is being impacted by other cgroups. With this, we only account IO with big latency, and we can then choose a hardcoded baseline latency for HD (4ms, a typical IO latency with a seek). With these settings, the io.low latency target works for both HD and SSD.

Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
commit 6679a90c4b
parent a41b816c17
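The filtering rule described above is small enough to show in isolation. Below is a minimal user-space C sketch of the decision this commit adds, assuming the completed IO's latency is given in microseconds; the macro names mirror the patch, but sample_latency() and the test harness are illustrative, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define LATENCY_FILTERED_SSD    (0L)    /* SSD: account every IO */
#define LATENCY_FILTERED_HD     (1000L) /* HD: ignore IOs faster than 1ms */
#define DFL_HD_BASELINE_LATENCY (4000L) /* HD: assumed 4ms seek baseline */

/* Should this completed IO feed the io.low latency statistics? */
static bool sample_latency(bool nonrot, long lat_us)
{
        long filtered = nonrot ? LATENCY_FILTERED_SSD : LATENCY_FILTERED_HD;

        return lat_us >= filtered;
}

int main(void)
{
        /* A 200us IO on a hard disk is likely sequential: filtered out. */
        printf("HD  200us sampled: %d\n", sample_latency(false, 200));
        /* A 6ms IO involved a seek: accounted against the 4ms baseline. */
        printf("HD 6000us sampled: %d\n", sample_latency(false, 6000));
        /* On SSD the filter is zero, so every IO is sampled. */
        printf("SSD 200us sampled: %d\n", sample_latency(true, 200));
        return 0;
}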
block/blk-throttle.c

@@ -27,6 +27,13 @@ static int throtl_quantum = 32;
 #define MIN_THROTL_IOPS (10)
 #define DFL_LATENCY_TARGET (-1L)
 #define DFL_IDLE_THRESHOLD (0)
+#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
+#define LATENCY_FILTERED_SSD (0)
+/*
+ * For HD, very small latency comes from sequential IO. Such IO is helpless to
+ * help determine if its IO is impacted by others, hence we ignore the IO
+ */
+#define LATENCY_FILTERED_HD (1000L) /* 1ms */
 
 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
 
@@ -212,6 +219,7 @@ struct throtl_data
         struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
         struct latency_bucket __percpu *latency_buckets;
         unsigned long last_calculate_time;
+        unsigned long filtered_latency;
 
         bool track_bio_latency;
 };
@@ -2281,7 +2289,7 @@ void blk_throtl_bio_endio(struct bio *bio)
         throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
                 bio_op(bio), lat);
 
-        if (tg->latency_target) {
+        if (tg->latency_target && lat >= tg->td->filtered_latency) {
                 int bucket;
                 unsigned int threshold;
 
@@ -2417,14 +2425,20 @@ void blk_throtl_exit(struct request_queue *q)
 void blk_throtl_register_queue(struct request_queue *q)
 {
         struct throtl_data *td;
+        int i;
 
         td = q->td;
         BUG_ON(!td);
 
-        if (blk_queue_nonrot(q))
+        if (blk_queue_nonrot(q)) {
                 td->throtl_slice = DFL_THROTL_SLICE_SSD;
-        else
+                td->filtered_latency = LATENCY_FILTERED_SSD;
+        } else {
                 td->throtl_slice = DFL_THROTL_SLICE_HD;
+                td->filtered_latency = LATENCY_FILTERED_HD;
+                for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
+                        td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
+        }
 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
         /* if no low limit, use previous default */
         td->throtl_slice = DFL_THROTL_SLICE_HD;
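For context on where the seeded baseline goes: in the blk-throttle code of this era, a sampled IO counts against a cgroup when its latency exceeds the per-size-bucket average latency plus the cgroup's io.low latency target, so seeding every HD bucket with 4ms gives that comparison a sane starting point before real samples arrive. A minimal sketch of that threshold check follows; io_missed_target() and the 2ms target are hypothetical, only the 4ms constant comes from this patch.

#include <stdbool.h>
#include <stdio.h>

#define DFL_HD_BASELINE_LATENCY (4000L) /* us, from this patch */

/*
 * Did an (already filtered) IO miss the cgroup's latency target?
 * Mirrors threshold = avg_buckets[bucket].latency + tg->latency_target.
 */
static bool io_missed_target(long baseline_us, long target_us, long lat_us)
{
        return lat_us > baseline_us + target_us;
}

int main(void)
{
        long target_us = 2000; /* hypothetical 2ms io.low latency target */

        /* 5ms fits inside the 4ms seek baseline plus the 2ms target. */
        printf("5ms missed: %d\n",
               io_missed_target(DFL_HD_BASELINE_LATENCY, target_us, 5000));
        /* 9ms exceeds it, so the cgroup would be flagged as impacted. */
        printf("9ms missed: %d\n",
               io_missed_target(DFL_HD_BASELINE_LATENCY, target_us, 9000));
        return 0;
}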