Merge branch 'writeback-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/linux
* 'writeback-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/linux:
  writeback: Add a 'reason' to wb_writeback_work
  writeback: send work item to queue_io, move_expired_inodes
  writeback: trace event balance_dirty_pages
  writeback: trace event bdi_dirty_ratelimit
  writeback: fix ppc compile warnings on do_div(long long, unsigned long)
  writeback: per-bdi background threshold
  writeback: dirty position control - bdi reserve area
  writeback: control dirty pause time
  writeback: limit max dirty pause time
  writeback: IO-less balance_dirty_pages()
  writeback: per task dirty rate limit
  writeback: stabilize bdi->dirty_ratelimit
  writeback: dirty rate control
  writeback: add bg_threshold parameter to __bdi_update_bandwidth()
  writeback: dirty position control
  writeback: account per-bdi accumulated dirtied pages
commit 208bca0860
fs/btrfs/extent-tree.c
@@ -3340,7 +3340,8 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
 	smp_mb();
 	nr_pages = min_t(unsigned long, nr_pages,
 			 root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
-	writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
+	writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
+				       WB_REASON_FS_FREE_SPACE);

 	spin_lock(&space_info->lock);
 	if (reserved > space_info->bytes_reserved)
fs/buffer.c
@@ -288,7 +288,7 @@ static void free_more_memory(void)
 	struct zone *zone;
 	int nid;

-	wakeup_flusher_threads(1024);
+	wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
 	yield();

 	for_each_online_node(nid) {
fs/ext4/inode.c
@@ -2372,7 +2372,7 @@ static int ext4_nonda_switch(struct super_block *sb)
 	 * start pushing delalloc when 1/2 of free blocks are dirty.
 	 */
 	if (free_blocks < 2 * dirty_blocks)
-		writeback_inodes_sb_if_idle(sb);
+		writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);

 	return 0;
 }
fs/fs-writeback.c
@@ -41,11 +41,23 @@ struct wb_writeback_work {
 	unsigned int for_kupdate:1;
 	unsigned int range_cyclic:1;
 	unsigned int for_background:1;
+	enum wb_reason reason;		/* why was writeback initiated? */

 	struct list_head list;		/* pending work list */
 	struct completion *done;	/* set if the caller waits */
 };

+const char *wb_reason_name[] = {
+	[WB_REASON_BACKGROUND]		= "background",
+	[WB_REASON_TRY_TO_FREE_PAGES]	= "try_to_free_pages",
+	[WB_REASON_SYNC]		= "sync",
+	[WB_REASON_PERIODIC]		= "periodic",
+	[WB_REASON_LAPTOP_TIMER]	= "laptop_timer",
+	[WB_REASON_FREE_MORE_MEM]	= "free_more_memory",
+	[WB_REASON_FS_FREE_SPACE]	= "fs_free_space",
+	[WB_REASON_FORKER_THREAD]	= "forker_thread"
+};
+
 /*
  * Include the creation of the trace points after defining the
  * wb_writeback_work structure so that the definition remains local to this
@@ -115,7 +127,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi,

 static void
 __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-		      bool range_cyclic)
+		      bool range_cyclic, enum wb_reason reason)
 {
 	struct wb_writeback_work *work;

@@ -135,6 +147,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 	work->sync_mode	= WB_SYNC_NONE;
 	work->nr_pages	= nr_pages;
 	work->range_cyclic = range_cyclic;
+	work->reason	= reason;

 	bdi_queue_work(bdi, work);
 }
@@ -150,9 +163,10 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
  *   completion. Caller need not hold sb s_umount semaphore.
  *
  */
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+			enum wb_reason reason)
 {
-	__bdi_start_writeback(bdi, nr_pages, true);
+	__bdi_start_writeback(bdi, nr_pages, true, reason);
 }

 /**
@@ -251,7 +265,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
  */
 static int move_expired_inodes(struct list_head *delaying_queue,
 			       struct list_head *dispatch_queue,
-			       unsigned long *older_than_this)
+			       struct wb_writeback_work *work)
 {
 	LIST_HEAD(tmp);
 	struct list_head *pos, *node;
@@ -262,8 +276,8 @@ static int move_expired_inodes(struct list_head *delaying_queue,

 	while (!list_empty(delaying_queue)) {
 		inode = wb_inode(delaying_queue->prev);
-		if (older_than_this &&
-		    inode_dirtied_after(inode, *older_than_this))
+		if (work->older_than_this &&
+		    inode_dirtied_after(inode, *work->older_than_this))
 			break;
 		if (sb && sb != inode->i_sb)
 			do_sb_sort = 1;
@@ -302,13 +316,13 @@ out:
  *                                           |
  *                                           +--> dequeue for IO
  */
-static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
+static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
 {
 	int moved;
 	assert_spin_locked(&wb->list_lock);
 	list_splice_init(&wb->b_more_io, &wb->b_io);
-	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
-	trace_writeback_queue_io(wb, older_than_this, moved);
+	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
+	trace_writeback_queue_io(wb, work, moved);
 }

 static int write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -641,31 +655,40 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
 	return wrote;
 }

-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages)
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
+				enum wb_reason reason)
 {
 	struct wb_writeback_work work = {
 		.nr_pages	= nr_pages,
 		.sync_mode	= WB_SYNC_NONE,
 		.range_cyclic	= 1,
+		.reason		= reason,
 	};

 	spin_lock(&wb->list_lock);
 	if (list_empty(&wb->b_io))
-		queue_io(wb, NULL);
+		queue_io(wb, &work);
 	__writeback_inodes_wb(wb, &work);
 	spin_unlock(&wb->list_lock);

 	return nr_pages - work.nr_pages;
 }

-static inline bool over_bground_thresh(void)
+static bool over_bground_thresh(struct backing_dev_info *bdi)
 {
 	unsigned long background_thresh, dirty_thresh;

 	global_dirty_limits(&background_thresh, &dirty_thresh);

-	return (global_page_state(NR_FILE_DIRTY) +
-		global_page_state(NR_UNSTABLE_NFS) > background_thresh);
+	if (global_page_state(NR_FILE_DIRTY) +
+	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
+		return true;
+
+	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
+				bdi_dirty_limit(bdi, background_thresh))
+		return true;
+
+	return false;
 }

 /*
@@ -675,7 +698,7 @@ static inline bool over_bground_thresh(void)
 static void wb_update_bandwidth(struct bdi_writeback *wb,
 				unsigned long start_time)
 {
-	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, start_time);
+	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
 }

 /*
@@ -727,7 +750,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * For background writeout, stop when we are below the
 		 * background dirty threshold
 		 */
-		if (work->for_background && !over_bground_thresh())
+		if (work->for_background && !over_bground_thresh(wb->bdi))
 			break;

 		if (work->for_kupdate) {
@@ -738,7 +761,7 @@ static long wb_writeback(struct bdi_writeback *wb,

 		trace_writeback_start(wb->bdi, work);
 		if (list_empty(&wb->b_io))
-			queue_io(wb, work->older_than_this);
+			queue_io(wb, work);
 		if (work->sb)
 			progress = writeback_sb_inodes(work->sb, wb, work);
 		else
@@ -811,13 +834,14 @@ static unsigned long get_nr_dirty_pages(void)

 static long wb_check_background_flush(struct bdi_writeback *wb)
 {
-	if (over_bground_thresh()) {
+	if (over_bground_thresh(wb->bdi)) {

 		struct wb_writeback_work work = {
 			.nr_pages	= LONG_MAX,
 			.sync_mode	= WB_SYNC_NONE,
 			.for_background	= 1,
 			.range_cyclic	= 1,
+			.reason		= WB_REASON_BACKGROUND,
 		};

 		return wb_writeback(wb, &work);
@@ -851,6 +875,7 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
 			.sync_mode	= WB_SYNC_NONE,
 			.for_kupdate	= 1,
 			.range_cyclic	= 1,
+			.reason		= WB_REASON_PERIODIC,
 		};

 		return wb_writeback(wb, &work);
@@ -969,7 +994,7 @@ int bdi_writeback_thread(void *data)
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
-void wakeup_flusher_threads(long nr_pages)
+void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 {
 	struct backing_dev_info *bdi;

@@ -982,7 +1007,7 @@ void wakeup_flusher_threads(long nr_pages)
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		if (!bdi_has_dirty_io(bdi))
 			continue;
-		__bdi_start_writeback(bdi, nr_pages, false);
+		__bdi_start_writeback(bdi, nr_pages, false, reason);
 	}
 	rcu_read_unlock();
 }
@@ -1203,7 +1228,9 @@ static void wait_sb_inodes(struct super_block *sb)
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
-void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
+void writeback_inodes_sb_nr(struct super_block *sb,
+			    unsigned long nr,
+			    enum wb_reason reason)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct wb_writeback_work work = {
@@ -1212,6 +1239,7 @@ void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
 		.tagged_writepages	= 1,
 		.done			= &done,
 		.nr_pages		= nr,
+		.reason			= reason,
 	};

 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
@@ -1228,9 +1256,9 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
-void writeback_inodes_sb(struct super_block *sb)
+void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
 {
-	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
+	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);

@@ -1241,11 +1269,11 @@ EXPORT_SYMBOL(writeback_inodes_sb);
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
-int writeback_inodes_sb_if_idle(struct super_block *sb)
+int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
 {
 	if (!writeback_in_progress(sb->s_bdi)) {
 		down_read(&sb->s_umount);
-		writeback_inodes_sb(sb);
+		writeback_inodes_sb(sb, reason);
 		up_read(&sb->s_umount);
 		return 1;
 	} else
@@ -1262,11 +1290,12 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
 * Returns 1 if writeback was started, 0 if not.
 */
 int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
-				   unsigned long nr)
+				   unsigned long nr,
+				   enum wb_reason reason)
 {
 	if (!writeback_in_progress(sb->s_bdi)) {
 		down_read(&sb->s_umount);
-		writeback_inodes_sb_nr(sb, nr);
+		writeback_inodes_sb_nr(sb, nr, reason);
 		up_read(&sb->s_umount);
 		return 1;
 	} else
@@ -1290,6 +1319,7 @@ void sync_inodes_sb(struct super_block *sb)
 		.nr_pages	= LONG_MAX,
 		.range_cyclic	= 0,
 		.done		= &done,
+		.reason		= WB_REASON_SYNC,
 	};

 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
fs/quota/quota.c
@@ -286,7 +286,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 		/* caller already holds s_umount */
 		if (sb->s_flags & MS_RDONLY)
 			return -EROFS;
-		writeback_inodes_sb(sb);
+		writeback_inodes_sb(sb, WB_REASON_SYNC);
 		return 0;
 	default:
 		return -EINVAL;
fs/sync.c
@@ -43,7 +43,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
 	if (wait)
 		sync_inodes_sb(sb);
 	else
-		writeback_inodes_sb(sb);
+		writeback_inodes_sb(sb, WB_REASON_SYNC);

 	if (sb->s_op->sync_fs)
 		sb->s_op->sync_fs(sb, wait);
@@ -98,7 +98,7 @@ static void sync_filesystems(int wait)
 */
 SYSCALL_DEFINE0(sync)
 {
-	wakeup_flusher_threads(0);
+	wakeup_flusher_threads(0, WB_REASON_SYNC);
 	sync_filesystems(0);
 	sync_filesystems(1);
 	if (unlikely(laptop_mode))
fs/ubifs/budget.c
@@ -63,7 +63,7 @@
 static void shrink_liability(struct ubifs_info *c, int nr_to_write)
 {
 	down_read(&c->vfs_sb->s_umount);
-	writeback_inodes_sb(c->vfs_sb);
+	writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE);
 	up_read(&c->vfs_sb->s_umount);
 }

include/linux/backing-dev.h
@@ -40,6 +40,7 @@ typedef int (congested_fn)(void *, int);
 enum bdi_stat_item {
 	BDI_RECLAIMABLE,
 	BDI_WRITEBACK,
+	BDI_DIRTIED,
 	BDI_WRITTEN,
 	NR_BDI_STAT_ITEMS
 };
@@ -74,10 +75,20 @@ struct backing_dev_info {
 	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

 	unsigned long bw_time_stamp;	/* last time write bw is updated */
+	unsigned long dirtied_stamp;
 	unsigned long written_stamp;	/* pages written at bw_time_stamp */
 	unsigned long write_bandwidth;	/* the estimated write bandwidth */
 	unsigned long avg_write_bandwidth; /* further smoothed write bw */

+	/*
+	 * The base dirty throttle rate, re-calculated on every 200ms.
+	 * All the bdi tasks' dirty rate will be curbed under it.
+	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
+	 * in small steps and is much more smooth/stable than the latter.
+	 */
+	unsigned long dirty_ratelimit;
+	unsigned long balanced_dirty_ratelimit;
+
 	struct prop_local_percpu completions;
 	int dirty_exceeded;

@@ -107,7 +118,8 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
 int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+			enum wb_reason reason);
 void bdi_start_background_writeback(struct backing_dev_info *bdi);
 int bdi_writeback_thread(void *data);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
include/linux/sched.h
@@ -1522,6 +1522,13 @@ struct task_struct {
 	int make_it_fail;
 #endif
 	struct prop_local_single dirties;
+	/*
+	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
+	 * balance_dirty_pages() for some dirty throttling pause
+	 */
+	int nr_dirtied;
+	int nr_dirtied_pause;
+
 #ifdef CONFIG_LATENCYTOP
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
include/linux/writeback.h
@@ -38,6 +38,23 @@ enum writeback_sync_modes {
 	WB_SYNC_ALL,	/* Wait on every mapping */
 };

+/*
+ * why some writeback work was initiated
+ */
+enum wb_reason {
+	WB_REASON_BACKGROUND,
+	WB_REASON_TRY_TO_FREE_PAGES,
+	WB_REASON_SYNC,
+	WB_REASON_PERIODIC,
+	WB_REASON_LAPTOP_TIMER,
+	WB_REASON_FREE_MORE_MEM,
+	WB_REASON_FS_FREE_SPACE,
+	WB_REASON_FORKER_THREAD,
+
+	WB_REASON_MAX,
+};
+extern const char *wb_reason_name[];
+
 /*
  * A control structure which tells the writeback code what to do.  These are
  * always on the stack, and hence need no locking.  They are always initialised
@@ -69,14 +86,17 @@ struct writeback_control {
  */
 struct bdi_writeback;
 int inode_wait(void *);
-void writeback_inodes_sb(struct super_block *);
-void writeback_inodes_sb_nr(struct super_block *, unsigned long nr);
-int writeback_inodes_sb_if_idle(struct super_block *);
-int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr);
+void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
+void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
+							enum wb_reason reason);
+int writeback_inodes_sb_if_idle(struct super_block *, enum wb_reason reason);
+int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr,
+							enum wb_reason reason);
 void sync_inodes_sb(struct super_block *);
-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages);
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
+				enum wb_reason reason);
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
-void wakeup_flusher_threads(long nr_pages);
+void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);

 /* writeback.h requires fs.h; it, too, is not included from here. */
 static inline void wait_on_inode(struct inode *inode)
@@ -143,6 +163,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,

 void __bdi_update_bandwidth(struct backing_dev_info *bdi,
 			    unsigned long thresh,
+			    unsigned long bg_thresh,
 			    unsigned long dirty,
 			    unsigned long bdi_thresh,
 			    unsigned long bdi_dirty,
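The enum above and the wb_reason_name[] table added to fs/fs-writeback.c must stay index-aligned: the designated initializers pin each string to its enum slot, and WB_REASON_MAX sizes the array. A minimal stand-alone sketch of the same pattern (hypothetical reason names, not the kernel API):

	#include <stdio.h>

	enum reason { R_BACKGROUND, R_SYNC, R_PERIODIC, R_MAX };

	/* designated initializers: reordering the enum cannot desync the table */
	static const char *reason_name[R_MAX] = {
		[R_BACKGROUND]	= "background",
		[R_SYNC]	= "sync",
		[R_PERIODIC]	= "periodic",
	};

	int main(void)
	{
		for (int r = 0; r < R_MAX; r++)
			printf("%d -> %s\n", r, reason_name[r]);
		return 0;
	}

This is why the trace events below can print wb_reason_name[__entry->reason] safely for any valid reason value.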
include/trace/events/writeback.h
@@ -34,6 +34,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
 		__field(int, for_kupdate)
 		__field(int, range_cyclic)
 		__field(int, for_background)
+		__field(int, reason)
 	),
 	TP_fast_assign(
 		strncpy(__entry->name, dev_name(bdi->dev), 32);
@@ -43,16 +44,18 @@ DECLARE_EVENT_CLASS(writeback_work_class,
 		__entry->for_kupdate = work->for_kupdate;
 		__entry->range_cyclic = work->range_cyclic;
 		__entry->for_background	= work->for_background;
+		__entry->reason = work->reason;
 	),
 	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
-		  "kupdate=%d range_cyclic=%d background=%d",
+		  "kupdate=%d range_cyclic=%d background=%d reason=%s",
 		  __entry->name,
 		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
 		  __entry->nr_pages,
 		  __entry->sync_mode,
 		  __entry->for_kupdate,
 		  __entry->range_cyclic,
-		  __entry->for_background
+		  __entry->for_background,
+		  wb_reason_name[__entry->reason]
 	)
 );
 #define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -104,30 +107,6 @@ DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
 DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
 DEFINE_WRITEBACK_EVENT(writeback_thread_start);
 DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
-DEFINE_WRITEBACK_EVENT(balance_dirty_start);
-DEFINE_WRITEBACK_EVENT(balance_dirty_wait);
-
-TRACE_EVENT(balance_dirty_written,
-
-	TP_PROTO(struct backing_dev_info *bdi, int written),
-
-	TP_ARGS(bdi, written),
-
-	TP_STRUCT__entry(
-		__array(char,	name, 32)
-		__field(int,	written)
-	),
-
-	TP_fast_assign(
-		strncpy(__entry->name, dev_name(bdi->dev), 32);
-		__entry->written = written;
-	),
-
-	TP_printk("bdi %s written %d",
-		  __entry->name,
-		  __entry->written
-	)
-);

 DECLARE_EVENT_CLASS(wbc_class,
 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
@@ -181,27 +160,31 @@ DEFINE_WBC_EVENT(wbc_writepage);

 TRACE_EVENT(writeback_queue_io,
 	TP_PROTO(struct bdi_writeback *wb,
-		 unsigned long *older_than_this,
+		 struct wb_writeback_work *work,
 		 int moved),
-	TP_ARGS(wb, older_than_this, moved),
+	TP_ARGS(wb, work, moved),
 	TP_STRUCT__entry(
 		__array(char,		name, 32)
 		__field(unsigned long,	older)
 		__field(long,		age)
 		__field(int,		moved)
+		__field(int,		reason)
 	),
 	TP_fast_assign(
+		unsigned long *older_than_this = work->older_than_this;
 		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
 		__entry->older	= older_than_this ?  *older_than_this : 0;
 		__entry->age	= older_than_this ?
 				  (jiffies - *older_than_this) * 1000 / HZ : -1;
 		__entry->moved	= moved;
+		__entry->reason	= work->reason;
 	),
-	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d",
+	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
 		  __entry->name,
 		  __entry->older,	/* older_than_this in jiffies */
 		  __entry->age,		/* older_than_this in relative milliseconds */
-		  __entry->moved)
+		  __entry->moved,
+		  wb_reason_name[__entry->reason])
 );

 TRACE_EVENT(global_dirty_state,
@@ -250,6 +233,124 @@ TRACE_EVENT(global_dirty_state,
 	)
 );

+#define KBps(x)			((x) << (PAGE_SHIFT - 10))
+
+TRACE_EVENT(bdi_dirty_ratelimit,
+
+	TP_PROTO(struct backing_dev_info *bdi,
+		 unsigned long dirty_rate,
+		 unsigned long task_ratelimit),
+
+	TP_ARGS(bdi, dirty_rate, task_ratelimit),
+
+	TP_STRUCT__entry(
+		__array(char,		bdi, 32)
+		__field(unsigned long,	write_bw)
+		__field(unsigned long,	avg_write_bw)
+		__field(unsigned long,	dirty_rate)
+		__field(unsigned long,	dirty_ratelimit)
+		__field(unsigned long,	task_ratelimit)
+		__field(unsigned long,	balanced_dirty_ratelimit)
+	),
+
+	TP_fast_assign(
+		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
+		__entry->write_bw	= KBps(bdi->write_bandwidth);
+		__entry->avg_write_bw	= KBps(bdi->avg_write_bandwidth);
+		__entry->dirty_rate	= KBps(dirty_rate);
+		__entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
+		__entry->task_ratelimit	= KBps(task_ratelimit);
+		__entry->balanced_dirty_ratelimit =
+					  KBps(bdi->balanced_dirty_ratelimit);
+	),
+
+	TP_printk("bdi %s: "
+		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
+		  "dirty_ratelimit=%lu task_ratelimit=%lu "
+		  "balanced_dirty_ratelimit=%lu",
+		  __entry->bdi,
+		  __entry->write_bw,		/* write bandwidth */
+		  __entry->avg_write_bw,	/* avg write bandwidth */
+		  __entry->dirty_rate,		/* bdi dirty rate */
+		  __entry->dirty_ratelimit,	/* base ratelimit */
+		  __entry->task_ratelimit, /* ratelimit with position control */
+		  __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
+	)
+);
+
+TRACE_EVENT(balance_dirty_pages,
+
+	TP_PROTO(struct backing_dev_info *bdi,
+		 unsigned long thresh,
+		 unsigned long bg_thresh,
+		 unsigned long dirty,
+		 unsigned long bdi_thresh,
+		 unsigned long bdi_dirty,
+		 unsigned long dirty_ratelimit,
+		 unsigned long task_ratelimit,
+		 unsigned long dirtied,
+		 long pause,
+		 unsigned long start_time),
+
+	TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
+		dirty_ratelimit, task_ratelimit,
+		dirtied, pause, start_time),
+
+	TP_STRUCT__entry(
+		__array(	 char,	bdi, 32)
+		__field(unsigned long,	limit)
+		__field(unsigned long,	setpoint)
+		__field(unsigned long,	dirty)
+		__field(unsigned long,	bdi_setpoint)
+		__field(unsigned long,	bdi_dirty)
+		__field(unsigned long,	dirty_ratelimit)
+		__field(unsigned long,	task_ratelimit)
+		__field(unsigned int,	dirtied)
+		__field(unsigned int,	dirtied_pause)
+		__field(unsigned long,	paused)
+		__field(	 long,	pause)
+	),
+
+	TP_fast_assign(
+		unsigned long freerun = (thresh + bg_thresh) / 2;
+		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
+
+		__entry->limit		= global_dirty_limit;
+		__entry->setpoint	= (global_dirty_limit + freerun) / 2;
+		__entry->dirty		= dirty;
+		__entry->bdi_setpoint	= __entry->setpoint *
+						bdi_thresh / (thresh + 1);
+		__entry->bdi_dirty	= bdi_dirty;
+		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
+		__entry->task_ratelimit	= KBps(task_ratelimit);
+		__entry->dirtied	= dirtied;
+		__entry->dirtied_pause	= current->nr_dirtied_pause;
+		__entry->pause		= pause * 1000 / HZ;
+		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
+	),
+
+
+	TP_printk("bdi %s: "
+		  "limit=%lu setpoint=%lu dirty=%lu "
+		  "bdi_setpoint=%lu bdi_dirty=%lu "
+		  "dirty_ratelimit=%lu task_ratelimit=%lu "
+		  "dirtied=%u dirtied_pause=%u "
+		  "paused=%lu pause=%ld",
+		  __entry->bdi,
+		  __entry->limit,
+		  __entry->setpoint,
+		  __entry->dirty,
+		  __entry->bdi_setpoint,
+		  __entry->bdi_dirty,
+		  __entry->dirty_ratelimit,
+		  __entry->task_ratelimit,
+		  __entry->dirtied,
+		  __entry->dirtied_pause,
+		  __entry->paused,	/* ms */
+		  __entry->pause	/* ms */
+	)
+);
+
 DECLARE_EVENT_CLASS(writeback_congest_waited_template,

 	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

kernel/fork.c
@@ -1299,6 +1299,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->pdeath_signal = 0;
 	p->exit_state = 0;

+	p->nr_dirtied = 0;
+	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
+
 	/*
 	 * Ok, make it visible to the rest of the system.
 	 * We dont wake it up yet.
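The initial nr_dirtied_pause works out to 128KB worth of pages whatever the page size, since the shift removes exactly the pages-to-KB factor. A quick stand-alone sanity check (assuming 4KB pages, i.e. PAGE_SHIFT = 12):

	#include <stdio.h>

	int main(void)
	{
		int page_shift = 12;			/* assumed: 4KB pages */
		int pause = 128 >> (page_shift - 10);	/* formula from copy_process() */

		/* 128 >> 2 = 32 pages; 32 * 4KB = 128KB dirtied before the first check */
		printf("%d pages (%d KB)\n", pause, pause << (page_shift - 10));
		return 0;
	}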
mm/backing-dev.c
@@ -97,6 +97,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		   "BdiDirtyThresh:     %10lu kB\n"
 		   "DirtyThresh:        %10lu kB\n"
 		   "BackgroundThresh:   %10lu kB\n"
+		   "BdiDirtied:         %10lu kB\n"
 		   "BdiWritten:         %10lu kB\n"
 		   "BdiWriteBandwidth:  %10lu kBps\n"
 		   "b_dirty:            %10lu\n"
@@ -109,6 +110,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		   K(bdi_thresh),
 		   K(dirty_thresh),
 		   K(background_thresh),
+		   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
 		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
 		   (unsigned long) K(bdi->write_bandwidth),
 		   nr_dirty,
@@ -473,7 +475,8 @@ static int bdi_forker_thread(void *ptr)
 				 * the bdi from the thread. Hopefully 1024 is
 				 * large enough for efficient IO.
 				 */
-				writeback_inodes_wb(&bdi->wb, 1024);
+				writeback_inodes_wb(&bdi->wb, 1024,
+						    WB_REASON_FORKER_THREAD);
 			} else {
 				/*
 				 * The spinlock makes sure we do not lose
@@ -683,6 +686,8 @@ int bdi_init(struct backing_dev_info *bdi)
 	bdi->bw_time_stamp = jiffies;
 	bdi->written_stamp = 0;

+	bdi->balanced_dirty_ratelimit = INIT_BW;
+	bdi->dirty_ratelimit = INIT_BW;
 	bdi->write_bandwidth = INIT_BW;
 	bdi->avg_write_bandwidth = INIT_BW;

mm/page-writeback.c
@@ -46,26 +46,14 @@
 */
 #define BANDWIDTH_INTERVAL	max(HZ/5, 1)

+#define RATELIMIT_CALC_SHIFT	10
+
 /*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
 static long ratelimit_pages = 32;

-/*
- * When balance_dirty_pages decides that the caller needs to perform some
- * non-background writeback, this is how many pages it will attempt to write.
- * It should be somewhat larger than dirtied pages to ensure that reasonably
- * large amounts of I/O are submitted.
- */
-static inline long sync_writeback_pages(unsigned long dirtied)
-{
-	if (dirtied < ratelimit_pages)
-		dirtied = ratelimit_pages;
-
-	return dirtied + dirtied / 2;
-}
-
 /* The following parameters are exported via /proc/sys/vm */

 /*
@@ -167,6 +155,8 @@ static void update_completion_period(void)
 	int shift = calc_period_shift();
 	prop_change_shift(&vm_completions, shift);
 	prop_change_shift(&vm_dirties, shift);
+
+	writeback_set_ratelimit();
 }

 int dirty_background_ratio_handler(struct ctl_table *table, int write,
@@ -260,50 +250,6 @@ static void bdi_writeout_fraction(struct backing_dev_info *bdi,
 				numerator, denominator);
 }

-static inline void task_dirties_fraction(struct task_struct *tsk,
-		long *numerator, long *denominator)
-{
-	prop_fraction_single(&vm_dirties, &tsk->dirties,
-				numerator, denominator);
-}
-
-/*
- * task_dirty_limit - scale down dirty throttling threshold for one task
- *
- * task specific dirty limit:
- *
- *   dirty -= (dirty/8) * p_{t}
- *
- * To protect light/slow dirtying tasks from heavier/fast ones, we start
- * throttling individual tasks before reaching the bdi dirty limit.
- * Relatively low thresholds will be allocated to heavy dirtiers. So when
- * dirty pages grow large, heavy dirtiers will be throttled first, which will
- * effectively curb the growth of dirty pages. Light dirtiers with high enough
- * dirty threshold may never get throttled.
- */
-#define TASK_LIMIT_FRACTION	8
-static unsigned long task_dirty_limit(struct task_struct *tsk,
-				       unsigned long bdi_dirty)
-{
-	long numerator, denominator;
-	unsigned long dirty = bdi_dirty;
-	u64 inv = dirty / TASK_LIMIT_FRACTION;
-
-	task_dirties_fraction(tsk, &numerator, &denominator);
-	inv *= numerator;
-	do_div(inv, denominator);
-
-	dirty -= inv;
-
-	return max(dirty, bdi_dirty/2);
-}
-
-/* Minimum limit for any task */
-static unsigned long task_min_dirty_limit(unsigned long bdi_dirty)
-{
-	return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION;
-}
-
 /*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, can not
@@ -413,6 +359,12 @@ unsigned long determine_dirtyable_memory(void)
 	return x + 1;	/* Ensure that we never return 0 */
 }

+static unsigned long dirty_freerun_ceiling(unsigned long thresh,
+					   unsigned long bg_thresh)
+{
+	return (thresh + bg_thresh) / 2;
+}
+
 static unsigned long hard_dirty_limit(unsigned long thresh)
 {
 	return max(thresh, global_dirty_limit);
@@ -497,6 +449,198 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
 	return bdi_dirty;
 }

+/*
+ * Dirty position control.
+ *
+ * (o) global/bdi setpoints
+ *
+ * We want the dirty pages be balanced around the global/bdi setpoints.
+ * When the number of dirty pages is higher/lower than the setpoint, the
+ * dirty position control ratio (and hence task dirty ratelimit) will be
+ * decreased/increased to bring the dirty pages back to the setpoint.
+ *
+ *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
+ *
+ *     if (dirty < setpoint) scale up   pos_ratio
+ *     if (dirty > setpoint) scale down pos_ratio
+ *
+ *     if (bdi_dirty < bdi_setpoint) scale up   pos_ratio
+ *     if (bdi_dirty > bdi_setpoint) scale down pos_ratio
+ *
+ *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
+ *
+ * (o) global control line
+ *
+ *     ^ pos_ratio
+ *     |
+ *     |            |<===== global dirty control scope ======>|
+ * 2.0 .............*
+ *     |            .*
+ *     |            . *
+ *     |            .   *
+ *     |            .     *
+ *     |            .        *
+ *     |            .            *
+ * 1.0 ................................*
+ *     |            .                  .     *
+ *     |            .                  .          *
+ *     |            .                  .              *
+ *     |            .                  .                 *
+ *     |            .                  .                    *
+ *   0 +------------.------------------.----------------------*------------->
+ *           freerun^          setpoint^                 limit^   dirty pages
+ *
+ * (o) bdi control line
+ *
+ *     ^ pos_ratio
+ *     |
+ *     |            *
+ *     |              *
+ *     |                *
+ *     |                  *
+ *     |                    * |<=========== span ============>|
+ * 1.0 .......................*
+ *     |                      . *
+ *     |                      .   *
+ *     |                      .     *
+ *     |                      .       *
+ *     |                      .         *
+ *     |                      .           *
+ *     |                      .             *
+ *     |                      .               *
+ *     |                      .                 *
+ *     |                      .                   *
+ *     |                      .                     *
+ * 1/4 ...............................................* * * * * * * * * * * *
+ *     |                      .                         .
+ *     |                      .                           .
+ *     |                      .                             .
+ *   0 +----------------------.-------------------------------.------------->
+ *                bdi_setpoint^                     x_intercept^
+ *
+ * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can
+ * be smoothly throttled down to normal if it starts high in situations like
+ * - start writing to a slow SD card and a fast disk at the same time. The SD
+ *   card's bdi_dirty may rush to many times higher than bdi_setpoint.
+ * - the bdi dirty thresh drops quickly due to change of JBOD workload
+ */
+static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
+					unsigned long thresh,
+					unsigned long bg_thresh,
+					unsigned long dirty,
+					unsigned long bdi_thresh,
+					unsigned long bdi_dirty)
+{
+	unsigned long write_bw = bdi->avg_write_bandwidth;
+	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
+	unsigned long limit = hard_dirty_limit(thresh);
+	unsigned long x_intercept;
+	unsigned long setpoint;		/* dirty pages' target balance point */
+	unsigned long bdi_setpoint;
+	unsigned long span;
+	long long pos_ratio;		/* for scaling up/down the rate limit */
+	long x;
+
+	if (unlikely(dirty >= limit))
+		return 0;
+
+	/*
+	 * global setpoint
+	 *
+	 *                           setpoint - dirty 3
+	 *        f(dirty) := 1.0 + (----------------)
+	 *                           limit - setpoint
+	 *
+	 * it's a 3rd order polynomial that subjects to
+	 *
+	 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
+	 * (2) f(setpoint) = 1.0 => the balance point
+	 * (3) f(limit)    = 0   => the hard limit
+	 * (4) df/dx      <= 0	 => negative feedback control
+	 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
+	 *     => fast response on large errors; small oscillation near setpoint
+	 */
+	setpoint = (freerun + limit) / 2;
+	x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
+		    limit - setpoint + 1);
+	pos_ratio = x;
+	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
+
+	/*
+	 * We have computed basic pos_ratio above based on global situation. If
+	 * the bdi is over/under its share of dirty pages, we want to scale
+	 * pos_ratio further down/up. That is done by the following mechanism.
+	 */
+
+	/*
+	 * bdi setpoint
+	 *
+	 *        f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint)
+	 *
+	 *                        x_intercept - bdi_dirty
+	 *                     := --------------------------
+	 *                        x_intercept - bdi_setpoint
+	 *
+	 * The main bdi control line is a linear function that subjects to
+	 *
+	 * (1) f(bdi_setpoint) = 1.0
+	 * (2) k = - 1 / (8 * write_bw)  (in single bdi case)
+	 *     or equally: x_intercept = bdi_setpoint + 8 * write_bw
+	 *
+	 * For single bdi case, the dirty pages are observed to fluctuate
+	 * regularly within range
+	 *        [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2]
+	 * for various filesystems, where (2) can yield in a reasonable 12.5%
+	 * fluctuation range for pos_ratio.
+	 *
+	 * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its
+	 * own size, so move the slope over accordingly and choose a slope that
+	 * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh.
+	 */
+	if (unlikely(bdi_thresh > thresh))
+		bdi_thresh = thresh;
+	bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
+	/*
+	 * scale global setpoint to bdi's:
+	 *	bdi_setpoint = setpoint * bdi_thresh / thresh
+	 */
+	x = div_u64((u64)bdi_thresh << 16, thresh + 1);
+	bdi_setpoint = setpoint * (u64)x >> 16;
+	/*
+	 * Use span=(8*write_bw) in single bdi case as indicated by
+	 * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case.
+	 *
+	 *        bdi_thresh                    thresh - bdi_thresh
+	 * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh
+	 *          thresh                            thresh
+	 */
+	span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16;
+	x_intercept = bdi_setpoint + span;
+
+	if (bdi_dirty < x_intercept - span / 4) {
+		pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
+				    x_intercept - bdi_setpoint + 1);
+	} else
+		pos_ratio /= 4;
+
+	/*
+	 * bdi reserve area, safeguard against dirty pool underrun and disk idle
+	 * It may push the desired control point of global dirty pages higher
+	 * than setpoint.
+	 */
+	x_intercept = bdi_thresh / 2;
+	if (bdi_dirty < x_intercept) {
+		if (bdi_dirty > x_intercept / 8)
+			pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty);
+		else
+			pos_ratio *= 8;
+	}
+
+	return pos_ratio;
+}
+
 static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
 				       unsigned long elapsed,
 				       unsigned long written)
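To see the global control line behave, here is a stand-alone sketch of the cubic above, using the same fixed-point arithmetic with invented sample numbers (freerun=1000, limit=2000 pages are assumptions, not values from the patch). It prints roughly 2.0 at the freerun ceiling, 1.0 at the setpoint and 0 at the hard limit:

	#include <stdio.h>

	#define RATELIMIT_CALC_SHIFT 10

	/* mirrors the global setpoint cubic f(dirty) from bdi_position_ratio() */
	static long long pos_ratio(long dirty, long freerun, long limit)
	{
		long setpoint = (freerun + limit) / 2;
		long long x = ((long long)(setpoint - dirty) <<
			       RATELIMIT_CALC_SHIFT) / (limit - setpoint + 1);
		long long pos = x;

		pos = pos * x >> RATELIMIT_CALC_SHIFT;	/* x^2 */
		pos = pos * x >> RATELIMIT_CALC_SHIFT;	/* x^3 */
		return pos + (1 << RATELIMIT_CALC_SHIFT);
	}

	int main(void)
	{
		long freerun = 1000, limit = 2000;	/* sample numbers only */

		for (long dirty = freerun; dirty <= limit; dirty += 250)
			printf("dirty=%4ld pos_ratio=%.3f\n", dirty,
			       pos_ratio(dirty, freerun, limit) / 1024.0);
		return 0;
	}

The printed curve (2.0, 1.125, 1.0, 0.875, ~0.0) shows property (5) directly: the slope is flattest around the setpoint and steepest near the freerun and limit ends.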
@@ -593,8 +737,153 @@ static void global_update_bandwidth(unsigned long thresh,
 	spin_unlock(&dirty_lock);
 }

+/*
+ * Maintain bdi->dirty_ratelimit, the base dirty throttle rate.
+ *
+ * Normal bdi tasks will be curbed at or below it in long term.
+ * Obviously it should be around (write_bw / N) when there are N dd tasks.
+ */
+static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
+				       unsigned long thresh,
+				       unsigned long bg_thresh,
+				       unsigned long dirty,
+				       unsigned long bdi_thresh,
+				       unsigned long bdi_dirty,
+				       unsigned long dirtied,
+				       unsigned long elapsed)
+{
+	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
+	unsigned long limit = hard_dirty_limit(thresh);
+	unsigned long setpoint = (freerun + limit) / 2;
+	unsigned long write_bw = bdi->avg_write_bandwidth;
+	unsigned long dirty_ratelimit = bdi->dirty_ratelimit;
+	unsigned long dirty_rate;
+	unsigned long task_ratelimit;
+	unsigned long balanced_dirty_ratelimit;
+	unsigned long pos_ratio;
+	unsigned long step;
+	unsigned long x;
+
+	/*
+	 * The dirty rate will match the writeout rate in long term, except
+	 * when dirty pages are truncated by userspace or re-dirtied by FS.
+	 */
+	dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed;
+
+	pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty,
+				       bdi_thresh, bdi_dirty);
+	/*
+	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
+	 */
+	task_ratelimit = (u64)dirty_ratelimit *
+					pos_ratio >> RATELIMIT_CALC_SHIFT;
+	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
+
+	/*
+	 * A linear estimation of the "balanced" throttle rate. The theory is,
+	 * if there are N dd tasks, each throttled at task_ratelimit, the bdi's
+	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
+	 * formula will yield the balanced rate limit (write_bw / N).
+	 *
+	 * Note that the expanded form is not a pure rate feedback:
+	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
+	 * but also takes pos_ratio into account:
+	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
+	 *
+	 * (1) is not realistic because pos_ratio also takes part in balancing
+	 * the dirty rate.  Consider the state
+	 *	pos_ratio = 0.5						     (3)
+	 *	rate = 2 * (write_bw / N)				     (4)
+	 * If (1) is used, it will stuck in that state! Because each dd will
+	 * be throttled at
+	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
+	 * yielding
+	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
+	 * put (6) into (1) we get
+	 *	rate_(i+1) = rate_(i)					     (7)
+	 *
+	 * So we end up using (2) to always keep
+	 *	rate_(i+1) ~= (write_bw / N)				     (8)
+	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
+	 * pos_ratio is able to drive itself to 1.0, which is not only where
+	 * the dirty count meet the setpoint, but also where the slope of
+	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
+	 */
+	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
+					   dirty_rate | 1);
+
+	/*
+	 * We could safely do this and return immediately:
+	 *
+	 *	bdi->dirty_ratelimit = balanced_dirty_ratelimit;
+	 *
+	 * However to get a more stable dirty_ratelimit, the below elaborated
+	 * code makes use of task_ratelimit to filter out sigular points and
+	 * limit the step size.
+	 *
+	 * The below code essentially only uses the relative value of
+	 *
+	 *	task_ratelimit - dirty_ratelimit
+	 *	= (pos_ratio - 1) * dirty_ratelimit
+	 *
+	 * which reflects the direction and size of dirty position error.
+	 */
+
+	/*
+	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
+	 * task_ratelimit is on the same side of dirty_ratelimit, too.
+	 * For example, when
+	 * - dirty_ratelimit > balanced_dirty_ratelimit
+	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
+	 * lowering dirty_ratelimit will help meet both the position and rate
+	 * control targets. Otherwise, don't update dirty_ratelimit if it will
+	 * only help meet the rate target. After all, what the users ultimately
+	 * feel and care are stable dirty rate and small position error.
+	 *
+	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
+	 * and filter out the sigular points of balanced_dirty_ratelimit. Which
+	 * keeps jumping around randomly and can even leap far away at times
+	 * due to the small 200ms estimation period of dirty_rate (we want to
+	 * keep that period small to reduce time lags).
+	 */
+	step = 0;
+	if (dirty < setpoint) {
+		x = min(bdi->balanced_dirty_ratelimit,
+			 min(balanced_dirty_ratelimit, task_ratelimit));
+		if (dirty_ratelimit < x)
+			step = x - dirty_ratelimit;
+	} else {
+		x = max(bdi->balanced_dirty_ratelimit,
+			 max(balanced_dirty_ratelimit, task_ratelimit));
+		if (dirty_ratelimit > x)
+			step = dirty_ratelimit - x;
+	}
+
+	/*
+	 * Don't pursue 100% rate matching. It's impossible since the balanced
+	 * rate itself is constantly fluctuating. So decrease the track speed
+	 * when it gets close to the target. Helps eliminate pointless tremors.
+	 */
+	step >>= dirty_ratelimit / (2 * step + 1);
+	/*
+	 * Limit the tracking speed to avoid overshooting.
+	 */
+	step = (step + 7) / 8;
+
+	if (dirty_ratelimit < balanced_dirty_ratelimit)
+		dirty_ratelimit += step;
+	else
+		dirty_ratelimit -= step;
+
+	bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL);
+	bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
+
+	trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit);
+}
+
 void __bdi_update_bandwidth(struct backing_dev_info *bdi,
 			    unsigned long thresh,
+			    unsigned long bg_thresh,
 			    unsigned long dirty,
 			    unsigned long bdi_thresh,
 			    unsigned long bdi_dirty,
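The balanced-rate estimation in the comment block converges in a single step in the idealized case. A toy iteration (made-up numbers; pos_ratio taken as 1.0, i.e. dirty pages sitting at the setpoint) shows dirty_ratelimit settling at write_bw / N regardless of the starting guess:

	#include <stdio.h>

	/* toy model of rate_(i+1) = rate_(i) * (write_bw / dirty_rate) */
	int main(void)
	{
		double write_bw = 100.0;	/* MB/s, assumed device throughput */
		int n_tasks = 4;		/* N concurrent dirtiers */
		double rate = 10.0;		/* arbitrary initial dirty_ratelimit */

		for (int i = 0; i < 5; i++) {
			double task_ratelimit = rate;	/* pos_ratio ~= 1.0 */
			double dirty_rate = n_tasks * task_ratelimit;
			rate = task_ratelimit * write_bw / dirty_rate;
			printf("step %d: rate = %.2f MB/s\n", i, rate);
		}
		return 0;
	}

Every step prints 25.00 MB/s (= write_bw / N), which is why the kernel then spends its effort not on convergence but on smoothing: the step-size filtering above only moves dirty_ratelimit when both the rate and the position error agree on the direction.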
@@ -602,6 +891,7 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi,
 {
 	unsigned long now = jiffies;
 	unsigned long elapsed = now - bdi->bw_time_stamp;
+	unsigned long dirtied;
 	unsigned long written;

 	/*
@@ -610,6 +900,7 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi,
 	if (elapsed < BANDWIDTH_INTERVAL)
 		return;

+	dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
 	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);

 	/*
@@ -619,18 +910,23 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi,
 	if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
 		goto snapshot;

-	if (thresh)
+	if (thresh) {
 		global_update_bandwidth(thresh, dirty, now);
+
+		bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty,
+					   bdi_thresh, bdi_dirty,
+					   dirtied, elapsed);
+	}
 	bdi_update_write_bandwidth(bdi, elapsed, written);

 snapshot:
+	bdi->dirtied_stamp = dirtied;
 	bdi->written_stamp = written;
 	bdi->bw_time_stamp = now;
 }

 static void bdi_update_bandwidth(struct backing_dev_info *bdi,
 				 unsigned long thresh,
+				 unsigned long bg_thresh,
 				 unsigned long dirty,
 				 unsigned long bdi_thresh,
 				 unsigned long bdi_dirty,
@@ -639,37 +935,99 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi,
 	if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
 		return;
 	spin_lock(&bdi->wb.list_lock);
-	__bdi_update_bandwidth(bdi, thresh, dirty, bdi_thresh, bdi_dirty,
-			       start_time);
+	__bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty,
+			       bdi_thresh, bdi_dirty, start_time);
 	spin_unlock(&bdi->wb.list_lock);
 }

+/*
+ * After a task dirtied this many pages, balance_dirty_pages_ratelimited_nr()
+ * will look to see if it needs to start dirty throttling.
+ *
+ * If dirty_poll_interval is too low, big NUMA machines will call the expensive
+ * global_page_state() too often. So scale it near-sqrt to the safety margin
+ * (the number of pages we may dirty without exceeding the dirty limits).
+ */
+static unsigned long dirty_poll_interval(unsigned long dirty,
+					 unsigned long thresh)
+{
+	if (thresh > dirty)
+		return 1UL << (ilog2(thresh - dirty) >> 1);
+
+	return 1;
+}
+
+static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
+				   unsigned long bdi_dirty)
+{
+	unsigned long bw = bdi->avg_write_bandwidth;
+	unsigned long hi = ilog2(bw);
+	unsigned long lo = ilog2(bdi->dirty_ratelimit);
+	unsigned long t;
+
+	/* target for 20ms max pause on 1-dd case */
+	t = HZ / 50;
+
+	/*
+	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
+	 * overheads.
+	 *
+	 * (N * 20ms) on 2^N concurrent tasks.
+	 */
+	if (hi > lo)
+		t += (hi - lo) * (20 * HZ) / 1024;
+
+	/*
+	 * Limit pause time for small memory systems. If sleeping for too long
+	 * time, a small pool of dirty/writeback pages may go empty and disk go
+	 * idle.
+	 *
+	 * 8 serves as the safety ratio.
+	 */
+	if (bdi_dirty)
+		t = min(t, bdi_dirty * HZ / (8 * bw + 1));
+
+	/*
+	 * The pause time will be settled within range (max_pause/4, max_pause).
+	 * Apply a minimal value of 4 to get a non-zero max_pause/4.
+	 */
+	return clamp_val(t, 4, MAX_PAUSE);
+}
+
 /*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
- * the caller to perform writeback if the system is over `vm_dirty_ratio'.
+ * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
 static void balance_dirty_pages(struct address_space *mapping,
-				unsigned long write_chunk)
+				unsigned long pages_dirtied)
 {
-	unsigned long nr_reclaimable, bdi_nr_reclaimable;
+	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
+	unsigned long bdi_reclaimable;
 	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
 	unsigned long bdi_dirty;
+	unsigned long freerun;
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
-	unsigned long task_bdi_thresh;
-	unsigned long min_task_bdi_thresh;
-	unsigned long pages_written = 0;
-	unsigned long pause = 1;
+	long pause = 0;
+	long uninitialized_var(max_pause);
 	bool dirty_exceeded = false;
-	bool clear_dirty_exceeded = true;
+	unsigned long task_ratelimit;
+	unsigned long uninitialized_var(dirty_ratelimit);
+	unsigned long pos_ratio;
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
 	unsigned long start_time = jiffies;

 	for (;;) {
+		/*
+		 * Unstable writes are a feature of certain networked
+		 * filesystems (i.e. NFS) in which data may have been
+		 * written to the server's write cache, but has not yet
+		 * been flushed to permanent storage.
+		 */
 		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 					global_page_state(NR_UNSTABLE_NFS);
 		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
@@ -681,12 +1039,28 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * catch-up. This avoids (excessively) small writeouts
 		 * when the bdi limits are ramping up.
 		 */
-		if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
+		freerun = dirty_freerun_ceiling(dirty_thresh,
+						background_thresh);
+		if (nr_dirty <= freerun)
 			break;

+		if (unlikely(!writeback_in_progress(bdi)))
+			bdi_start_background_writeback(bdi);
+
+		/*
+		 * bdi_thresh is not treated as some limiting factor as
+		 * dirty_thresh, due to reasons
+		 * - in JBOD setup, bdi_thresh can fluctuate a lot
+		 * - in a system with HDD and USB key, the USB key may somehow
+		 *   go into state (bdi_dirty >> bdi_thresh) either because
+		 *   bdi_dirty starts high, or because bdi_thresh drops low.
+		 *   In this case we don't want to hard throttle the USB key
+		 *   dirtiers for 100 seconds until bdi_dirty drops under
+		 *   bdi_thresh. Instead the auxiliary bdi control line in
+		 *   bdi_position_ratio() will let the dirtier task progress
+		 *   at some rate <= (write_bw / 2) for bringing down bdi_dirty.
+		 */
 		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
-		min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh);
-		task_bdi_thresh = task_dirty_limit(current, bdi_thresh);

 		/*
 		 * In order to avoid the stacked BDI deadlock we need
@@ -698,56 +1072,69 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * actually dirty; with m+n sitting in the percpu
 		 * deltas.
 		 */
-		if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) {
-			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
-			bdi_dirty = bdi_nr_reclaimable +
+		if (bdi_thresh < 2 * bdi_stat_error(bdi)) {
+			bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
+			bdi_dirty = bdi_reclaimable +
 				    bdi_stat_sum(bdi, BDI_WRITEBACK);
 		} else {
-			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
-			bdi_dirty = bdi_nr_reclaimable +
+			bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
+			bdi_dirty = bdi_reclaimable +
 				    bdi_stat(bdi, BDI_WRITEBACK);
 		}

-		dirty_exceeded = (bdi_dirty > task_bdi_thresh) ||
+		/*
+		 * The bdi thresh is somehow "soft" limit derived from the
+		 * global "hard" limit. The former helps to prevent heavy IO
+		 * bdi or process from holding back light ones; The latter is
+		 * the last resort safeguard.
+		 */
+		dirty_exceeded = (bdi_dirty > bdi_thresh) ||
 				  (nr_dirty > dirty_thresh);
-		clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) &&
-					(nr_dirty <= dirty_thresh);
-
-		if (!dirty_exceeded)
-			break;
-
-		if (!bdi->dirty_exceeded)
+		if (dirty_exceeded && !bdi->dirty_exceeded)
 			bdi->dirty_exceeded = 1;

-		bdi_update_bandwidth(bdi, dirty_thresh, nr_dirty,
-				     bdi_thresh, bdi_dirty, start_time);
+		bdi_update_bandwidth(bdi, dirty_thresh, background_thresh,
+				     nr_dirty, bdi_thresh, bdi_dirty,
+				     start_time);

-		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
-		 * Unstable writes are a feature of certain networked
-		 * filesystems (i.e. NFS) in which data may have been
-		 * written to the server's write cache, but has not yet
-		 * been flushed to permanent storage.
-		 * Only move pages to writeback if this bdi is over its
-		 * threshold otherwise wait until the disk writes catch
-		 * up.
-		 */
-		trace_balance_dirty_start(bdi);
-		if (bdi_nr_reclaimable > task_bdi_thresh) {
-			pages_written += writeback_inodes_wb(&bdi->wb,
-							     write_chunk);
-			trace_balance_dirty_written(bdi, pages_written);
-			if (pages_written >= write_chunk)
-				break;		/* We've done our duty */
+		max_pause = bdi_max_pause(bdi, bdi_dirty);
+
+		dirty_ratelimit = bdi->dirty_ratelimit;
+		pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
+					       background_thresh, nr_dirty,
+					       bdi_thresh, bdi_dirty);
+		if (unlikely(pos_ratio == 0)) {
+			pause = max_pause;
+			goto pause;
 		}
+		task_ratelimit = (u64)dirty_ratelimit *
+					pos_ratio >> RATELIMIT_CALC_SHIFT;
+		pause = (HZ * pages_dirtied) / (task_ratelimit | 1);
+		if (unlikely(pause <= 0)) {
+			trace_balance_dirty_pages(bdi,
+						  dirty_thresh,
+						  background_thresh,
+						  nr_dirty,
+						  bdi_thresh,
+						  bdi_dirty,
+						  dirty_ratelimit,
+						  task_ratelimit,
+						  pages_dirtied,
+						  pause,
+						  start_time);
+			pause = 1; /* avoid resetting nr_dirtied_pause below */
+			break;
+		}
+		pause = min(pause, max_pause);
+
+pause:
+		trace_balance_dirty_pages(bdi,
+					  dirty_thresh,
+					  background_thresh,
+					  nr_dirty,
+					  bdi_thresh,
+					  bdi_dirty,
+					  dirty_ratelimit,
+					  task_ratelimit,
+					  pages_dirtied,
+					  pause,
+					  start_time);
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		io_schedule_timeout(pause);
-		trace_balance_dirty_wait(bdi);

+		dirty_thresh = hard_dirty_limit(dirty_thresh);
 		/*
@@ -756,24 +1143,30 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * 200ms is typically more than enough to curb heavy dirtiers;
 		 * (b) the pause time limit makes the dirtiers more responsive.
 		 */
-		if (nr_dirty < dirty_thresh &&
-		    bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 &&
-		    time_after(jiffies, start_time + MAX_PAUSE))
+		if (nr_dirty < dirty_thresh)
 			break;
-
-		/*
-		 * Increase the delay for each loop, up to our previous
-		 * default of taking a 100ms nap.
-		 */
-		pause <<= 1;
-		if (pause > HZ / 10)
-			pause = HZ / 10;
 	}

-	/* Clear dirty_exceeded flag only when no task can exceed the limit */
-	if (clear_dirty_exceeded && bdi->dirty_exceeded)
+	if (!dirty_exceeded && bdi->dirty_exceeded)
 		bdi->dirty_exceeded = 0;

+	current->nr_dirtied = 0;
+	if (pause == 0) { /* in freerun area */
+		current->nr_dirtied_pause =
+				dirty_poll_interval(nr_dirty, dirty_thresh);
+	} else if (pause <= max_pause / 4 &&
+		   pages_dirtied >= current->nr_dirtied_pause) {
+		current->nr_dirtied_pause = clamp_val(
+					dirty_ratelimit * (max_pause / 2) / HZ,
+					pages_dirtied + pages_dirtied / 8,
+					pages_dirtied * 4);
+	} else if (pause >= max_pause) {
+		current->nr_dirtied_pause = 1 | clamp_val(
+					dirty_ratelimit * (max_pause / 2) / HZ,
+					pages_dirtied / 4,
+					pages_dirtied - pages_dirtied / 8);
+	}
+
 	if (writeback_in_progress(bdi))
 		return;

@@ -785,8 +1178,10 @@ static void balance_dirty_pages(struct address_space *mapping,
 	 * In normal mode, we start background writeout at the lower
 	 * background_thresh, to keep the amount of dirty memory low.
 	 */
-	if ((laptop_mode && pages_written) ||
-	    (!laptop_mode && (nr_reclaimable > background_thresh)))
+	if (laptop_mode)
+		return;
+
+	if (nr_reclaimable > background_thresh)
 		bdi_start_background_writeback(bdi);
 }

@@ -800,7 +1195,7 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 	}
 }

-static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
+static DEFINE_PER_CPU(int, bdp_ratelimits);

 /**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
@@ -820,31 +1215,39 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 					unsigned long nr_pages_dirtied)
 {
+	struct backing_dev_info *bdi = mapping->backing_dev_info;
-	unsigned long ratelimit;
-	unsigned long *p;
+	int ratelimit;
+	int *p;

-	ratelimit = ratelimit_pages;
-	if (mapping->backing_dev_info->dirty_exceeded)
-		ratelimit = 8;
+	if (!bdi_cap_account_dirty(bdi))
+		return;
+
+	ratelimit = current->nr_dirtied_pause;
+	if (bdi->dirty_exceeded)
+		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
+
+	current->nr_dirtied += nr_pages_dirtied;

+	preempt_disable();
 	/*
-	 * Check the rate limiting. Also, we do not want to throttle real-time
-	 * tasks in balance_dirty_pages(). Period.
+	 * This prevents one CPU to accumulate too many dirtied pages without
+	 * calling into balance_dirty_pages(), which can happen when there are
+	 * 1000+ tasks, all of them start dirtying pages at exactly the same
+	 * time, hence all honoured too large initial task->nr_dirtied_pause.
 	 */
-	preempt_disable();
 	p =  &__get_cpu_var(bdp_ratelimits);
-	*p += nr_pages_dirtied;
-	if (unlikely(*p >= ratelimit)) {
-		ratelimit = sync_writeback_pages(*p);
+	if (unlikely(current->nr_dirtied >= ratelimit))
 		*p = 0;
-		preempt_enable();
-		balance_dirty_pages(mapping, ratelimit);
-		return;
+	else {
+		*p += nr_pages_dirtied;
+		if (unlikely(*p >= ratelimit_pages)) {
+			*p = 0;
+			ratelimit = 0;
+		}
 	}
 	preempt_enable();
+
+	if (unlikely(current->nr_dirtied >= ratelimit))
+		balance_dirty_pages(mapping, current->nr_dirtied);
 }
 EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);

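The core of the IO-less scheme is the pause computation above: pause = HZ * pages_dirtied / task_ratelimit, i.e. sleep exactly long enough that the pages just dirtied average out to the permitted rate. A stand-alone sketch with made-up numbers (HZ, rate and batch size are assumptions for illustration):

	#include <stdio.h>

	#define HZ 1000	/* assumed tick rate, for illustration only */

	int main(void)
	{
		long task_ratelimit = 256;	/* pages/s: 1 MB/s with 4KB pages */
		long pages_dirtied = 32;	/* task's nr_dirtied on entry */

		/*
		 * Same formula as balance_dirty_pages(): dirtying 32 pages and
		 * then sleeping 125ms averages out to 256 pages/s. The "| 1"
		 * guards against division by zero.
		 */
		long pause = (HZ * pages_dirtied) / (task_ratelimit | 1);

		printf("pause = %ld jiffies (%ld ms)\n", pause, pause * 1000 / HZ);
		return 0;
	}

The nr_dirtied_pause adjustment after the loop then retunes the batch size so that future pauses land inside (max_pause/4, max_pause), keeping both the wakeup rate and the per-sleep latency bounded.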
@@ -900,7 +1303,8 @@ void laptop_mode_timer_fn(unsigned long data)
 	 * threshold
 	 */
 	if (bdi_has_dirty_io(&q->backing_dev_info))
-		bdi_start_writeback(&q->backing_dev_info, nr_pages);
+		bdi_start_writeback(&q->backing_dev_info, nr_pages,
+					WB_REASON_LAPTOP_TIMER);
 }

 /*
@@ -939,22 +1343,17 @@ void laptop_sync_completion(void)
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
- * thresholds before writeback cuts in.
- *
- * But the limit should not be set too high.  Because it also controls the
- * amount of memory which the balance_dirty_pages() caller has to write back.
- * If this is too large then the caller will block on the IO queue all the
- * time.  So limit it to four megabytes - the balance_dirty_pages() caller
- * will write six megabyte chunks, max.
+ * thresholds.
 */

 void writeback_set_ratelimit(void)
 {
-	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
+	unsigned long background_thresh;
+	unsigned long dirty_thresh;
+	global_dirty_limits(&background_thresh, &dirty_thresh);
+	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
 	if (ratelimit_pages < 16)
 		ratelimit_pages = 16;
-	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
-		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
 }

 static int __cpuinit
@@ -1324,6 +1723,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		__inc_zone_page_state(page, NR_FILE_DIRTY);
 		__inc_zone_page_state(page, NR_DIRTIED);
 		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
 		task_dirty_inc(current);
 		task_io_account_write(PAGE_CACHE_SIZE);
 	}
mm/vmscan.c
@@ -2266,7 +2266,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		 */
 		writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
 		if (total_scanned > writeback_threshold) {
-			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
+			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
+						WB_REASON_TRY_TO_FREE_PAGES);
 			sc->may_writepage = 1;
 		}
