writeback: move over_bground_thresh() to mm/page-writeback.c
Move over_bground_thresh() to mm/page-writeback.c and rename it to
wb_over_bg_thresh(). The function is closely tied to the dirty throttling
mechanism implemented in page-writeback.c. This relocation will allow future
updates necessary for cgroup writeback support. While at it, add a function
comment. This is pure reorganization and doesn't introduce any behavioral
changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
parent 9fc3a43e17
commit aa661bbe1e
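For orientation before reading the diff: the helper being moved answers one question with two checks. Writeback should continue if either the system-wide count of dirty pages (plus unstable NFS pages) is above the global background threshold, or this particular bdi_writeback's reclaimable-page count is above its own share of that threshold as computed by wb_calc_thresh(). The standalone C sketch below models only that decision logic with plain integers; the struct and function names (wb_model, model_over_bg_thresh) are illustrative stand-ins, not kernel APIs, and the real code uses global_dirty_limits(), global_page_state() and wb_stat() as shown in the diff.

/*
 * Simplified userspace model of the wb_over_bg_thresh() decision.
 * Names and values are illustrative only; see the kernel diff below
 * for the real implementation.
 */
#include <stdbool.h>
#include <stdio.h>

struct wb_model {
	unsigned long reclaimable;	/* pages this writeback domain could clean */
	unsigned long thresh_share;	/* its share of the background threshold */
};

static bool model_over_bg_thresh(unsigned long global_dirty,
				 unsigned long global_bg_thresh,
				 const struct wb_model *wb)
{
	/* Check 1: system-wide dirty pages above the background threshold? */
	if (global_dirty > global_bg_thresh)
		return true;

	/* Check 2: this writeback domain above its own proportional share? */
	if (wb->reclaimable > wb->thresh_share)
		return true;

	return false;
}

int main(void)
{
	struct wb_model wb = { .reclaimable = 120, .thresh_share = 100 };

	/* Globally under threshold, but this domain is over its share. */
	printf("%s\n", model_over_bg_thresh(500, 1000, &wb) ? "flush" : "idle");
	return 0;
}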
fs/fs-writeback.c

@@ -1071,22 +1071,6 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
 	return nr_pages - work.nr_pages;
 }
 
-static bool over_bground_thresh(struct bdi_writeback *wb)
-{
-	unsigned long background_thresh, dirty_thresh;
-
-	global_dirty_limits(&background_thresh, &dirty_thresh);
-
-	if (global_page_state(NR_FILE_DIRTY) +
-	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
-		return true;
-
-	if (wb_stat(wb, WB_RECLAIMABLE) > wb_calc_thresh(wb, background_thresh))
-		return true;
-
-	return false;
-}
-
 /*
  * Explicit flushing or periodic writeback of "old" data.
  *
@@ -1136,7 +1120,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * For background writeout, stop when we are below the
 		 * background dirty threshold
 		 */
-		if (work->for_background && !over_bground_thresh(wb))
+		if (work->for_background && !wb_over_bg_thresh(wb))
 			break;
 
 		/*
@@ -1227,7 +1211,7 @@ static unsigned long get_nr_dirty_pages(void)
 
 static long wb_check_background_flush(struct bdi_writeback *wb)
 {
-	if (over_bground_thresh(wb)) {
+	if (wb_over_bg_thresh(wb)) {
 
 		struct wb_writeback_work work = {
 			.nr_pages	= LONG_MAX,
include/linux/writeback.h

@@ -207,6 +207,7 @@ unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
 void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
 void page_writeback_init(void);
 void balance_dirty_pages_ratelimited(struct address_space *mapping);
+bool wb_over_bg_thresh(struct bdi_writeback *wb);
 
 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
 				void *data);
mm/page-writeback.c

@@ -1740,6 +1740,29 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 }
 EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
 
+/**
+ * wb_over_bg_thresh - does @wb need to be written back?
+ * @wb: bdi_writeback of interest
+ *
+ * Determines whether background writeback should keep writing @wb or it's
+ * clean enough.  Returns %true if writeback should continue.
+ */
+bool wb_over_bg_thresh(struct bdi_writeback *wb)
+{
+	unsigned long background_thresh, dirty_thresh;
+
+	global_dirty_limits(&background_thresh, &dirty_thresh);
+
+	if (global_page_state(NR_FILE_DIRTY) +
+	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
+		return true;
+
+	if (wb_stat(wb, WB_RECLAIMABLE) > wb_calc_thresh(wb, background_thresh))
+		return true;
+
+	return false;
+}
+
 void throttle_vm_writeout(gfp_t gfp_mask)
 {
 	unsigned long background_thresh;