mm/vmscan: throttle reclaim when no progress is being made
Memcg reclaim throttles on congestion if no reclaim progress is made. This makes little sense; the lack of progress might be due to writeback or a host of other factors.

For !memcg reclaim, it's messy. Direct reclaim primarily is throttled in the page allocator if it is failing to make progress. Kswapd throttles if too many pages are under writeback and marked for immediate reclaim.

This patch explicitly throttles if reclaim is failing to make progress.

[vbabka@suse.cz: Remove redundant code]

Link: https://lkml.kernel.org/r/20211022144651.19914-4-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: NeilBrown <neilb@suse.de>
Cc: Rik van Riel <riel@surriel.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 69392a403f
parent d818fca1ca
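For context, the mechanism this patch builds on works as follows: a reclaimer that cannot make progress sleeps on a per-node waitqueue keyed by the throttle reason, and is woken either when its timeout expires or when another reclaimer on that node does make progress (the wake_up() added in consider_reclaim_throttle() below). Here is a minimal sketch of the sleep side, assuming the pgdat->reclaim_wait[] waitqueue array and the reclaim_throttle(pgdat, reason, timeout) interface introduced earlier in this series; it is illustrative only and omits the accounting and tracing done by the real reclaim_throttle():

static void reclaim_throttle_sketch(pg_data_t *pgdat,
				    enum vmscan_throttle_state reason,
				    long timeout)
{
	/* One waitqueue per throttle reason, per node. */
	wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
	DEFINE_WAIT(wait);

	/*
	 * Sleep until either the timeout expires or a reclaimer that made
	 * progress issues wake_up() on this waitqueue.
	 */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	schedule_timeout(timeout);
	finish_wait(wqh, &wait);
}

In this patch, direct reclaim calls the real reclaim_throttle() with VMSCAN_THROTTLE_NOPROGRESS and a timeout of HZ/10 when a high-priority scan reclaims nothing, and any reclaimer that does make progress wakes the waiters.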
include/linux/mmzone.h
@@ -276,6 +276,7 @@ enum lru_list {
 enum vmscan_throttle_state {
 	VMSCAN_THROTTLE_WRITEBACK,
 	VMSCAN_THROTTLE_ISOLATED,
+	VMSCAN_THROTTLE_NOPROGRESS,
 	NR_VMSCAN_THROTTLE,
 };
 
include/trace/events/vmscan.h
@@ -29,11 +29,13 @@
 
 #define _VMSCAN_THROTTLE_WRITEBACK	(1 << VMSCAN_THROTTLE_WRITEBACK)
 #define _VMSCAN_THROTTLE_ISOLATED	(1 << VMSCAN_THROTTLE_ISOLATED)
+#define _VMSCAN_THROTTLE_NOPROGRESS	(1 << VMSCAN_THROTTLE_NOPROGRESS)
 
 #define show_throttle_flags(flags) \
 	(flags) ? __print_flags(flags, "|", \
 		{_VMSCAN_THROTTLE_WRITEBACK, "VMSCAN_THROTTLE_WRITEBACK"}, \
-		{_VMSCAN_THROTTLE_ISOLATED, "VMSCAN_THROTTLE_ISOLATED"} \
+		{_VMSCAN_THROTTLE_ISOLATED, "VMSCAN_THROTTLE_ISOLATED"}, \
+		{_VMSCAN_THROTTLE_NOPROGRESS, "VMSCAN_THROTTLE_NOPROGRESS"} \
 		) : "VMSCAN_THROTTLE_NONE"
 
 
mm/memcontrol.c
@@ -3487,19 +3487,11 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
 
 	/* try to free all pages in this cgroup */
 	while (nr_retries && page_counter_read(&memcg->memory)) {
-		int progress;
-
 		if (signal_pending(current))
 			return -EINTR;
 
-		progress = try_to_free_mem_cgroup_pages(memcg, 1,
-							GFP_KERNEL, true);
-		if (!progress) {
+		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true))
 			nr_retries--;
-			/* maybe some writeback is necessary */
-			congestion_wait(BLK_RW_ASYNC, HZ/10);
-		}
-
 	}
 
 	return 0;
mm/vmscan.c (28 changed lines)
@@ -3322,6 +3322,33 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 	return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
 }
 
+static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)
+{
+	/* If reclaim is making progress, wake any throttled tasks. */
+	if (sc->nr_reclaimed) {
+		wait_queue_head_t *wqh;
+
+		wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS];
+		if (waitqueue_active(wqh))
+			wake_up(wqh);
+
+		return;
+	}
+
+	/*
+	 * Do not throttle kswapd on NOPROGRESS as it will throttle on
+	 * VMSCAN_THROTTLE_WRITEBACK if there are too many pages under
+	 * writeback and marked for immediate reclaim at the tail of
+	 * the LRU.
+	 */
+	if (current_is_kswapd())
+		return;
+
+	/* Throttle if making no progress at high priorities. */
+	if (sc->priority < DEF_PRIORITY - 2)
+		reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS, HZ/10);
+}
+
 /*
  * This is the direct reclaim path, for page-allocating processes.  We only
  * try to reclaim pages from zones which will satisfy the caller's allocation
@@ -3406,6 +3433,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 			continue;
 		last_pgdat = zone->zone_pgdat;
 		shrink_node(zone->zone_pgdat, sc);
+		consider_reclaim_throttle(zone->zone_pgdat, sc);
 	}
 
 	/*
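Note on the threshold: DEF_PRIORITY is 12, so the sc->priority < DEF_PRIORITY - 2 check means a direct reclaimer is only throttled for lack of progress once its scan priority has dropped below 10, i.e. after the first few scan passes over the node have failed to reclaim anything. Kswapd is skipped here entirely because, per the comment above, it already throttles on VMSCAN_THROTTLE_WRITEBACK when too many pages at the tail of the LRU are under writeback and marked for immediate reclaim.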