mm/gup: clear the LRU flag of a page before adding to LRU batch

If a large amount of CMA memory is configured in the system (for
example, CMA memory accounts for 50% of system memory), starting a
virtual machine with device passthrough will call
pin_user_pages_remote(..., FOLL_LONGTERM, ...) to pin memory.  Normally,
if a page is present and in the CMA area, pin_user_pages_remote() will
migrate it from the CMA area to a non-CMA area because of the
FOLL_LONGTERM flag.  But the current code causes the migration to fail
due to unexpected page refcounts, and the virtual machine eventually
fails to start.
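
The pinning call path described above looks roughly like this (an
illustrative sketch with abbreviated, hypothetical argument names; see
mm/gup.c for the exact signature in your tree):

	/*
	 * Long-term pin for device passthrough; FOLL_LONGTERM makes GUP
	 * try to migrate pages out of the CMA area before pinning them.
	 */
	pinned = pin_user_pages_remote(mm, start, nr_pages,
				       FOLL_WRITE | FOLL_LONGTERM,
				       pages, &locked);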

Adding a page to an LRU batch increases its refcount by one; removing
it from the batch decreases it by one.  Page migration requires that
the page not be referenced by anything other than its page mapping.
Before migrating a page, we should therefore drain it from the LRU
batch in case it sits there.  However, folio_test_lru() is not
sufficient to tell whether the page is in an LRU batch or not, and if
the page is in an LRU batch, the migration will fail.
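
The accounting that trips migration up, as an illustrative sketch (not
code from this patch):

	folio_get(folio);		/* +1: reference held by the LRU batch */
	folio_batch_add(fbatch, folio);	/* parked until some CPU drains it */

	/*
	 * Until the drain happens, migration sees one more reference
	 * than the page mapping accounts for and gives up.
	 */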

To solve the problem above, modify the logic of adding a page to an
LRU batch: clear the page's LRU flag before adding it, so that
folio_test_lru(page) now tells us whether the page is in an LRU batch.
This is quite valuable, because we likely don't want to blindly drain
the LRU batches simply because there is some unexpected reference on a
page, as described above.
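
Condensed from the hunks below, the add-to-batch pattern becomes:

	folio_get(folio);
	if (!folio_test_clear_lru(folio)) {
		/* already isolated, or already on some LRU batch */
		folio_put(folio);
		return;
	}
	folio_batch_add_and_move(fbatch, folio, move_fn);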

This change makes the LRU flag of a page invisible for longer, which
may impact some programs.  For example, as long as a page is on an LRU
batch, we cannot isolate it and we cannot check whether it is an LRU
page.  Further, a page can now only be on exactly one LRU batch.  This
doesn't seem to matter much, because a new page allocated from buddy
and added to an LRU batch, or an isolated page, may likewise have its
LRU flag invisible for a long time.

Link: https://lkml.kernel.org/r/1720075944-27201-1-git-send-email-yangge1116@126.com
Link: https://lkml.kernel.org/r/1720008153-16035-1-git-send-email-yangge1116@126.com
Fixes: 9a4e9f3b2d ("mm: update get_user_pages_longterm to migrate pages allocated from CMA region")
Signed-off-by: yangge <yangge1116@126.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -211,10 +211,6 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 	for (i = 0; i < folio_batch_count(fbatch); i++) {
 		struct folio *folio = fbatch->folios[i];
 
-		/* block memcg migration while the folio moves between lru */
-		if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
-			continue;
-
 		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 		move_fn(lruvec, folio);
@@ -255,11 +251,16 @@ static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
 void folio_rotate_reclaimable(struct folio *folio)
 {
 	if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
-	    !folio_test_unevictable(folio) && folio_test_lru(folio)) {
+	    !folio_test_unevictable(folio)) {
 		struct folio_batch *fbatch;
 		unsigned long flags;
 
 		folio_get(folio);
+		if (!folio_test_clear_lru(folio)) {
+			folio_put(folio);
+			return;
+		}
+
 		local_lock_irqsave(&lru_rotate.lock, flags);
 		fbatch = this_cpu_ptr(&lru_rotate.fbatch);
 		folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
@@ -352,11 +353,15 @@ static void folio_activate_drain(int cpu)
 void folio_activate(struct folio *folio)
 {
-	if (folio_test_lru(folio) && !folio_test_active(folio) &&
-	    !folio_test_unevictable(folio)) {
+	if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
 		struct folio_batch *fbatch;
 
 		folio_get(folio);
+		if (!folio_test_clear_lru(folio)) {
+			folio_put(folio);
+			return;
+		}
+
 		local_lock(&cpu_fbatches.lock);
 		fbatch = this_cpu_ptr(&cpu_fbatches.activate);
 		folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
@@ -700,6 +705,11 @@ void deactivate_file_folio(struct folio *folio)
 		return;
 
 	folio_get(folio);
+	if (!folio_test_clear_lru(folio)) {
+		folio_put(folio);
+		return;
+	}
+
 	local_lock(&cpu_fbatches.lock);
 	fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
 	folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
@@ -716,11 +726,16 @@ void deactivate_file_folio(struct folio *folio)
  */
 void folio_deactivate(struct folio *folio)
 {
-	if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
-	    (folio_test_active(folio) || lru_gen_enabled())) {
+	if (!folio_test_unevictable(folio) && (folio_test_active(folio) ||
+	    lru_gen_enabled())) {
 		struct folio_batch *fbatch;
 
 		folio_get(folio);
+		if (!folio_test_clear_lru(folio)) {
+			folio_put(folio);
+			return;
+		}
+
 		local_lock(&cpu_fbatches.lock);
 		fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
 		folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
@@ -737,12 +752,16 @@ void folio_deactivate(struct folio *folio)
  */
 void folio_mark_lazyfree(struct folio *folio)
 {
-	if (folio_test_lru(folio) && folio_test_anon(folio) &&
-	    folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
-	    !folio_test_unevictable(folio)) {
+	if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+	    !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
 		struct folio_batch *fbatch;
 
 		folio_get(folio);
+		if (!folio_test_clear_lru(folio)) {
+			folio_put(folio);
+			return;
+		}
+
 		local_lock(&cpu_fbatches.lock);
 		fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
 		folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
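
With this in place, a migration path can decide whether draining is
worthwhile instead of draining blindly; a hypothetical caller-side
sketch (not part of this patch):

	/*
	 * A cleared LRU flag now implies "isolated or sitting in a
	 * per-CPU LRU batch", so only then can flushing the batches help.
	 */
	if (!folio_test_lru(folio))
		lru_add_drain_all();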