migrate_pages: try migrate in batch asynchronously firstly

When we have locked more than one folio, we cannot wait for a lock or bit
(e.g., page lock, buffer head lock, writeback bit) synchronously;
otherwise a deadlock may be triggered.  This makes it hard to batch
synchronous migration directly.

This patch re-enables batching for synchronous migration by first trying to
migrate the folios in batch asynchronously.  Any folios that fail to be
migrated asynchronously are then migrated synchronously, one by one.

Tests show that this effectively restores the TLB flush batching performance
for synchronous migration.
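
The sketch below is an illustrative, userspace-only paraphrase of the new
control flow (the names, types, and failure model are made up and are not the
kernel API; the real implementation is migrate_pages_sync() in the diff):
pass 1 handles the whole batch asynchronously without blocking on locks, and
pass 2 retries each remaining item synchronously, one at a time, so at most
one item is locked at any moment.

	#include <stddef.h>
	#include <stdio.h>

	enum mode { MODE_ASYNC, MODE_SYNC };

	/* Hypothetical worker: 0 marks an already-migrated slot.  In this toy
	 * model, async mode fails on odd items, standing in for folios whose
	 * lock or writeback bit would require sleeping. */
	static int migrate_batch(int *items, size_t n, enum mode mode)
	{
		int nr_failed = 0;

		for (size_t i = 0; i < n; i++) {
			if (items[i] == 0)
				continue;
			if (mode == MODE_ASYNC && (items[i] & 1)) {
				nr_failed++;
				continue;
			}
			items[i] = 0;
		}
		return nr_failed;
	}

	int main(void)
	{
		int items[] = { 1, 2, 3, 4, 5 };
		size_t n = sizeof(items) / sizeof(items[0]);
		int nr_failed = 0;

		/* Pass 1: the whole batch, asynchronously. */
		migrate_batch(items, n, MODE_ASYNC);

		/* Pass 2: whatever failed, synchronously, one item at a time. */
		for (size_t i = 0; i < n; i++) {
			if (items[i])
				nr_failed += migrate_batch(&items[i], 1, MODE_SYNC);
		}

		printf("failures after sync fallback: %d\n", nr_failed);
		return 0;
	}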

Link: https://lkml.kernel.org/r/20230303030155.160983-4-ying.huang@intel.com
Fixes: 5dfab109d5 ("migrate_pages: batch _unmap and _move")
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Tested-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Xu, Pengfei" <pengfei.xu@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Stefan Roesch <shr@devkernel.io>
Cc: Tejun Heo <tj@kernel.org>
Cc: Xin Hao <xhao@linux.alibaba.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
@@ -1112,9 +1112,8 @@ static void migrate_folio_done(struct folio *src,
/* Obtain the lock on page, remove all ptes. */
static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
unsigned long private, struct folio *src,
struct folio **dstp, int force,
enum migrate_mode mode, enum migrate_reason reason,
struct list_head *ret)
struct folio **dstp, enum migrate_mode mode,
enum migrate_reason reason, struct list_head *ret)
{
struct folio *dst;
int rc = -EAGAIN;
@@ -1144,7 +1143,7 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
dst->private = NULL;
if (!folio_trylock(src)) {
if (!force || mode == MIGRATE_ASYNC)
if (mode == MIGRATE_ASYNC)
goto out;
/*
@@ -1182,8 +1181,6 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
rc = -EBUSY;
goto out;
}
if (!force)
goto out;
folio_wait_writeback(src);
}
@@ -1497,6 +1494,9 @@ static inline int try_split_folio(struct folio *folio, struct list_head *split_f
#define NR_MAX_BATCHED_MIGRATION 512
#endif
#define NR_MAX_MIGRATE_PAGES_RETRY 10
#define NR_MAX_MIGRATE_ASYNC_RETRY 3
#define NR_MAX_MIGRATE_SYNC_RETRY \
(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
struct migrate_pages_stats {
int nr_succeeded; /* Normal and large folios migrated successfully, in
@@ -1678,8 +1678,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
}
rc = migrate_folio_unmap(get_new_page, put_new_page, private,
folio, &dst, pass > 2, mode,
reason, ret_folios);
folio, &dst, mode, reason, ret_folios);
/*
* The rules are:
* Success: folio will be freed
@@ -1857,6 +1856,51 @@ out:
return rc;
}
static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
free_page_t put_new_page, unsigned long private,
enum migrate_mode mode, int reason, struct list_head *ret_folios,
struct list_head *split_folios, struct migrate_pages_stats *stats)
{
int rc, nr_failed = 0;
LIST_HEAD(folios);
struct migrate_pages_stats astats;
memset(&astats, 0, sizeof(astats));
/* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
rc = migrate_pages_batch(from, get_new_page, put_new_page, private, MIGRATE_ASYNC,
reason, &folios, split_folios, &astats,
NR_MAX_MIGRATE_ASYNC_RETRY);
stats->nr_succeeded += astats.nr_succeeded;
stats->nr_thp_succeeded += astats.nr_thp_succeeded;
stats->nr_thp_split += astats.nr_thp_split;
if (rc < 0) {
stats->nr_failed_pages += astats.nr_failed_pages;
stats->nr_thp_failed += astats.nr_thp_failed;
list_splice_tail(&folios, ret_folios);
return rc;
}
stats->nr_thp_failed += astats.nr_thp_split;
nr_failed += astats.nr_thp_split;
/*
* Fall back to migrate all failed folios one by one synchronously. All
* failed folios except split THPs will be retried, so their failure
* isn't counted
*/
list_splice_tail_init(&folios, from);
while (!list_empty(from)) {
list_move(from->next, &folios);
rc = migrate_pages_batch(&folios, get_new_page, put_new_page,
private, mode, reason, ret_folios,
split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
list_splice_tail_init(&folios, ret_folios);
if (rc < 0)
return rc;
nr_failed += rc;
}
return nr_failed;
}
/*
* migrate_pages - migrate the folios specified in a list, to the free folios
* supplied as the target for the page migration
@@ -1888,7 +1932,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
int rc, rc_gather;
int nr_pages, batch;
int nr_pages;
struct folio *folio, *folio2;
LIST_HEAD(folios);
LIST_HEAD(ret_folios);
@@ -1904,10 +1948,6 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
if (rc_gather < 0)
goto out;
if (mode == MIGRATE_ASYNC)
batch = NR_MAX_BATCHED_MIGRATION;
else
batch = 1;
again:
nr_pages = 0;
list_for_each_entry_safe(folio, folio2, from, lru) {
@@ -1918,16 +1958,20 @@ again:
}
nr_pages += folio_nr_pages(folio);
if (nr_pages >= batch)
if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
break;
}
if (nr_pages >= batch)
if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
list_cut_before(&folios, from, &folio2->lru);
else
list_splice_init(from, &folios);
rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
mode, reason, &ret_folios, &split_folios, &stats,
NR_MAX_MIGRATE_PAGES_RETRY);
if (mode == MIGRATE_ASYNC)
rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
mode, reason, &ret_folios, &split_folios, &stats,
NR_MAX_MIGRATE_PAGES_RETRY);
else
rc = migrate_pages_sync(&folios, get_new_page, put_new_page, private,
mode, reason, &ret_folios, &split_folios, &stats);
list_splice_tail_init(&folios, &ret_folios);
if (rc < 0) {
rc_gather = rc;