Mirror of https://github.com/torvalds/linux.git, synced 2024-11-10 22:21:40 +00:00.
memory-failure: use a folio in me_huge_page()
This function was already explicitly calling compound_head(); unfortunately the compiler can't know that and elide the redundant calls to compound_head() buried in page_mapping(), unlock_page(), etc. Switch to using a folio, which does let us elide these calls. Link: https://lkml.kernel.org/r/20231117161447.2461643-5-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Naoya Horiguchi <naoya.horiguchi@nec.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
f709239357
commit
b6fd410c32
@ -1182,25 +1182,25 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p)
|
||||
*/
|
||||
static int me_huge_page(struct page_state *ps, struct page *p)
|
||||
{
|
||||
struct folio *folio = page_folio(p);
|
||||
int res;
|
||||
struct page *hpage = compound_head(p);
|
||||
struct address_space *mapping;
|
||||
bool extra_pins = false;
|
||||
|
||||
mapping = page_mapping(hpage);
|
||||
mapping = folio_mapping(folio);
|
||||
if (mapping) {
|
||||
res = truncate_error_page(hpage, page_to_pfn(p), mapping);
|
||||
res = truncate_error_page(&folio->page, page_to_pfn(p), mapping);
|
||||
/* The page is kept in page cache. */
|
||||
extra_pins = true;
|
||||
unlock_page(hpage);
|
||||
folio_unlock(folio);
|
||||
} else {
|
||||
unlock_page(hpage);
|
||||
folio_unlock(folio);
|
||||
/*
|
||||
* migration entry prevents later access on error hugepage,
|
||||
* so we can free and dissolve it into buddy to save healthy
|
||||
* subpages.
|
||||
*/
|
||||
put_page(hpage);
|
||||
folio_put(folio);
|
||||
if (__page_handle_poison(p) >= 0) {
|
||||
page_ref_inc(p);
|
||||
res = MF_RECOVERED;
|
||||
|
Loading…
Reference in New Issue
Block a user