mm/secretmem: use a folio in secretmem_fault()

Saves four implicit calls to compound_head().

Link: https://lkml.kernel.org/r/20230812062612.3184990-1-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
ZhangPeng 2023-08-12 14:26:12 +08:00 committed by Andrew Morton
parent 1b6754fea4
commit 7e2fca52ef

View File

@ -55,6 +55,7 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
gfp_t gfp = vmf->gfp_mask;
unsigned long addr;
struct page *page;
struct folio *folio;
vm_fault_t ret;
int err;
@ -66,23 +67,24 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
retry:
page = find_lock_page(mapping, offset);
if (!page) {
page = alloc_page(gfp | __GFP_ZERO);
if (!page) {
folio = folio_alloc(gfp | __GFP_ZERO, 0);
if (!folio) {
ret = VM_FAULT_OOM;
goto out;
}
page = &folio->page;
err = set_direct_map_invalid_noflush(page);
if (err) {
put_page(page);
folio_put(folio);
ret = vmf_error(err);
goto out;
}
__SetPageUptodate(page);
err = add_to_page_cache_lru(page, mapping, offset, gfp);
__folio_mark_uptodate(folio);
err = filemap_add_folio(mapping, folio, offset, gfp);
if (unlikely(err)) {
put_page(page);
folio_put(folio);
/*
* If a split of large page was required, it
* already happened when we marked the page invalid