commit fde1c4ecf9 (torvalds/linux.git, https://github.com/torvalds/linux.git)
The new boot flow for initialization of gigantic pages is as follows:

- At boot time, for a gigantic page during __alloc_bootmem_huge_page, the
  region after the first struct page is marked as noinit.

- This results in only the first struct page being initialized in
  reserve_bootmem_region.  As the tail struct pages are not initialized at
  this point, there can be a significant saving in boot time if HVO
  succeeds later on.

- Later in boot, the head page is prepped and the first
  HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page) - 1 tail struct pages
  are initialized.

- HVO is attempted.  If it is not successful, the rest of the tail struct
  pages are initialized.  If it is successful, no more tail struct pages
  need to be initialized, saving significant boot time (the sketch after
  this message works through the numbers).

The WARN_ON for an increased ref count in gather_bootmem_prealloc was
changed to a VM_BUG_ON.  This is OK as there should be no speculative
references this early in the boot process.  The VM_BUG_ONs are there just
in case such code is introduced.

[akpm@linux-foundation.org: make it nicer for 80 cols]
Link: https://lkml.kernel.org/r/20230913105401.519709-5-usama.arif@bytedance.com
Signed-off-by: Usama Arif <usama.arif@bytedance.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Fam Zheng <fam.zheng@bytedance.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
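To make the claimed saving concrete, the following back-of-the-envelope sketch works through the arithmetic for one gigantic page. The constants (4 KiB base pages, a 64-byte struct page, a 1 GiB gigantic page) are illustrative x86-64 assumptions, not values read from the kernel headers:

#include <stdio.h>

int main(void)
{
	/* Illustrative, config-dependent assumptions (not kernel values). */
	const unsigned long page_size      = 4096;      /* PAGE_SIZE */
	const unsigned long struct_page_sz = 64;        /* sizeof(struct page) */
	const unsigned long gigantic_size  = 1UL << 30; /* one 1 GiB gigantic page */

	unsigned long nr_struct_pages = gigantic_size / page_size;        /* 262144 */
	unsigned long vmemmap_bytes   = nr_struct_pages * struct_page_sz; /* 16 MiB */
	/* head + tails covered by HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page) */
	unsigned long early_inited    = page_size / struct_page_sz;       /* 64 */

	printf("struct pages initialized early:    %lu (head + %lu tails)\n",
	       early_inited, early_inited - 1);
	printf("struct pages skipped if HVO works: %lu\n",
	       nr_struct_pages - early_inited);
	printf("vmemmap freed to the buddy allocator: %lu KiB\n",
	       (vmemmap_bytes - page_size) / 1024);
	return 0;
}

Under these assumptions, if HVO fails the remaining 262080 tail struct pages are simply initialized at that point instead, so the deferral never costs correctness; in the failure case the work just moves later in boot.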
62 lines
1.7 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 */
#ifndef _LINUX_HUGETLB_VMEMMAP_H
#define _LINUX_HUGETLB_VMEMMAP_H
#include <linux/hugetlb.h>

/*
 * Reserve one vmemmap page; all vmemmap addresses are mapped to it. See
 * Documentation/mm/vmemmap_dedup.rst.
 */
#define HUGETLB_VMEMMAP_RESERVE_SIZE	PAGE_SIZE
#define HUGETLB_VMEMMAP_RESERVE_PAGES	(HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
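/*
 * Example: with the common x86-64 values of PAGE_SIZE == 4096 and
 * sizeof(struct page) == 64 (both config-dependent, illustrative only),
 * HUGETLB_VMEMMAP_RESERVE_PAGES is 4096 / 64 = 64: the reserved vmemmap
 * page holds the head struct page plus 63 tail struct pages.
 */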

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
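/*
 * hugetlb_vmemmap_optimize() remaps @head's vmemmap onto the single
 * reserved vmemmap page and frees the now-redundant vmemmap pages;
 * hugetlb_vmemmap_restore() undoes the optimization, reallocating and
 * remapping vmemmap pages before the HugeTLB page is dissolved or freed
 * back to the buddy allocator.  Both are defined in mm/hugetlb_vmemmap.c.
 */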
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);

static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
{
	return pages_per_huge_page(h) * sizeof(struct page);
}
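/*
 * Worked example (illustrative 4 KiB base pages, 64-byte struct page):
 * a 2 MiB hstate yields 512 * 64 = 32 KiB of vmemmap; a 1 GiB hstate
 * yields 262144 * 64 = 16 MiB.
 */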

/*
 * Return the vmemmap size in bytes associated with a HugeTLB page that
 * can be optimized and freed back to the buddy allocator.
 */
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;

	if (!is_power_of_2(sizeof(struct page)))
		return 0;
	return size > 0 ? size : 0;
}
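/*
 * Worked example for the helper above (same illustrative constants:
 * 4 KiB PAGE_SIZE, 64-byte struct page): a 2 MiB hstate has 32 KiB of
 * vmemmap, of which 32 KiB - 4 KiB = 28 KiB (7 of 8 vmemmap pages) is
 * optimizable; a 1 GiB hstate can free 16 MiB - 4 KiB.  The
 * is_power_of_2() check bails out because the remapping scheme relies
 * on struct pages tiling vmemmap pages exactly, which holds only when
 * sizeof(struct page) is a power of 2.
 */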
#else
static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
	return 0;
}

static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
}

static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */

static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
{
	return hugetlb_vmemmap_optimizable_size(h) != 0;
}

#endif /* _LINUX_HUGETLB_VMEMMAP_H */