// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 * Author: Muchun Song <songmuchun@bytedance.com>
 */
|
|
|
|
#ifndef _LINUX_HUGETLB_VMEMMAP_H
|
|
|
|
#define _LINUX_HUGETLB_VMEMMAP_H
|
|
|
|
#include <linux/hugetlb.h>
|
|
|
|
|
/*
 * Reserve one vmemmap page; all vmemmap addresses are mapped to it. See
 * Documentation/vm/vmemmap_dedup.rst.
 */
#define HUGETLB_VMEMMAP_RESERVE_SIZE PAGE_SIZE
|
2023-09-13 10:54:01 +00:00
|
|
|
#define HUGETLB_VMEMMAP_RESERVE_PAGES (HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
|
|
|
|
|
|
|
|
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
|
2023-10-11 14:45:57 +00:00
|
|
|
int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio);
|
2023-10-19 02:31:06 +00:00
|
|
|
long hugetlb_vmemmap_restore_folios(const struct hstate *h,
|
|
|
|
struct list_head *folio_list,
|
|
|
|
struct list_head *non_hvo_folios);
|
2023-10-11 14:45:57 +00:00
|
|
|
void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio);
|
2023-10-19 02:31:05 +00:00
|
|
|
void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
|
2022-06-28 09:22:33 +00:00
|
|
|
|
|
|
|
static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
|
2021-07-01 01:47:17 +00:00
|
|
|
{
|
2022-06-28 09:22:33 +00:00
|
|
|
return pages_per_huge_page(h) * sizeof(struct page);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return how many vmemmap size associated with a HugeTLB page that can be
|
|
|
|
* optimized and can be freed to the buddy allocator.
|
|
|
|
*/
|
|
|
|
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
|
|
|
|
{
|
|
|
|
int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;
|
|
|
|
|
|
|
|
if (!is_power_of_2(sizeof(struct page)))
|
|
|
|
return 0;
|
|
|
|
return size > 0 ? size : 0;
|
2021-07-01 01:47:17 +00:00
|
|
|
}
|
2021-07-01 01:47:13 +00:00
|
|
|
#else
|
2023-10-11 14:45:57 +00:00
|
|
|
/*
 * !CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP stub: no folio ever has its vmemmap
 * optimized, so there is nothing to restore; always succeeds.
 */
static inline int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
{
	return 0;
}
|
|
|
|
|
2023-10-19 02:31:06 +00:00
|
|
|
/*
 * !CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP stub: no folio ever has its vmemmap
 * optimized, so every folio already has a fully populated vmemmap. Move them
 * all onto @non_hvo_folios and report that zero folios were restored.
 *
 * Note: `inline` added — a plain `static` function defined in a header
 * triggers -Wunused-function (and duplicates code) in every translation unit
 * that includes it without calling it, and all sibling stubs in this branch
 * are `static inline`.
 */
static inline long hugetlb_vmemmap_restore_folios(const struct hstate *h,
					struct list_head *folio_list,
					struct list_head *non_hvo_folios)
{
	list_splice_init(folio_list, non_hvo_folios);
	return 0;
}
|
|
|
|
|
2023-10-11 14:45:57 +00:00
|
|
|
/* No-op when HVO is compiled out: the folio's vmemmap is left untouched. */
static inline void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
{
}
|
2021-07-01 01:47:17 +00:00
|
|
|
|
2023-10-19 02:31:05 +00:00
|
|
|
/* No-op when HVO is compiled out: no folio on @folio_list is optimized. */
static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
{
}
|
|
|
|
|
2022-06-28 09:22:33 +00:00
|
|
|
/* HVO disabled: no vmemmap bytes can ever be freed for any hstate. */
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	return 0;
}
|
2022-06-28 09:22:33 +00:00
|
|
|
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
|
2021-07-01 01:47:33 +00:00
|
|
|
|
2022-06-28 09:22:33 +00:00
|
|
|
/* True iff HVO can free at least some vmemmap for HugeTLB pages of @h. */
static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
{
	unsigned int freeable = hugetlb_vmemmap_optimizable_size(h);

	return freeable != 0;
}
|
2021-07-01 01:47:13 +00:00
|
|
|
#endif /* _LINUX_HUGETLB_VMEMMAP_H */
|