mirror of
https://github.com/torvalds/linux.git
synced 2024-12-27 13:22:23 +00:00
mm/treewide: drop pXd_large()
They're not used anymore, drop all of them. Link: https://lkml.kernel.org/r/20240305043750.93762-10-peterx@redhat.com Signed-off-by: Peter Xu <peterx@redhat.com> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org> Cc: Alexander Potapenko <glider@google.com> Cc: Andrey Konovalov <andreyknvl@gmail.com> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com> Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Kirill A. Shutemov <kirill@shutemov.name> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Muchun Song <muchun.song@linux.dev> Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vincenzo Frascino <vincenzo.frascino@arm.com> Cc: Yang Shi <shy828301@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
0a845e0f63
commit
e72c7c2b88
@@ -213,7 +213,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
|
||||
|
||||
#define pmd_pfn(pmd) (__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
|
||||
|
||||
#define pmd_large(pmd) (pmd_val(pmd) & 2)
|
||||
#define pmd_leaf(pmd) (pmd_val(pmd) & 2)
|
||||
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
|
||||
#define pmd_present(pmd) (pmd_val(pmd))
|
||||
|
@@ -118,7 +118,6 @@
|
||||
PMD_TYPE_TABLE)
|
||||
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
|
||||
PMD_TYPE_SECT)
|
||||
#define pmd_large(pmd) pmd_sect(pmd)
|
||||
#define pmd_leaf(pmd) pmd_sect(pmd)
|
||||
|
||||
#define pud_clear(pudp) \
|
||||
|
@@ -723,7 +723,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
|
||||
/*
|
||||
* Read each entry once. As above, a non-leaf entry can be promoted to
|
||||
* a huge page _during_ this walk. Re-reading the entry could send the
|
||||
 * walk into the weeds, e.g. p*d_large() returns false (sees the old
|
||||
 * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
|
||||
* value) and then p*d_offset() walks into the target huge page instead
|
||||
* of the old page table (sees the new value).
|
||||
*/
|
||||
|
@@ -1437,17 +1437,15 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
|
||||
}
|
||||
|
||||
/*
|
||||
* Like pmd_huge() and pmd_large(), but works regardless of config options
|
||||
* Like pmd_huge(), but works regardless of config options
|
||||
*/
|
||||
#define pmd_leaf pmd_leaf
|
||||
#define pmd_large pmd_leaf
|
||||
static inline bool pmd_leaf(pmd_t pmd)
|
||||
{
|
||||
return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
|
||||
}
|
||||
|
||||
#define pud_leaf pud_leaf
|
||||
#define pud_large pud_leaf
|
||||
static inline bool pud_leaf(pud_t pud)
|
||||
{
|
||||
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
|
||||
|
@@ -101,10 +101,6 @@ void poking_init(void);
|
||||
extern unsigned long ioremap_bot;
|
||||
extern const pgprot_t protection_map[16];
|
||||
|
||||
#ifndef pmd_large
|
||||
#define pmd_large(pmd) 0
|
||||
#endif
|
||||
|
||||
/* can we use this in kvm */
|
||||
unsigned long vmalloc_to_phys(void *vmalloc_addr);
|
||||
|
||||
|
@@ -705,16 +705,16 @@ static inline int pud_none(pud_t pud)
|
||||
return pud_val(pud) == _REGION3_ENTRY_EMPTY;
|
||||
}
|
||||
|
||||
#define pud_leaf pud_large
|
||||
static inline int pud_large(pud_t pud)
|
||||
#define pud_leaf pud_leaf
|
||||
static inline int pud_leaf(pud_t pud)
|
||||
{
|
||||
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
|
||||
return 0;
|
||||
return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
|
||||
}
|
||||
|
||||
#define pmd_leaf pmd_large
|
||||
static inline int pmd_large(pmd_t pmd)
|
||||
#define pmd_leaf pmd_leaf
|
||||
static inline int pmd_leaf(pmd_t pmd)
|
||||
{
|
||||
return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
|
||||
}
|
||||
|
@@ -680,8 +680,8 @@ static inline unsigned long pte_special(pte_t pte)
|
||||
return pte_val(pte) & _PAGE_SPECIAL;
|
||||
}
|
||||
|
||||
#define pmd_leaf pmd_large
|
||||
static inline unsigned long pmd_large(pmd_t pmd)
|
||||
#define pmd_leaf pmd_leaf
|
||||
static inline unsigned long pmd_leaf(pmd_t pmd)
|
||||
{
|
||||
pte_t pte = __pte(pmd_val(pmd));
|
||||
|
||||
@@ -867,8 +867,8 @@ static inline pmd_t *pud_pgtable(pud_t pud)
|
||||
/* only used by the stubbed out hugetlb gup code, should never be called */
|
||||
#define p4d_page(p4d) NULL
|
||||
|
||||
#define pud_leaf pud_large
|
||||
static inline unsigned long pud_large(pud_t pud)
|
||||
#define pud_leaf pud_leaf
|
||||
static inline unsigned long pud_leaf(pud_t pud)
|
||||
{
|
||||
pte_t pte = __pte(pud_val(pud));
|
||||
|
||||
|
@@ -251,8 +251,8 @@ static inline unsigned long pgd_pfn(pgd_t pgd)
|
||||
return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
|
||||
}
|
||||
|
||||
#define p4d_leaf p4d_large
|
||||
static inline int p4d_large(p4d_t p4d)
|
||||
#define p4d_leaf p4d_leaf
|
||||
static inline int p4d_leaf(p4d_t p4d)
|
||||
{
|
||||
/* No 512 GiB pages yet */
|
||||
return 0;
|
||||
@@ -260,14 +260,14 @@ static inline int p4d_large(p4d_t p4d)
|
||||
|
||||
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
|
||||
|
||||
#define pmd_leaf pmd_large
|
||||
static inline int pmd_large(pmd_t pte)
|
||||
#define pmd_leaf pmd_leaf
|
||||
static inline int pmd_leaf(pmd_t pte)
|
||||
{
|
||||
return pmd_flags(pte) & _PAGE_PSE;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_large */
|
||||
/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_leaf */
|
||||
static inline int pmd_trans_huge(pmd_t pmd)
|
||||
{
|
||||
return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
|
||||
@@ -1085,8 +1085,8 @@ static inline pmd_t *pud_pgtable(pud_t pud)
|
||||
*/
|
||||
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
|
||||
|
||||
#define pud_leaf pud_large
|
||||
static inline int pud_large(pud_t pud)
|
||||
#define pud_leaf pud_leaf
|
||||
static inline int pud_leaf(pud_t pud)
|
||||
{
|
||||
return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
|
||||
(_PAGE_PSE | _PAGE_PRESENT);
|
||||
@@ -1096,11 +1096,6 @@ static inline int pud_bad(pud_t pud)
|
||||
{
|
||||
return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
|
||||
}
|
||||
#else
|
||||
static inline int pud_large(pud_t pud)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
|
||||
|
||||
#if CONFIG_PGTABLE_LEVELS > 3
|
||||
|
@@ -3110,7 +3110,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
|
||||
/*
|
||||
* Read each entry once. As above, a non-leaf entry can be promoted to
|
||||
* a huge page _during_ this walk. Re-reading the entry could send the
|
||||
 * walk into the weeds, e.g. p*d_large() returns false (sees the old
|
||||
 * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
|
||||
* value) and then p*d_offset() walks into the target huge page instead
|
||||
* of the old page table (sees the new value).
|
||||
*/
|
||||
|
Loading…
Reference in New Issue
Block a user