mirror of https://github.com/torvalds/linux.git (synced 2024-11-10 06:01:57 +00:00)
powerpc/64s/mm: Move __real_pte stubs into hash-4k.h
The stub versions of __real_pte() etc are only used with HPT & 4K pages, so
move them into the hash-4k.h header.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240821080729.872034-1-mpe@ellerman.id.au
commit 8ae4f16f7d
parent d6b34416b0
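For orientation, here is a minimal user-space sketch (simplified, hypothetical stand-in types; not the kernel implementation) of why the real_pte machinery is "all nops" with 4K pages: without sub-pages there is no extra hash-slot state to carry, so a real_pte_t is just a thin wrapper around the single PTE and the stub accessors simply unwrap it again.

    /* Hedged sketch only: stand-in types, not the powerpc definitions. */
    #include <stdio.h>

    typedef struct { unsigned long pte; } pte_t;       /* stand-in for pte_t */
    typedef struct { pte_t pte; } real_pte_t;          /* stand-in for real_pte_t */

    /* With 4K pages there are no sub-pages, so "real PTE" == PTE. */
    #define __real_pte(e, p, o)  ((real_pte_t){ (e) })
    #define __rpte_to_pte(r)     ((r).pte)

    int main(void)
    {
            pte_t pte = { 0x1234 };
            real_pte_t rpte = __real_pte(pte, NULL, 0);   /* p and o are ignored */

            /* The wrapper carries no extra state: the value round-trips unchanged. */
            printf("pte=0x%lx  rpte=0x%lx\n", pte.pte, __rpte_to_pte(rpte).pte);
            return 0;
    }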
arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -74,6 +74,26 @@
 #define remap_4k_pfn(vma, addr, pfn, prot)	\
 	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
 
+/*
+ * With 4K page size the real_pte machinery is all nops.
+ */
+#define __real_pte(e, p, o)	((real_pte_t){(e)})
+#define __rpte_to_pte(r)	((r).pte)
+#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
+
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
+	do {								\
+		index = 0;						\
+		shift = mmu_psize_defs[psize].shift;			\
+
+#define pte_iterate_hashed_end() } while(0)
+
+/*
+ * We expect this to be called only for user addresses or kernel virtual
+ * addresses other than the linear mapping.
+ */
+#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
+
 /*
  * 4K PTE format is different from 64K PTE format. Saving the hash_slot is just
  * a matter of returning the PTE bits that need to be modified. On 64K PTE,
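The pte_iterate_hashed_subpages() / pte_iterate_hashed_end() pair looks unusual because the first macro opens a do { ... } block that only the second macro closes. A hedged sketch of how a caller pairs them (simplified stand-in for mmu_psize_defs, not kernel code); with the 4K stubs the loop body executes exactly once:

    /* Hedged, self-contained sketch of the macro pairing only. */
    #include <stdio.h>

    struct psize_def { unsigned int shift; };
    static const struct psize_def mmu_psize_defs[] = { { .shift = 12 } };  /* 4K */

    #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
    	do {								\
    		index = 0;						\
    		shift = mmu_psize_defs[psize].shift;

    #define pte_iterate_hashed_end() } while (0)

    int main(void)
    {
            unsigned long index, shift;

            /* The "loop" opened by the first macro is closed by the second. */
            pte_iterate_hashed_subpages(0, 0, 0, index, shift)
                    printf("subpage index=%lu shift=%lu\n", index, shift);
            pte_iterate_hashed_end();
            return 0;
    }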
arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -330,32 +330,6 @@ static inline unsigned long pud_leaf_size(pud_t pud)
 
 #ifndef __ASSEMBLY__
 
-/*
- * This is the default implementation of various PTE accessors, it's
- * used in all cases except Book3S with 64K pages where we have a
- * concept of sub-pages
- */
-#ifndef __real_pte
-
-#define __real_pte(e, p, o)	((real_pte_t){(e)})
-#define __rpte_to_pte(r)	((r).pte)
-#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
-
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
-	do {								\
-		index = 0;						\
-		shift = mmu_psize_defs[psize].shift;			\
-
-#define pte_iterate_hashed_end() } while(0)
-
-/*
- * We expect this to be called only for user addresses or kernel virtual
- * addresses other than the linear mapping.
- */
-#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
-
-#endif /* __real_pte */
-
 static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
 				       pte_t *ptep, unsigned long clr,
 				       unsigned long set, int huge)
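The block removed here relied on an #ifndef override pattern: a page-size header (the 64K hash header, which has a real notion of sub-pages) could define __real_pte() and friends first, and these defaults only applied when nothing else had. With the 4K stubs now living in hash-4k.h, the guarded fallback is no longer needed. A hedged, self-contained illustration of that pattern (hypothetical macro names, not the kernel headers):

    #include <stdio.h>

    /* "hash-64k.h" role: the page-size header defines the macro first. */
    #define my_real_pte(e)	((e) | 0x100UL)		/* pretend sub-page state */

    /* "pgtable.h" role: guarded default, used only if nothing defined it yet. */
    #ifndef my_real_pte
    #define my_real_pte(e)	(e)			/* plain no-op stub */
    #endif

    int main(void)
    {
            /* The specific definition wins; the guarded fallback is skipped. */
            printf("0x%lx\n", my_real_pte(0x1UL));	/* prints 0x101 */
            return 0;
    }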