xtensa: implement the new page table range API

Add PFN_PTE_SHIFT, update_mmu_cache_range(), flush_dcache_folio() and
flush_icache_pages().

Link: https://lkml.kernel.org/r/20230802151406.3735276-30-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author:    Matthew Wilcox (Oracle), 2023-08-02 16:13:57 +01:00
Committer: Andrew Morton
Parent:    a3e1c9372c
Commit:    4fbb7e7f47
3 changed files with 64 additions and 48 deletions
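
A note on how these pieces fit together: defining PFN_PTE_SHIFT lets xtensa drop its private set_pte_at() in favour of the generic set_ptes(). Roughly (a simplified sketch of the generic helper in <linux/pgtable.h>, with the lazy-MMU and page-table-check hooks elided), it lays down nr consecutive PTEs by stepping the PFN one page per iteration:

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	for (;;) {
		set_pte(ptep, pte);	/* arch hook, see pgtable.h below */
		if (--nr == 0)
			break;
		ptep++;
		/* advance the PTE to the next page frame */
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
}

set_pte_at(mm, addr, ptep, pte) is then just set_ptes(mm, addr, ptep, pte, 1), which is why the arch-specific definition can go.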

arch/xtensa/include/asm/cacheflush.h

@@ -119,8 +119,14 @@ void flush_cache_page(struct vm_area_struct*,
 #define flush_cache_vmap(start,end)	flush_cache_all()
 #define flush_cache_vunmap(start,end)	flush_cache_all()
 
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
+
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-void flush_dcache_page(struct page *);
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
 
 void local_flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end);

@@ -156,6 +162,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 
 /* This is not required, see Documentation/core-api/cachetlb.rst */
 #define	flush_icache_page(vma,page)		do { } while (0)
+#define	flush_icache_pages(vma, page, nr)	do { } while (0)
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)

arch/xtensa/include/asm/pgtable.h

@@ -274,6 +274,7 @@ static inline pte_t pte_mkwrite(pte_t pte)
  * and a page entry and page directory to the page they refer to.
  */
 
+#define PFN_PTE_SHIFT		PAGE_SHIFT
 #define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
 #define pte_same(a,b)		(pte_val(a) == pte_val(b))
 #define pte_page(x)		pfn_to_page(pte_pfn(x))

@@ -301,15 +302,9 @@ static inline void update_pte(pte_t *ptep, pte_t pteval)
 
 struct mm_struct;
 
-static inline void
-set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
-{
-	update_pte(ptep, pteval);
-}
-
-static inline void set_pte(pte_t *ptep, pte_t pteval)
+static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-	update_pte(ptep, pteval);
+	update_pte(ptep, pte);
 }
 
 static inline void

@@ -407,8 +402,11 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
 
 #else
 
-extern void update_mmu_cache(struct vm_area_struct * vma,
-		unsigned long address, pte_t *ptep);
+struct vm_fault;
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep, unsigned int nr);
+#define update_mmu_cache(vma, address, ptep) \
+	update_mmu_cache_range(NULL, vma, address, ptep, 1)
 
 typedef pte_t *pte_addr_t;

arch/xtensa/mm/cache.c

@@ -121,9 +121,9 @@ EXPORT_SYMBOL(copy_user_highpage);
  *
  */
 
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping_file(page);
+	struct address_space *mapping = folio_flush_mapping(folio);
 
 	/*
 	 * If we have a mapping but the page is not mapped to user-space

@@ -132,14 +132,14 @@ void flush_dcache_page(struct page *page)
 	 */
 
 	if (mapping && !mapping_mapped(mapping)) {
-		if (!test_bit(PG_arch_1, &page->flags))
-			set_bit(PG_arch_1, &page->flags);
+		if (!test_bit(PG_arch_1, &folio->flags))
+			set_bit(PG_arch_1, &folio->flags);
 		return;
-
 	} else {
-
-		unsigned long phys = page_to_phys(page);
-		unsigned long temp = page->index << PAGE_SHIFT;
+		unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
+		unsigned long temp = folio_pos(folio);
+		unsigned int i, nr = folio_nr_pages(folio);
 		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
 		unsigned long virt;

@@ -154,22 +154,26 @@ void flush_dcache_page(struct page *page)
 			return;
 
 		preempt_disable();
-		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
-		__flush_invalidate_dcache_page_alias(virt, phys);
-
-		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
-
-		if (alias)
-			__flush_invalidate_dcache_page_alias(virt, phys);
-
-		if (mapping)
-			__invalidate_icache_page_alias(virt, phys);
+		for (i = 0; i < nr; i++) {
+			virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+			__flush_invalidate_dcache_page_alias(virt, phys);
+
+			virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
+
+			if (alias)
+				__flush_invalidate_dcache_page_alias(virt, phys);
+
+			if (mapping)
+				__invalidate_icache_page_alias(virt, phys);
+			phys += PAGE_SIZE;
+			temp += PAGE_SIZE;
+		}
 		preempt_enable();
 	}
 
 	/* There shouldn't be an entry in the cache for this page anymore. */
 }
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(flush_dcache_folio);
 
 /*
  * For now, flush the whole cache. FIXME??

@@ -207,45 +211,52 @@ EXPORT_SYMBOL(local_flush_cache_page);
 
 #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
 
-void
-update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long addr, pte_t *ptep, unsigned int nr)
 {
 	unsigned long pfn = pte_pfn(*ptep);
-	struct page *page;
+	struct folio *folio;
+	unsigned int i;
 
 	if (!pfn_valid(pfn))
 		return;
 
-	page = pfn_to_page(pfn);
+	folio = page_folio(pfn_to_page(pfn));
 
-	/* Invalidate old entry in TLBs */
-
-	flush_tlb_page(vma, addr);
+	/* Invalidate old entries in TLBs */
+	for (i = 0; i < nr; i++)
+		flush_tlb_page(vma, addr + i * PAGE_SIZE);
+	nr = folio_nr_pages(folio);
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
-	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
-		unsigned long phys = page_to_phys(page);
+	if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) {
+		unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
 		unsigned long tmp;
 
 		preempt_disable();
-		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
-		__flush_invalidate_dcache_page_alias(tmp, phys);
-		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
-		__flush_invalidate_dcache_page_alias(tmp, phys);
-		__invalidate_icache_page_alias(tmp, phys);
+		for (i = 0; i < nr; i++) {
+			tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+			__flush_invalidate_dcache_page_alias(tmp, phys);
+			tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
+			__flush_invalidate_dcache_page_alias(tmp, phys);
+			__invalidate_icache_page_alias(tmp, phys);
+			phys += PAGE_SIZE;
+		}
 		preempt_enable();
 
-		clear_bit(PG_arch_1, &page->flags);
+		clear_bit(PG_arch_1, &folio->flags);
 	}
 #else
-	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
+	if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags)
 	    && (vma->vm_flags & VM_EXEC) != 0) {
-		unsigned long paddr = (unsigned long)kmap_atomic(page);
-		__flush_dcache_page(paddr);
-		__invalidate_icache_page(paddr);
-		set_bit(PG_arch_1, &page->flags);
-		kunmap_atomic((void *)paddr);
+		for (i = 0; i < nr; i++) {
+			void *paddr = kmap_local_folio(folio, i * PAGE_SIZE);
+			__flush_dcache_page((unsigned long)paddr);
+			__invalidate_icache_page((unsigned long)paddr);
+			kunmap_local(paddr);
+		}
+		set_bit(PG_arch_1, &folio->flags);
 	}
 #endif
 }
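
For a sense of how the new hooks are driven, here is a hedged sketch of a batched fault-path caller in the style of set_pte_range() from the same series; the function name map_folio_pages() is hypothetical, and the locking and rmap accounting of the real code are elided:

static void map_folio_pages(struct vm_fault *vmf, struct folio *folio,
		struct page *page, unsigned int nr, unsigned long addr)
{
	struct vm_area_struct *vma = vmf->vma;
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	flush_icache_pages(vma, page, nr);	/* no-op on xtensa */
	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
	/* one call covers all nr pages; xtensa batches the TLB and
	 * cache-alias flushes inside update_mmu_cache_range() above */
	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
}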