parisc architecture fixes for kernel v5.18
Rewrite the cache flush code for PA8800/PA8900 CPUs to flush using the
virtual address of user and kernel pages instead of using tmpalias flushes.
Testing showed that tmpalias flushes don't work reliably on PA8800/PA8900
CPUs.

Fix flush code to allow 32-bit kernels to run on 64-bit capable machines,
e.g. a 32-bit kernel on C3700 machines.

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQS86RI+GtKfB8BJu973ErUQojoPXwUCYoZDMAAKCRD3ErUQojoP
XzUAAQDtNB0FFsAtbeDrM+cQTJGZjB5cVbsSvRDwQR4MQjEfKwEAz7o6Qy3WPRmM
tPov2dzXFSvt0H2iHFSp8iXM9uMzkw8=
=B/Cd
-----END PGP SIGNATURE-----

Merge tag 'for-5.18/parisc-4' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc architecture fixes from Helge Deller:
 "We had two big outstanding issues after v5.18-rc6:

  a) 32-bit kernels on 64-bit machines (e.g. on a C3700 which is able to
     run 32- and 64-bit kernels) failed early in userspace.

  b) 64-bit kernels on PA8800/PA8900 CPUs (e.g. in a C8000) showed random
     userspace segfaults. We assumed that those problems were caused by
     the tmpalias flushes.

  Dave did a lot of testing and reorganization of the current flush code
  and fixed the 32-bit cache flushing. For PA8800/PA8900 CPUs he switched
  the code to flush using the virtual address of user and kernel pages
  instead of using tmpalias flushes. The tmpalias flushes don't seem to
  work reliably on such CPUs.

  We tested the patches on a wide range of machines (715/64, B160L, C3000,
  C3700, C8000, rp3440) and they have been in for-next without any
  conflicts.

  Summary:

   - Rewrite the cache flush code for PA8800/PA8900 CPUs to flush using
     the virtual address of user and kernel pages instead of using
     tmpalias flushes. Testing showed that tmpalias flushes don't work
     reliably on PA8800/PA8900 CPUs

   - Fix flush code to allow 32-bit kernels to run on 64-bit capable
     machines, e.g. a 32-bit kernel on C3700 machines"

* tag 'for-5.18/parisc-4' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Fix patch code locking and flushing
  parisc: Rewrite cache flush code for PA8800/PA8900
  parisc: Disable debug code regarding cache flushes in handle_nadtlb_fault()
commit b015dcd62b
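As a reading aid (not part of the commit itself), the C sketch below condenses the new flushing strategy. It is distilled from the flush_user_cache_page() routine added in arch/parisc/kernel/cache.c in the diff that follows; the CONFIG_TLB_PTLOCK save/restore is omitted, and the function name is invented for the sketch.

/*
 * Condensed sketch only: flush a user page through its real user virtual
 * address instead of a tmpalias mapping.  The control/space register usage
 * mirrors flush_user_cache_page() in the diff below; CONFIG_TLB_PTLOCK
 * handling is left out for brevity.
 */
static void sketch_flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned long flags, space, pgd, prot;

	vmaddr &= PAGE_MASK;
	preempt_disable();

	/* Save the current protection, user space and page-directory context ... */
	local_irq_save(flags);
	prot = mfctl(8);
	space = mfsp(SR_USER);
	pgd = mfctl(25);
	/* ... and temporarily switch to the mm that owns vmaddr. */
	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
	local_irq_restore(flags);

	/* Flush by user virtual address; no tmpalias mapping is involved. */
	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	flush_tlb_page(vma, vmaddr);

	/* Restore the previous context. */
	local_irq_save(flags);
	mtctl(pgd, 25);
	mtsp(space, SR_USER);
	mtctl(prot, 8);
	local_irq_restore(flags);

	preempt_enable();
}

The real function, together with the callers that choose between this path and the older tmpalias path via parisc_requires_coherency(), is in the cache.c hunks below.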
arch/parisc/include/asm/cacheflush.h

@@ -59,20 +59,12 @@ void flush_dcache_page(struct page *page);
	flush_kernel_icache_range_asm(s,e); \
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long user_vaddr, void *dst, void *src, int len);
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long user_vaddr, void *dst, void *src, int len);
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

@@ -80,16 +72,7 @@ void flush_cache_range(struct vm_area_struct *vma,
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page)) {
		flush_tlb_page(vma, vmaddr);
		preempt_disable();
		flush_dcache_page_asm(page_to_phys(page), vmaddr);
		preempt_enable();
	}
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);

#define ARCH_HAS_FLUSH_ON_KUNMAP
static inline void kunmap_flush_on_unmap(void *addr)
arch/parisc/include/asm/page.h

@@ -26,12 +26,14 @@
#define copy_page(to, from) copy_page_asm((void *)(to), (void *)(from))

struct page;
struct vm_area_struct;

void clear_page_asm(void *page);
void copy_page_asm(void *to, void *from);
#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		struct page *pg);
void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr,
		struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE

/*
 * These are used to make use of C type-checking..
arch/parisc/kernel/cache.c

@@ -27,6 +27,7 @@
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
@@ -91,7 +92,7 @@ static inline void flush_data_cache(void)
}


/* Virtual address of pfn. */
/* Kernel virtual address of pfn. */
#define pfn_va(pfn) __va(PFN_PHYS(pfn))

void
@@ -124,11 +125,13 @@ show_cache_info(struct seq_file *m)
			cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
		   cache_info.dc_size/1024,
		   (cache_info.dc_conf.cc_wt ? "WT":"WB"),
		   (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		   ((cache_info.dc_loop == 1) ? "direct mapped" : buf));
		   ((cache_info.dc_loop == 1) ? "direct mapped" : buf),
		   cache_info.dc_conf.cc_alias
		  );
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
@@ -324,25 +327,81 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
	preempt_enable();
}

static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;
	unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
	unsigned long pgd_lock;
#endif

	vmaddr &= PAGE_MASK;

	preempt_disable();
	purge_dcache_page_asm(physaddr, vmaddr);

	/* Set context for flush */
	local_irq_save(flags);
	prot = mfctl(8);
	space = mfsp(SR_USER);
	pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
	pgd_lock = mfctl(28);
#endif
	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
	local_irq_restore(flags);

	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	flush_tlb_page(vma, vmaddr);

	/* Restore previous context */
	local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
	mtctl(pgd_lock, 28);
#endif
	mtctl(pgd, 25);
	mtsp(space, SR_USER);
	mtctl(prot, 8);
	local_irq_restore(flags);

	preempt_enable();
}

static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
	pte_t *ptep = NULL;
	pgd_t *pgd = mm->pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

static inline bool pte_needs_flush(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
		== (_PAGE_PRESENT | _PAGE_ACCESSED);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	unsigned long count = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
@@ -357,33 +416,52 @@ void flush_dcache_page(struct page *page)

	pgoff = page->index;

	/* We have carefully arranged in arch_get_unmapped_area() that
	/*
	 * We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	 * to flush one address here for them all to become coherent
	 * on machines that support equivalent aliasing
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;
		if (parisc_requires_coherency()) {
			pte_t *ptep;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush only alias mapping.
		 * This guarantees that the page is no-longer in the
		 * cache for any process and nor may it be
		 * speculatively read in (until the user or kernel
		 * specifically accesses it, of course) */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
			!= (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (parisc_requires_coherency() && old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
			old_addr = addr;
			ptep = get_ptep(mpnt->vm_mm, addr);
			if (ptep && pte_needs_flush(*ptep))
				flush_user_cache_page(mpnt, addr);
		} else {
			/*
			 * The TLB is the engine of coherence on parisc:
			 * The CPU is entitled to speculate any page
			 * with a TLB mapping, so here we kill the
			 * mapping then flush the page along a special
			 * flush only alias mapping. This guarantees that
			 * the page is no-longer in the cache for any
			 * process and nor may it be speculatively read
			 * in (until the user or kernel specifically
			 * accesses it, of course)
			 */
			flush_tlb_page(mpnt, addr);
			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				!= (addr & (SHM_COLOUR - 1))) {
				__flush_cache_page(mpnt, addr, page_to_phys(page));
				/*
				 * Software is allowed to have any number
				 * of private mappings to a page.
				 */
				if (!(mpnt->vm_flags & VM_SHARED))
					continue;
				if (old_addr)
					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
						old_addr, addr, mpnt->vm_file);
				old_addr = addr;
			}
		}
		WARN_ON(++count == 4096);
	}
	flush_dcache_mmap_unlock(mapping);
}
@@ -403,7 +481,7 @@ void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold;
	unsigned long threshold, threshold2;

	alltime = mfctl(16);
	flush_data_cache();
@@ -417,11 +495,16 @@
	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
	if (threshold > cache_info.dc_size)
		threshold = cache_info.dc_size;
	if (threshold)
		parisc_cache_flush_threshold = threshold;
	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
	pr_info("Calculated flush threshold is %lu KiB\n",
		threshold/1024);

	/*
	 * The threshold computed above isn't very reliable. The following
	 * heuristic works reasonably well on c8000/rp3440.
	 */
	threshold2 = cache_info.dc_size * num_online_cpus();
	parisc_cache_flush_threshold = threshold2;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

@@ -477,19 +560,47 @@ void flush_kernel_dcache_page_addr(void *addr)
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
static void flush_cache_page_if_present(struct vm_area_struct *vma,
	unsigned long vmaddr, unsigned long pfn)
{
	/* Copy using kernel mapping. No coherency is needed (all in
	   kunmap) for the `to' page. However, the `from' page needs to
	   be flushed through a mapping equivalent to the user mapping
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	copy_page_asm(vto, vfrom);
	preempt_enable();
	pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);

	/*
	 * The pte check is racy and sometimes the flush will trigger
	 * a non-access TLB miss. Hopefully, the page has already been
	 * flushed.
	 */
	if (ptep && pte_needs_flush(*ptep))
		flush_cache_page(vma, vmaddr, pfn);
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_local_page(from);
	kto = kmap_local_page(to);
	flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
	copy_page_asm(kto, kfrom);
	kunmap_local(kto);
	kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
@@ -520,92 +631,105 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
	return 0;
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		p4d_t *p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud_t *pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd_t *pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
	unsigned long start, unsigned long end)
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	unsigned long addr, pfn;
	pte_t *ptep;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		ptep = get_ptep(mm->pgd, addr);
		if (ptep) {
			pfn = pte_pfn(*ptep);
			flush_cache_page(vma, addr, pfn);
		/*
		 * The vma can contain pages that aren't present. Although
		 * the pte search is expensive, we need the pte to find the
		 * page pfn and to check whether the page should be flushed.
		 */
		ptep = get_ptep(vma->vm_mm, addr);
		if (ptep && pte_needs_flush(*ptep)) {
			if (parisc_requires_coherency()) {
				flush_user_cache_page(vma, addr);
			} else {
				pfn = pte_pfn(*ptep);
				if (WARN_ON(!pfn_valid(pfn)))
					return;
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma && usize < parisc_cache_flush_threshold; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc. So, avoid it if the mm isn't too big. */
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (mm->context.space_id)
			flush_tlb_all();
	/*
	 * Flushing the whole cache on each cpu takes forever on
	 * rp3440, etc. So, avoid it if the mm isn't too big.
	 *
	 * Note that we must flush the entire cache on machines
	 * with aliasing caches to prevent random segmentation
	 * faults.
	 */
	if (!parisc_requires_coherency()
	    || mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_all();
		flush_cache_all();
		return;
	}

	/* Flush mm */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_cache_flush_threshold) {
		if (vma->vm_mm->context.space_id)
			flush_tlb_range(vma, start, end);
	if (!parisc_requires_coherency()
	    || end - start >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	flush_cache_pages(vma, vma->vm_mm, start, end);
	flush_cache_pages(vma, start, end);
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		if (likely(vma->vm_mm->context.space_id)) {
			flush_tlb_page(vma, vmaddr);
			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		} else {
			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		}
	if (WARN_ON(!pfn_valid(pfn)))
		return;
	if (parisc_requires_coherency())
		flush_user_cache_page(vma, vmaddr);
	else
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (!PageAnon(page))
		return;

	if (parisc_requires_coherency()) {
		flush_user_cache_page(vma, vmaddr);
		return;
	}

	flush_tlb_page(vma, vmaddr);
	preempt_disable();
	flush_dcache_page_asm(page_to_phys(page), vmaddr);
	preempt_enable();
}

void flush_kernel_vmap_range(void *vaddr, int size)
arch/parisc/kernel/patch.c

@@ -40,10 +40,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,

	*need_unmap = 1;
	set_fixmap(fixmap, page_to_phys(page));
	if (flags)
		raw_spin_lock_irqsave(&patch_lock, *flags);
	else
		__acquire(&patch_lock);
	raw_spin_lock_irqsave(&patch_lock, *flags);

	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}
@@ -52,10 +49,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
{
	clear_fixmap(fixmap);

	if (flags)
		raw_spin_unlock_irqrestore(&patch_lock, *flags);
	else
		__release(&patch_lock);
	raw_spin_unlock_irqrestore(&patch_lock, *flags);
}

void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
@@ -67,8 +61,9 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
	int mapped;

	/* Make sure we don't have any aliases in cache */
	flush_kernel_vmap_range(addr, len);
	flush_icache_range(start, end);
	flush_kernel_dcache_range_asm(start, end);
	flush_kernel_icache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);

	p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);

@@ -81,8 +76,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
			 * We're crossing a page boundary, so
			 * need to remap
			 */
			flush_kernel_vmap_range((void *)fixmap,
						(p-fixmap) * sizeof(*p));
			flush_kernel_dcache_range_asm((unsigned long)fixmap,
						      (unsigned long)p);
			flush_tlb_kernel_range((unsigned long)fixmap,
					       (unsigned long)p);
			if (mapped)
				patch_unmap(FIX_TEXT_POKE0, &flags);
			p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
@@ -90,10 +87,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
		}
	}

	flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p));
	flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p);
	flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p);
	if (mapped)
		patch_unmap(FIX_TEXT_POKE0, &flags);
	flush_icache_range(start, end);
}

void __kprobes __patch_text(void *addr, u32 insn)
arch/parisc/mm/fault.c

@@ -22,6 +22,8 @@

#include <asm/traps.h>

#define DEBUG_NATLB 0

/* Various important other fields */
#define bit22set(x) (x & 0x00000200)
#define bits23_25set(x) (x & 0x000001c0)
@@ -450,8 +452,8 @@ handle_nadtlb_fault(struct pt_regs *regs)
		fallthrough;
	case 0x380:
		/* PDC and FIC instructions */
		if (printk_ratelimit()) {
			pr_warn("BUG: nullifying cache flush/purge instruction\n");
		if (DEBUG_NATLB && printk_ratelimit()) {
			pr_warn("WARNING: nullifying cache flush/purge instruction\n");
			show_regs(regs);
		}
		if (insn & 0x20) {