[ARM] 3762/1: Fix ptrace cache coherency bug for ARM1136 VIPT nonaliasing Harvard caches
Patch from George G. Davis
Resolve ARM1136 VIPT non-aliasing cache coherency issues observed when
using ptrace to set breakpoints, and clean up copy_{to,from}_user_page()
while we're here, as requested by Russell King, because "it's also far
too heavy on non-v6 CPUs".
NOTES:
1. Only access_process_vm() calls copy_{to,from}_user_page().
2. access_process_vm() calls get_user_pages() to pin down the "page".
3. get_user_pages() calls flush_dcache_page(page), which ensures cache
   coherency between the kernel and userspace mappings of "page".  However,
   flush_dcache_page(page) may not invalidate the I-Cache over this range
   in all cases; specifically, the I-Cache is not invalidated for the VIPT
   non-aliasing case.  So memory is consistent between the kernel and user
   space mappings of "page", but the I-Cache may still be hot over this
   range.  IOW, we don't have to worry about flush_cache_page() before
   memcpy().
4. Now, for the copy_to_user_page() case, after memcpy(), we must flush
   the caches so memory is consistent with kernel cache entries and
   invalidate the I-Cache if this mm region is executable.  We don't
   need to do anything after memcpy() for the copy_from_user_page()
   case since kernel cache entries will be invalidated via the same
   process above if we access "page" again.  The flush_ptrace_access()
   function (borrowed from SPARC64 implementation) is added to handle
   cache flushing after memcpy() for the copy_to_user_page() case.
Signed-off-by: George G. Davis <gdavis@mvista.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
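
For context only (not part of the patch): the notes above describe the path
ptrace takes into copy_to_user_page().  A minimal userspace sketch of that
trigger is below; it assumes a Linux host and simply pokes an unchanged text
word back into a traced child, a write the kernel services via
access_process_vm() and, on ARM, copy_to_user_page().

/*
 * Illustrative sketch only -- not part of this patch.  A tracer writes one
 * word of a stopped child's text with PTRACE_POKETEXT; the kernel services
 * the write through access_process_vm() -> get_user_pages() ->
 * copy_to_user_page(), i.e. the path whose cache maintenance is fixed here.
 * The word is written back unchanged so the child keeps running.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();
	int status;
	long word;

	if (child < 0) {
		perror("fork");
		return 1;
	}

	if (child == 0) {
		/* Child: ask to be traced, then stop so the parent can poke us. */
		if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) < 0)
			_exit(1);
		raise(SIGSTOP);
		_exit(0);
	}

	/* Parent: wait for the child to stop under ptrace. */
	if (waitpid(child, &status, 0) < 0) {
		perror("waitpid");
		return 1;
	}

	/* Read one word of the child's text (same address as ours after fork),
	 * then write it straight back -- the breakpoint-style write that
	 * exercises copy_to_user_page() in the kernel. */
	errno = 0;
	word = ptrace(PTRACE_PEEKTEXT, child, (void *)main, NULL);
	if (word == -1 && errno)
		perror("PTRACE_PEEKTEXT");
	else if (ptrace(PTRACE_POKETEXT, child, (void *)main, (void *)word) < 0)
		perror("PTRACE_POKETEXT");
	else
		printf("poked word 0x%lx back into child text at %p\n",
		       word, (void *)main);

	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, &status, 0);
	return 0;
}

On an affected ARM1136 the interesting part is not the userspace code itself
but that, before this patch, the word written through this path could still be
shadowed by a stale I-cache line in the child.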
			
			
parent 57bcdafcb1
commit a188ad2bc7

--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -87,6 +87,32 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 	if (cache_is_vipt_aliasing())
 		flush_pfn_alias(pfn, user_addr);
 }
+
+void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+			 unsigned long uaddr, void *kaddr,
+			 unsigned long len, int write)
+{
+	if (cache_is_vivt()) {
+		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+			unsigned long addr = (unsigned long)kaddr;
+			__cpuc_coherent_kern_range(addr, addr + len);
+		}
+		return;
+	}
+
+	if (cache_is_vipt_aliasing()) {
+		flush_pfn_alias(page_to_pfn(page), uaddr);
+		return;
+	}
+
+	/* VIPT non-aliasing cache */
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+	    vma->vm_flags & VM_EXEC) {
+		unsigned long addr = (unsigned long)kaddr;
+		/* only flushing the kernel mapping on non-aliasing VIPT */
+		__cpuc_coherent_kern_range(addr, addr + len);
+	}
+}
 #else
 #define flush_pfn_alias(pfn,vaddr)	do { } while (0)
 #endif

--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -247,14 +247,12 @@ extern void dmac_flush_range(unsigned long, unsigned long);
  */
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
-		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
-		flush_dcache_page(page);			\
+		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
 	} while (0)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
-		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
 	} while (0)
 
@@ -285,10 +283,24 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 	}
 }
+
+static inline void
+flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+			 unsigned long uaddr, void *kaddr,
+			 unsigned long len, int write)
+{
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+		unsigned long addr = (unsigned long)kaddr;
+		__cpuc_coherent_kern_range(addr, addr + len);
+	}
+}
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
+extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+				unsigned long uaddr, void *kaddr,
+				unsigned long len, int write);
 #endif
 
 /*