sh64: Wire up the shared __flush_xxx_region() flushers.
Now with all of the prep work out of the way, kill off the SH-5 variants
and use the SH-4 version directly. This also takes advantage of the
unrolling that was previously done for the new version.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 43bc61d86f
commit 795687265d
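For orientation, the shared flushers this commit switches to are unrolled
rather than flushing one cache line per loop iteration. Below is a minimal
sketch of that shape only, not the actual flush-sh4.c: the __ocbp() helper
name and the unroll factor of 8 are assumptions, while the "ocbp" operand
form and the __flush_purge_region() signature are taken from the code
removed further down.

#include <linux/cache.h>	/* L1_CACHE_BYTES */

/* Hypothetical wrapper around the purge instruction used in the removed code. */
static inline void __ocbp(unsigned long addr)
{
	__asm__ __volatile__("ocbp %0, 0" : : "r" (addr) : "memory");
}

void __flush_purge_region(void *start, int size)
{
	unsigned long v, end, cnt;

	/* Align the region outwards to whole cache lines. */
	v = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES - 1)
		& ~(L1_CACHE_BYTES - 1);
	cnt = (end - v) / L1_CACHE_BYTES;

	/* Unrolled fast path: purge eight lines per iteration (assumed factor). */
	while (cnt >= 8) {
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		cnt -= 8;
	}

	/* Remainder: purge the leftover lines one at a time. */
	while (cnt) {
		__ocbp(v); v += L1_CACHE_BYTES;
		cnt--;
	}
}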
@@ -9,7 +9,7 @@ mmu-$(CONFIG_MMU)	:= fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \
 			   extable_64.o
 
 ifndef CONFIG_CACHE_OFF
-obj-y			+= cache-sh5.o
+obj-y			+= cache-sh5.o flush-sh4.o
 endif
 
 obj-y			+= $(mmu-y)
@@ -539,54 +539,6 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
 		sh64_dcache_purge_user_pages(mm, start, end);
 	}
 }
-
-/*
- * Purge the range of addresses from the D-cache.
- *
- * The addresses lie in the superpage mapping. There's no harm if we
- * overpurge at either end - just a small performance loss.
- */
-void __flush_purge_region(void *start, int size)
-{
-	unsigned long long ullend, addr, aligned_start;
-
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-	while (addr <= ullend) {
-		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-void __flush_wback_region(void *start, int size)
-{
-	unsigned long long ullend, addr, aligned_start;
-
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-	while (addr < ullend) {
-		__asm__ __volatile__ ("ocbwb %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-void __flush_invalidate_region(void *start, int size)
-{
-	unsigned long long ullend, addr, aligned_start;
-
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-	while (addr < ullend) {
-		__asm__ __volatile__ ("ocbi %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
 #endif /* !CONFIG_DCACHE_DISABLED */
 
 /*
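As a quick reference to what the three removed routines do (semantics the
shared versions carry over): "ocbp" purges a line (write back dirty data,
then invalidate), "ocbwb" writes back without invalidating, and "ocbi"
invalidates without writing back. A hypothetical caller sketch follows;
the function names, buffer, and device scenario are illustrative
assumptions and not taken from this commit, only the flusher prototypes
come from the removed code.

/* Prototypes as in the removed code; in-tree they come from the SH cache headers. */
extern void __flush_wback_region(void *start, int size);
extern void __flush_invalidate_region(void *start, int size);

/* Before a device reads a buffer the CPU just filled: write back dirty lines. */
static void hand_buffer_to_device(void *buf, int len)
{
	__flush_wback_region(buf, len);
}

/* Before the CPU reads data a device just wrote: drop stale cached copies. */
static void take_buffer_from_device(void *buf, int len)
{
	__flush_invalidate_region(buf, len);
}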