sh: rework nommu for generic cache.c use.
This does a bit of reorganizing for allowing nommu to use the new
and generic cache.c, no functional changes.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent cbbe2f68f6
commit dde5e3ffb7
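What the diff below amounts to: the interface the common cache code uses (the copy_user_highpage()/clear_user_highpage() declarations, PG_dcache_dirty, shm_align_mask) is no longer hidden behind CONFIG_MMU, so nommu builds see the same definitions as the MMU case, and the nommu-only fallbacks are dropped. Where nommu genuinely has nothing to do, the hooks involved can simply stay empty; a minimal illustrative sketch of that stub style (hypothetical placement, not the literal contents of any file after this commit):

/* Hypothetical nommu stub: without an MMU there is no cache aliasing
 * to repair on a PTE update, so the hook is a no-op. */
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
}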
@@ -76,5 +76,7 @@ void kmap_coherent_init(void);
 void *kmap_coherent(struct page *page, unsigned long addr);
 void kunmap_coherent(void);
 
+#define PG_dcache_dirty PG_arch_1
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
@@ -68,18 +68,13 @@ extern void clear_user_page(void *to, unsigned long address, struct page *page);
 extern void copy_user_page(void *to, void *from, unsigned long address,
 			   struct page *page);
 
-#elif defined(CONFIG_MMU)
+#else
 extern void copy_user_highpage(struct page *to, struct page *from,
 			       unsigned long vaddr, struct vm_area_struct *vma);
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 extern void clear_user_highpage(struct page *page, unsigned long vaddr);
 #define clear_user_highpage clear_user_highpage
 
-#else
-
-#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-
 #endif
 
 /*
@@ -15,8 +15,6 @@
  * SH4. Unlike the SH4 this is a unified cache so we need to do some work
  * in mmap when 'exec'ing a new binary
  */
-#define PG_dcache_dirty PG_arch_1
-
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
@@ -38,6 +38,4 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 /* Initialization of P3 area for copy_user_page */
 void p3_cache_init(void);
 
-#define PG_dcache_dirty PG_arch_1
-
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
@@ -268,11 +268,9 @@ asmlinkage void __init sh_cpu_init(void)
 	cache_init();
 
 	if (raw_smp_processor_id() == 0) {
-#ifdef CONFIG_MMU
 		shm_align_mask = max_t(unsigned long,
 				       current_cpu_data.dcache.way_size - 1,
 				       PAGE_SIZE - 1);
-#endif
 
 		/* Boot CPU sets the cache shape */
 		detect_cache_shape();
@@ -14,10 +14,10 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 
-#ifdef CONFIG_MMU
 unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);
 
+#ifdef CONFIG_MMU
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
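With the hunk above, shm_align_mask is defined and exported regardless of CONFIG_MMU; only the colour-aware mapping logic that follows remains MMU-only. For context, the mask is typically consumed by rounding an address up to the next cache-colour boundary; a small sketch of that usual pattern (illustrative helper, not part of this diff):

/* Round addr up to the next cache-colour boundary so that shared
 * mappings of the same page land on the same colour (sketch only). */
static inline unsigned long colour_align(unsigned long addr)
{
	return (addr + shm_align_mask) & ~shm_align_mask;
}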
@@ -50,11 +50,6 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 }
 
-void __update_cache(struct vm_area_struct *vma,
-		    unsigned long address, pte_t pte)
-{
-}
-
 void __init kmap_coherent_init(void)
 {
 }