mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 14:11:52 +00:00
csky: Fix TLB maintenance synchronization problem
TLB invalidate didn't contain a barrier operation in csky cpu and we need to prevent a previous PTW response arriving after the TLB invalidation instruction. Of course, changing the ASID also needs to take care of this issue. CPU0 CPU1 =============== =============== set_pte sync_is() -> See the previous set_pte for all harts tlbi.vas -> Invalidate all harts TLB entry & flush pipeline Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
This commit is contained in:
parent
c109f42450
commit
3b756ccddb
@ -89,9 +89,10 @@ static inline void tlb_invalid_indexed(void)
|
||||
cpwcr("cpcr8", 0x02000000);
|
||||
}
|
||||
|
||||
static inline void setup_pgd(pgd_t *pgd)
|
||||
static inline void setup_pgd(pgd_t *pgd, int asid)
|
||||
{
|
||||
cpwcr("cpcr29", __pa(pgd) | BIT(0));
|
||||
write_mmu_entryhi(asid);
|
||||
}
|
||||
|
||||
static inline pgd_t *get_pgd(void)
|
||||
|
@ -78,8 +78,13 @@ static inline void tlb_read(void)
|
||||
static inline void tlb_invalid_all(void)
|
||||
{
|
||||
#ifdef CONFIG_CPU_HAS_TLBI
|
||||
asm volatile("tlbi.alls\n":::"memory");
|
||||
sync_is();
|
||||
asm volatile(
|
||||
"tlbi.alls \n"
|
||||
"sync.i \n"
|
||||
:
|
||||
:
|
||||
: "memory");
|
||||
#else
|
||||
mtcr("cr<8, 15>", 0x04000000);
|
||||
#endif
|
||||
@ -88,8 +93,13 @@ static inline void tlb_invalid_all(void)
|
||||
static inline void local_tlb_invalid_all(void)
|
||||
{
|
||||
#ifdef CONFIG_CPU_HAS_TLBI
|
||||
asm volatile("tlbi.all\n":::"memory");
|
||||
sync_is();
|
||||
asm volatile(
|
||||
"tlbi.all \n"
|
||||
"sync.i \n"
|
||||
:
|
||||
:
|
||||
: "memory");
|
||||
#else
|
||||
tlb_invalid_all();
|
||||
#endif
|
||||
@ -100,12 +110,27 @@ static inline void tlb_invalid_indexed(void)
|
||||
mtcr("cr<8, 15>", 0x02000000);
|
||||
}
|
||||
|
||||
static inline void setup_pgd(pgd_t *pgd)
|
||||
#define NOP32 ".long 0x4820c400\n"
|
||||
|
||||
static inline void setup_pgd(pgd_t *pgd, int asid)
|
||||
{
|
||||
#ifdef CONFIG_CPU_HAS_TLBI
|
||||
mtcr("cr<28, 15>", __pa(pgd) | BIT(0));
|
||||
sync_is();
|
||||
#else
|
||||
mb();
|
||||
#endif
|
||||
mtcr("cr<29, 15>", __pa(pgd) | BIT(0));
|
||||
asm volatile(
|
||||
#ifdef CONFIG_CPU_HAS_TLBI
|
||||
"mtcr %1, cr<28, 15> \n"
|
||||
#endif
|
||||
"mtcr %1, cr<29, 15> \n"
|
||||
"mtcr %0, cr< 4, 15> \n"
|
||||
".rept 64 \n"
|
||||
NOP32
|
||||
".endr \n"
|
||||
:
|
||||
:"r"(asid), "r"(__pa(pgd) | BIT(0))
|
||||
:"memory");
|
||||
}
|
||||
|
||||
static inline pgd_t *get_pgd(void)
|
||||
|
@ -30,8 +30,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
||||
if (prev != next)
|
||||
check_and_switch_context(next, cpu);
|
||||
|
||||
setup_pgd(next->pgd);
|
||||
write_mmu_entryhi(next->context.asid.counter);
|
||||
setup_pgd(next->pgd, next->context.asid.counter);
|
||||
|
||||
flush_icache_deferred(next);
|
||||
}
|
||||
|
@ -164,7 +164,7 @@ void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn)
|
||||
/* Setup page mask to 4k */
|
||||
write_mmu_pagemask(0);
|
||||
|
||||
setup_pgd(swapper_pg_dir);
|
||||
setup_pgd(swapper_pg_dir, 0);
|
||||
}
|
||||
|
||||
void __init fixrange_init(unsigned long start, unsigned long end,
|
||||
|
@ -24,7 +24,13 @@ void flush_tlb_all(void)
|
||||
void flush_tlb_mm(struct mm_struct *mm)
|
||||
{
|
||||
#ifdef CONFIG_CPU_HAS_TLBI
|
||||
asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
|
||||
sync_is();
|
||||
asm volatile(
|
||||
"tlbi.asids %0 \n"
|
||||
"sync.i \n"
|
||||
:
|
||||
: "r" (cpu_asid(mm))
|
||||
: "memory");
|
||||
#else
|
||||
tlb_invalid_all();
|
||||
#endif
|
||||
@ -53,11 +59,17 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
|
||||
end &= TLB_ENTRY_SIZE_MASK;
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_TLBI
|
||||
sync_is();
|
||||
while (start < end) {
|
||||
asm volatile("tlbi.vas %0"::"r"(start | newpid));
|
||||
asm volatile(
|
||||
"tlbi.vas %0 \n"
|
||||
:
|
||||
: "r" (start | newpid)
|
||||
: "memory");
|
||||
|
||||
start += 2*PAGE_SIZE;
|
||||
}
|
||||
sync_is();
|
||||
asm volatile("sync.i\n");
|
||||
#else
|
||||
{
|
||||
unsigned long flags, oldpid;
|
||||
@ -87,11 +99,17 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
|
||||
end &= TLB_ENTRY_SIZE_MASK;
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_TLBI
|
||||
sync_is();
|
||||
while (start < end) {
|
||||
asm volatile("tlbi.vaas %0"::"r"(start));
|
||||
asm volatile(
|
||||
"tlbi.vaas %0 \n"
|
||||
:
|
||||
: "r" (start)
|
||||
: "memory");
|
||||
|
||||
start += 2*PAGE_SIZE;
|
||||
}
|
||||
sync_is();
|
||||
asm volatile("sync.i\n");
|
||||
#else
|
||||
{
|
||||
unsigned long flags, oldpid;
|
||||
@ -121,8 +139,13 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
|
||||
addr &= TLB_ENTRY_SIZE_MASK;
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_TLBI
|
||||
asm volatile("tlbi.vas %0"::"r"(addr | newpid));
|
||||
sync_is();
|
||||
asm volatile(
|
||||
"tlbi.vas %0 \n"
|
||||
"sync.i \n"
|
||||
:
|
||||
: "r" (addr | newpid)
|
||||
: "memory");
|
||||
#else
|
||||
{
|
||||
int oldpid, idx;
|
||||
@ -147,8 +170,13 @@ void flush_tlb_one(unsigned long addr)
|
||||
addr &= TLB_ENTRY_SIZE_MASK;
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_TLBI
|
||||
asm volatile("tlbi.vaas %0"::"r"(addr));
|
||||
sync_is();
|
||||
asm volatile(
|
||||
"tlbi.vaas %0 \n"
|
||||
"sync.i \n"
|
||||
:
|
||||
: "r" (addr)
|
||||
: "memory");
|
||||
#else
|
||||
{
|
||||
int oldpid, idx;
|
||||
|
Loading…
Reference in New Issue
Block a user