93dc68876b
On Cortex-A15 (r0p0..r3p2) the TLBI/DSB are not adequately shooting down all use of the old entries. This patch implements the erratum workaround which consists of:

1. Dummy TLBIMVAIS and DSB on the CPU doing the TLBI operation.
2. Send IPI to the CPUs that are running the same mm (and ASID) as the one being invalidated (or all the online CPUs for global pages).
3. CPU receiving the IPI executes a DMB and CLREX (part of the exception return code already).

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
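For reference, step 1 above corresponds to the dummy_flush_tlb_a15_erratum() helper that check_and_switch_context() calls in the file below. A minimal sketch of what such a helper looks like is given here, assuming the erratum Kconfig symbol CONFIG_ARM_ERRATA_798181 and the kernel's dsb() barrier; it is an illustrative reconstruction, not necessarily the verbatim code added by this commit:

#ifdef CONFIG_ARM_ERRATA_798181
static inline void dummy_flush_tlb_a15_erratum(void)
{
	/*
	 * Dummy TLBIMVAIS (TLB invalidate by MVA, inner shareable) using
	 * address/ASID 0, followed by a DSB, per step 1 of the workaround.
	 */
	asm("mcr	p15, 0, %0, c8, c3, 1" : : "r" (0));
	dsb();
}
#else
static inline void dummy_flush_tlb_a15_erratum(void)
{
}
#endif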
228 lines
5.8 KiB
C
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed by
 * non 64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)

#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;

	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	asm volatile(
	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
	:
	: "r" (ttbl), "r" (ttbh));
	isb();
}
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif
#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		if (i == cpu) {
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
			__set_bit(ASID_TO_IDX(asid), asid_map);
		}
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	if (!tlb_ops_need_broadcast())
		cpumask_set_cpu(cpu, &tlb_flush_pending);
	else
		cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}
static int is_reserved_asid(u64 asid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0 && is_reserved_asid(asid)) {
		/*
		 * Our current ASID was active during a rollover, we can
		 * continue to use it and this was just a false alarm.
		 */
		asid = generation | (asid & ~ASID_MASK);
	} else {
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
		 * as requiring flushes.
		 */
		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
		if (asid == NUM_USER_ASIDS) {
			generation = atomic64_add_return(ASID_FIRST_VERSION,
							 &asid_generation);
			flush_context(cpu);
			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
		}
		__set_bit(asid, asid_map);
		asid = generation | IDX_TO_ASID(asid);
		cpumask_clear(mm_cpumask(mm));
	}

	return asid;
}
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
		dummy_flush_tlb_a15_erratum();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}