mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 22:21:40 +00:00
ARM: 7862/1: pcpu: replace __get_cpu_var_uses
This is the ARM part of Christoph's patchset cleaning up the various uses of __get_cpu_var across the tree. The idea is to convert __get_cpu_var into either an explicit address calculation using this_cpu_ptr() or into a use of this_cpu operations that use the offset. Thereby address calculations are avoided and fewer registers are used when code is generated. [will: fixed debug ref counting checks and pcpu array accesses] Acked-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Christoph Lameter <cl@linux.com> Signed-off-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
This commit is contained in:
parent
39792c7cf3
commit
1436c1aa62
@ -344,13 +344,13 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
|
||||
/* Breakpoint */
|
||||
ctrl_base = ARM_BASE_BCR;
|
||||
val_base = ARM_BASE_BVR;
|
||||
slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
|
||||
slots = this_cpu_ptr(bp_on_reg);
|
||||
max_slots = core_num_brps;
|
||||
} else {
|
||||
/* Watchpoint */
|
||||
ctrl_base = ARM_BASE_WCR;
|
||||
val_base = ARM_BASE_WVR;
|
||||
slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
|
||||
slots = this_cpu_ptr(wp_on_reg);
|
||||
max_slots = core_num_wrps;
|
||||
}
|
||||
|
||||
@ -396,12 +396,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
|
||||
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
|
||||
/* Breakpoint */
|
||||
base = ARM_BASE_BCR;
|
||||
slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
|
||||
slots = this_cpu_ptr(bp_on_reg);
|
||||
max_slots = core_num_brps;
|
||||
} else {
|
||||
/* Watchpoint */
|
||||
base = ARM_BASE_WCR;
|
||||
slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
|
||||
slots = this_cpu_ptr(wp_on_reg);
|
||||
max_slots = core_num_wrps;
|
||||
}
|
||||
|
||||
@ -697,7 +697,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
|
||||
struct arch_hw_breakpoint *info;
|
||||
struct arch_hw_breakpoint_ctrl ctrl;
|
||||
|
||||
slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
|
||||
slots = this_cpu_ptr(wp_on_reg);
|
||||
|
||||
for (i = 0; i < core_num_wrps; ++i) {
|
||||
rcu_read_lock();
|
||||
@ -768,7 +768,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
|
||||
struct perf_event *wp, **slots;
|
||||
struct arch_hw_breakpoint *info;
|
||||
|
||||
slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
|
||||
slots = this_cpu_ptr(wp_on_reg);
|
||||
|
||||
for (i = 0; i < core_num_wrps; ++i) {
|
||||
rcu_read_lock();
|
||||
@ -802,7 +802,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
|
||||
struct arch_hw_breakpoint *info;
|
||||
struct arch_hw_breakpoint_ctrl ctrl;
|
||||
|
||||
slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
|
||||
slots = this_cpu_ptr(bp_on_reg);
|
||||
|
||||
/* The exception entry code places the amended lr in the PC. */
|
||||
addr = regs->ARM_pc;
|
||||
|
@ -171,13 +171,13 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
|
||||
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
|
||||
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
|
||||
kcb->kprobe_status = kcb->prev_kprobe.status;
|
||||
}
|
||||
|
||||
static void __kprobes set_current_kprobe(struct kprobe *p)
|
||||
{
|
||||
__get_cpu_var(current_kprobe) = p;
|
||||
__this_cpu_write(current_kprobe, p);
|
||||
}
|
||||
|
||||
static void __kprobes
|
||||
@ -421,10 +421,10 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
|
||||
continue;
|
||||
|
||||
if (ri->rp && ri->rp->handler) {
|
||||
__get_cpu_var(current_kprobe) = &ri->rp->kp;
|
||||
__this_cpu_write(current_kprobe, &ri->rp->kp);
|
||||
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
|
||||
ri->rp->handler(ri, regs);
|
||||
__get_cpu_var(current_kprobe) = NULL;
|
||||
__this_cpu_write(current_kprobe, NULL);
|
||||
}
|
||||
|
||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||
|
@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
|
||||
|
||||
static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
|
||||
{
|
||||
return &__get_cpu_var(cpu_hw_events);
|
||||
return this_cpu_ptr(&cpu_hw_events);
|
||||
}
|
||||
|
||||
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
|
||||
|
@ -65,7 +65,7 @@ static bool vgic_present;
|
||||
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
BUG_ON(preemptible());
|
||||
__get_cpu_var(kvm_arm_running_vcpu) = vcpu;
|
||||
__this_cpu_write(kvm_arm_running_vcpu, vcpu);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -75,7 +75,7 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
|
||||
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
|
||||
{
|
||||
BUG_ON(preemptible());
|
||||
return __get_cpu_var(kvm_arm_running_vcpu);
|
||||
return __this_cpu_read(kvm_arm_running_vcpu);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -815,7 +815,7 @@ static void cpu_init_hyp_mode(void *dummy)
|
||||
|
||||
boot_pgd_ptr = kvm_mmu_get_boot_httbr();
|
||||
pgd_ptr = kvm_mmu_get_httbr();
|
||||
stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
|
||||
stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
|
||||
hyp_stack_ptr = stack_page + PAGE_SIZE;
|
||||
vector_ptr = (unsigned long)__kvm_hyp_vector;
|
||||
|
||||
|
@ -27,7 +27,6 @@
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include <asm/debug-monitors.h>
|
||||
#include <asm/local.h>
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/system_misc.h>
|
||||
|
||||
@ -89,8 +88,8 @@ early_param("nodebugmon", early_debug_disable);
|
||||
* Keep track of debug users on each core.
|
||||
* The ref counts are per-cpu so we use a local_t type.
|
||||
*/
|
||||
static DEFINE_PER_CPU(local_t, mde_ref_count);
|
||||
static DEFINE_PER_CPU(local_t, kde_ref_count);
|
||||
static DEFINE_PER_CPU(int, mde_ref_count);
|
||||
static DEFINE_PER_CPU(int, kde_ref_count);
|
||||
|
||||
void enable_debug_monitors(enum debug_el el)
|
||||
{
|
||||
@ -98,11 +97,11 @@ void enable_debug_monitors(enum debug_el el)
|
||||
|
||||
WARN_ON(preemptible());
|
||||
|
||||
if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1)
|
||||
if (this_cpu_inc_return(mde_ref_count) == 1)
|
||||
enable = DBG_MDSCR_MDE;
|
||||
|
||||
if (el == DBG_ACTIVE_EL1 &&
|
||||
local_inc_return(&__get_cpu_var(kde_ref_count)) == 1)
|
||||
this_cpu_inc_return(kde_ref_count) == 1)
|
||||
enable |= DBG_MDSCR_KDE;
|
||||
|
||||
if (enable && debug_enabled) {
|
||||
@ -118,11 +117,11 @@ void disable_debug_monitors(enum debug_el el)
|
||||
|
||||
WARN_ON(preemptible());
|
||||
|
||||
if (local_dec_and_test(&__get_cpu_var(mde_ref_count)))
|
||||
if (this_cpu_dec_return(mde_ref_count) == 0)
|
||||
disable = ~DBG_MDSCR_MDE;
|
||||
|
||||
if (el == DBG_ACTIVE_EL1 &&
|
||||
local_dec_and_test(&__get_cpu_var(kde_ref_count)))
|
||||
this_cpu_dec_return(kde_ref_count) == 0)
|
||||
disable &= ~DBG_MDSCR_KDE;
|
||||
|
||||
if (disable) {
|
||||
|
@ -184,14 +184,14 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
|
||||
/* Breakpoint */
|
||||
ctrl_reg = AARCH64_DBG_REG_BCR;
|
||||
val_reg = AARCH64_DBG_REG_BVR;
|
||||
slots = __get_cpu_var(bp_on_reg);
|
||||
slots = this_cpu_ptr(bp_on_reg);
|
||||
max_slots = core_num_brps;
|
||||
reg_enable = !debug_info->bps_disabled;
|
||||
} else {
|
||||
/* Watchpoint */
|
||||
ctrl_reg = AARCH64_DBG_REG_WCR;
|
||||
val_reg = AARCH64_DBG_REG_WVR;
|
||||
slots = __get_cpu_var(wp_on_reg);
|
||||
slots = this_cpu_ptr(wp_on_reg);
|
||||
max_slots = core_num_wrps;
|
||||
reg_enable = !debug_info->wps_disabled;
|
||||
}
|
||||
@ -230,12 +230,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
|
||||
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
|
||||
/* Breakpoint */
|
||||
base = AARCH64_DBG_REG_BCR;
|
||||
slots = __get_cpu_var(bp_on_reg);
|
||||
slots = this_cpu_ptr(bp_on_reg);
|
||||
max_slots = core_num_brps;
|
||||
} else {
|
||||
/* Watchpoint */
|
||||
base = AARCH64_DBG_REG_WCR;
|
||||
slots = __get_cpu_var(wp_on_reg);
|
||||
slots = this_cpu_ptr(wp_on_reg);
|
||||
max_slots = core_num_wrps;
|
||||
}
|
||||
|
||||
@ -505,11 +505,11 @@ static void toggle_bp_registers(int reg, enum debug_el el, int enable)
|
||||
|
||||
switch (reg) {
|
||||
case AARCH64_DBG_REG_BCR:
|
||||
slots = __get_cpu_var(bp_on_reg);
|
||||
slots = this_cpu_ptr(bp_on_reg);
|
||||
max_slots = core_num_brps;
|
||||
break;
|
||||
case AARCH64_DBG_REG_WCR:
|
||||
slots = __get_cpu_var(wp_on_reg);
|
||||
slots = this_cpu_ptr(wp_on_reg);
|
||||
max_slots = core_num_wrps;
|
||||
break;
|
||||
default:
|
||||
@ -546,7 +546,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr,
|
||||
struct debug_info *debug_info;
|
||||
struct arch_hw_breakpoint_ctrl ctrl;
|
||||
|
||||
slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
|
||||
slots = this_cpu_ptr(bp_on_reg);
|
||||
addr = instruction_pointer(regs);
|
||||
debug_info = &current->thread.debug;
|
||||
|
||||
@ -596,7 +596,7 @@ unlock:
|
||||
user_enable_single_step(current);
|
||||
} else {
|
||||
toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
|
||||
kernel_step = &__get_cpu_var(stepping_kernel_bp);
|
||||
kernel_step = this_cpu_ptr(&stepping_kernel_bp);
|
||||
|
||||
if (*kernel_step != ARM_KERNEL_STEP_NONE)
|
||||
return 0;
|
||||
@ -623,7 +623,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
|
||||
struct arch_hw_breakpoint *info;
|
||||
struct arch_hw_breakpoint_ctrl ctrl;
|
||||
|
||||
slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
|
||||
slots = this_cpu_ptr(wp_on_reg);
|
||||
debug_info = &current->thread.debug;
|
||||
|
||||
for (i = 0; i < core_num_wrps; ++i) {
|
||||
@ -698,7 +698,7 @@ unlock:
|
||||
user_enable_single_step(current);
|
||||
} else {
|
||||
toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
|
||||
kernel_step = &__get_cpu_var(stepping_kernel_bp);
|
||||
kernel_step = this_cpu_ptr(&stepping_kernel_bp);
|
||||
|
||||
if (*kernel_step != ARM_KERNEL_STEP_NONE)
|
||||
return 0;
|
||||
@ -722,7 +722,7 @@ int reinstall_suspended_bps(struct pt_regs *regs)
|
||||
struct debug_info *debug_info = &current->thread.debug;
|
||||
int handled_exception = 0, *kernel_step;
|
||||
|
||||
kernel_step = &__get_cpu_var(stepping_kernel_bp);
|
||||
kernel_step = this_cpu_ptr(&stepping_kernel_bp);
|
||||
|
||||
/*
|
||||
* Called from single-step exception handler.
|
||||
|
@ -1044,7 +1044,7 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
|
||||
*/
|
||||
regs = get_irq_regs();
|
||||
|
||||
cpuc = &__get_cpu_var(cpu_hw_events);
|
||||
cpuc = this_cpu_ptr(&cpu_hw_events);
|
||||
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
|
||||
struct perf_event *event = cpuc->events[idx];
|
||||
struct hw_perf_event *hwc;
|
||||
@ -1257,7 +1257,7 @@ device_initcall(register_pmu_driver);
|
||||
|
||||
static struct pmu_hw_events *armpmu_get_cpu_events(void)
|
||||
{
|
||||
return &__get_cpu_var(cpu_hw_events);
|
||||
return this_cpu_ptr(&cpu_hw_events);
|
||||
}
|
||||
|
||||
static void __init cpu_pmu_init(struct arm_pmu *armpmu)
|
||||
|
Loading…
Reference in New Issue
Block a user