perf counters: consolidate hw_perf save/restore APIs
Impact: cleanup

Rename them to better match up the usual IRQ disable/enable APIs:

	hw_perf_disable_all()  => hw_perf_save_disable()
	hw_perf_restore_ctrl() => hw_perf_restore()

Signed-off-by: Ingo Molnar <mingo@elte.hu>
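For context, the renamed pair is used as a save/restore bracket around NMI-sensitive work, in the spirit of local_irq_save()/local_irq_restore(). A minimal caller-side sketch of the pattern (illustrative only; the surrounding comments are not part of this commit, the calls mirror the uses in kernel/perf_counter.c below):

	u64 perf_flags;

	/* Save the current counter-enable state and disable all counters ... */
	perf_flags = hw_perf_save_disable();	/* was: hw_perf_disable_all() */

	/* ... NMI-sensitive critical section, e.g. a counter-list update ... */

	/* ... then restore the saved state. */
	hw_perf_restore(perf_flags);		/* was: hw_perf_restore_ctrl() */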
commit 01b2838c42
parent 5c92d12411
@@ -118,13 +118,13 @@ void hw_perf_enable_all(void)
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
 }
 
-void hw_perf_restore_ctrl(u64 ctrl)
+void hw_perf_restore(u64 ctrl)
 {
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
 }
-EXPORT_SYMBOL_GPL(hw_perf_restore_ctrl);
+EXPORT_SYMBOL_GPL(hw_perf_restore);
 
-u64 hw_perf_disable_all(void)
+u64 hw_perf_save_disable(void)
 {
 	u64 ctrl;
 
@@ -132,7 +132,7 @@ u64 hw_perf_disable_all(void)
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
 	return ctrl;
 }
-EXPORT_SYMBOL_GPL(hw_perf_disable_all);
+EXPORT_SYMBOL_GPL(hw_perf_save_disable);
 
 static inline void
 __x86_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
@@ -270,11 +270,11 @@ static atomic_t c3_cpu_count;
 /* Common C-state entry for C2, C3, .. */
 static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 {
-	u64 pctrl;
+	u64 perf_flags;
 
 	/* Don't trace irqs off for idle */
 	stop_critical_timings();
-	pctrl = hw_perf_disable_all();
+	perf_flags = hw_perf_save_disable();
 	if (cstate->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cstate);
@@ -287,7 +287,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 		   gets asserted in time to freeze execution properly. */
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
-	hw_perf_restore_ctrl(pctrl);
+	hw_perf_restore(perf_flags);
 	start_critical_timings();
 }
 #endif /* !CONFIG_CPU_IDLE */
@@ -1433,7 +1433,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 
 	/* Don't trace irqs off for idle */
 	stop_critical_timings();
-	pctrl = hw_perf_disable_all();
+	pctrl = hw_perf_save_disable();
 	if (cx->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cx);
@@ -1448,7 +1448,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 		   gets asserted in time to freeze execution properly. */
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
-	hw_perf_restore_ctrl(pctrl);
+	hw_perf_restore(pctrl);
 	start_critical_timings();
 }
 
@@ -67,7 +67,7 @@ enum perf_counter_record_type {
  * Hardware event to monitor via a performance monitoring counter:
  */
 struct perf_counter_hw_event {
-	u64		type;
+	s64		type;
 
 	u64		irq_period;
 	u32		record_type;
@@ -206,8 +206,8 @@ extern void perf_counter_task_tick(struct task_struct *task, int cpu);
 extern void perf_counter_init_task(struct task_struct *task);
 extern void perf_counter_notify(struct pt_regs *regs);
 extern void perf_counter_print_debug(void);
-extern void hw_perf_restore_ctrl(u64 ctrl);
-extern u64 hw_perf_disable_all(void);
+extern u64 hw_perf_save_disable(void);
+extern void hw_perf_restore(u64 ctrl);
 extern void atomic64_counter_set(struct perf_counter *counter, u64 val64);
 extern u64 atomic64_counter_read(struct perf_counter *counter);
 
@@ -221,8 +221,8 @@ perf_counter_task_tick(struct task_struct *task, int cpu) { }
 static inline void perf_counter_init_task(struct task_struct *task) { }
 static inline void perf_counter_notify(struct pt_regs *regs) { }
 static inline void perf_counter_print_debug(void) { }
-static inline void hw_perf_restore_ctrl(u64 ctrl) { }
-static inline u64 hw_perf_disable_all(void) { return 0; }
+static inline void hw_perf_restore(u64 ctrl) { }
+static inline u64 hw_perf_save_disable(void) { return 0; }
 #endif
 
 #endif /* _LINUX_PERF_COUNTER_H */
@@ -43,8 +43,8 @@ hw_perf_counter_init(struct perf_counter *counter)
 	return ERR_PTR(-EINVAL);
 }
 
-u64 __weak hw_perf_disable_all(void) { return 0; }
-void __weak hw_perf_restore_ctrl(u64 ctrl) { }
+u64 __weak hw_perf_save_disable(void) { return 0; }
+void __weak hw_perf_restore(u64 ctrl) { }
 void __weak hw_perf_counter_setup(void) { }
 
 #if BITS_PER_LONG == 64
@@ -180,9 +180,9 @@ static void __perf_counter_remove_from_context(void *info)
 	 * Protect the list operation against NMI by disabling the
 	 * counters on a global level. NOP for non NMI based counters.
 	 */
-	perf_flags = hw_perf_disable_all();
+	perf_flags = hw_perf_save_disable();
 	list_del_counter(counter, ctx);
-	hw_perf_restore_ctrl(perf_flags);
+	hw_perf_restore(perf_flags);
 
 	if (!ctx->task) {
 		/*
@@ -273,9 +273,9 @@ static void __perf_install_in_context(void *info)
 	 * Protect the list operation against NMI by disabling the
 	 * counters on a global level. NOP for non NMI based counters.
 	 */
-	perf_flags = hw_perf_disable_all();
+	perf_flags = hw_perf_save_disable();
 	list_add_counter(counter, ctx);
-	hw_perf_restore_ctrl(perf_flags);
+	hw_perf_restore(perf_flags);
 
 	ctx->nr_counters++;
 
@@ -495,13 +495,13 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 	/*
 	 * Rotate the first entry last (works just fine for group counters too):
 	 */
-	perf_flags = hw_perf_disable_all();
+	perf_flags = hw_perf_save_disable();
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
 		list_del(&counter->list_entry);
 		list_add_tail(&counter->list_entry, &ctx->counter_list);
 		break;
 	}
-	hw_perf_restore_ctrl(perf_flags);
+	hw_perf_restore(perf_flags);
 
 	spin_unlock(&ctx->lock);
 
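Taken together, the post-rename API reads roughly as below: a weak no-op default for architectures without a hardware implementation, overridden by the x86 MSR-based version. This is a sketch assembled from the hunks above, not a verbatim file; in particular the rdmsrl() read of the current global-enable state falls outside the diff context and is assumed here.

/* Generic no-op fallbacks (weak symbols), overridden per architecture: */
u64  __weak hw_perf_save_disable(void)	{ return 0; }
void __weak hw_perf_restore(u64 ctrl)	{ }

/* x86: save the global-enable MSR state, disable all counters, restore later. */
u64 hw_perf_save_disable(void)
{
	u64 ctrl;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);	/* assumed: read current state */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);		/* disable all counters */

	return ctrl;
}

void hw_perf_restore(u64 ctrl)
{
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);	/* re-enable saved state */
}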