arm64: arch_timer: mark functions as __always_inline

If CONFIG_FUNCTION_GRAPH_TRACER is enabled, the function
arch_counter_get_cntvct() is marked as notrace. However, the function
__arch_counter_get_cntvct() is only marked as inline. If
CONFIG_OPTIMIZE_INLINING is set, the compiler may emit it out of line,
which makes the two functions traceable when they shouldn't be.

Rework so that the __arch_counter_get_*() functions are marked as
__always_inline, so they are inlined even when CONFIG_OPTIMIZE_INLINING
is turned on.
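
As a rough illustration of the pattern involved (a sketch with made-up
names, not the actual kernel functions):

  #include <linux/compiler.h>	/* notrace, __always_inline */
  #include <linux/types.h>	/* u64 */

  /*
   * Illustrative sketch only.  If the helper were plain "inline",
   * CONFIG_OPTIMIZE_INLINING=y would allow the compiler to emit it
   * out of line, and that out-of-line copy would get the usual
   * ftrace entry hook -- making the counter read traceable even
   * though the wrapper itself is notrace.  __always_inline forces
   * the helper to be folded into its notrace caller.
   */
  static __always_inline u64 counter_read_helper(void)
  {
  	u64 cnt = 0;

  	/* the real helpers read CNTPCT_EL0 / CNTVCT_EL0 here */
  	return cnt;
  }

  static notrace u64 counter_read(void)
  {
  	return counter_read_helper();
  }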

Fixes: 0ea415390c ("clocksource/arm_arch_timer: Use arch_timer_read_counter to access stable counters")
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Author:    Anders Roxell <anders.roxell@linaro.org>
Date:      2019-06-03 11:14:02 +02:00
Committer: Will Deacon <will.deacon@arm.com>
parent 262afe92fa
commit f31e98bfae

@@ -193,7 +193,7 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
 		: "=r" (tmp) : "r" (_val));			\
 } while (0)
 
-static inline u64 __arch_counter_get_cntpct_stable(void)
+static __always_inline u64 __arch_counter_get_cntpct_stable(void)
 {
 	u64 cnt;
 
@@ -203,7 +203,7 @@ static inline u64 __arch_counter_get_cntpct_stable(void)
 	return cnt;
 }
 
-static inline u64 __arch_counter_get_cntpct(void)
+static __always_inline u64 __arch_counter_get_cntpct(void)
 {
 	u64 cnt;
 
@@ -213,7 +213,7 @@ static inline u64 __arch_counter_get_cntpct(void)
 	return cnt;
 }
 
-static inline u64 __arch_counter_get_cntvct_stable(void)
+static __always_inline u64 __arch_counter_get_cntvct_stable(void)
 {
 	u64 cnt;
 
@@ -223,7 +223,7 @@ static inline u64 __arch_counter_get_cntvct_stable(void)
 	return cnt;
 }
 
-static inline u64 __arch_counter_get_cntvct(void)
+static __always_inline u64 __arch_counter_get_cntvct(void)
 {
 	u64 cnt;