x86/smpboot: Make TSC synchronization function call based
Spin-waiting on the control CPU until the AP reaches the TSC
synchronization code is just a waste, especially when no synchronization
is required.

As the synchronization has to run with interrupts disabled, the control
CPU's part can simply be done from an SMP function call. The upcoming AP
issues that call asynchronously, and only when synchronization is
actually required.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Tested-by: Guilherme G. Piccoli <gpiccoli@igalia.com> # Steam Deck
Link: https://lore.kernel.org/r/20230512205256.148255496@linutronix.de
parent d4f28f07c2
commit 9d349d47f0
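In short, the patch inverts the handshake: rather than the control CPU
spin-waiting in wait_cpu_online() and running check_tsc_sync_source()
itself, the freshly booted AP kicks the control CPU with an asynchronous
smp_call_function_single() (wait=0), and only when a synchronization check
is actually needed. A minimal sketch of that calling pattern follows; the
helper names sync_source_fn() and kick_tsc_sync() are illustrative
stand-ins, not the patch's identifiers, and the measurement bodies are
elided down to the shape of the call:

#include <linux/smp.h>
#include <linux/cpumask.h>

/* Control-CPU half; runs in the IRQ context of the SMP function call. */
static void sync_source_fn(void *__cpu)
{
	unsigned int cpu = (unsigned long)__cpu;

	/* ... source side of the TSC warp measurement against @cpu ... */
}

/* AP side; called with interrupts disabled during bringup. */
static void kick_tsc_sync(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * wait=0 makes the call asynchronous: the IPI is fired and the AP
	 * proceeds immediately; both CPUs then meet in the measurement's
	 * own atomic rendezvous instead of the caller spin-waiting here.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 sync_source_fn,
				 (void *)(unsigned long)cpu, 0);
}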
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -55,12 +55,10 @@ extern bool tsc_async_resets;

 #ifdef CONFIG_X86_TSC
 extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
 extern void tsc_verify_tsc_adjust(bool resume);
-extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 #else
 static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu) { return false; }
 static inline void tsc_verify_tsc_adjust(bool resume) { }
-static inline void check_tsc_sync_source(int cpu) { }
 static inline void check_tsc_sync_target(void) { }
 #endif
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -275,11 +275,7 @@ static void notrace start_secondary(void *unused)
	 */
	smp_callin();

-	/*
-	 * Check TSC synchronization with the control CPU, which will do
-	 * its part of this from wait_cpu_online(), making it an implicit
-	 * synchronization point.
-	 */
+	/* Check TSC synchronization with the control CPU. */
	check_tsc_sync_target();

	/*
@@ -1141,21 +1137,11 @@ static void wait_cpu_callin(unsigned int cpu)
 }

 /*
- * Bringup step four: Synchronize the TSC and wait for the target AP
- * to reach set_cpu_online() in start_secondary().
+ * Bringup step four: Wait for the target AP to reach set_cpu_online() in
+ * start_secondary().
  */
 static void wait_cpu_online(unsigned int cpu)
 {
-	unsigned long flags;
-
-	/*
-	 * Check TSC synchronization with the AP (keep irqs disabled
-	 * while doing so):
-	 */
-	local_irq_save(flags);
-	check_tsc_sync_source(cpu);
-	local_irq_restore(flags);
-
	/*
	 * Wait for the AP to mark itself online, so the core caller
	 * can drop sparse_irq_lock.
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -245,7 +245,6 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
  */
 static atomic_t start_count;
 static atomic_t stop_count;
-static atomic_t skip_test;
 static atomic_t test_runs;

 /*
@@ -344,20 +343,13 @@ static inline unsigned int loop_timeout(int cpu)
 }

 /*
- * Source CPU calls into this - it waits for the freshly booted
- * target CPU to arrive and then starts the measurement:
+ * The freshly booted CPU initiates this via an async SMP function call.
  */
-void check_tsc_sync_source(int cpu)
+static void check_tsc_sync_source(void *__cpu)
 {
+	unsigned int cpu = (unsigned long)__cpu;
	int cpus = 2;

-	/*
-	 * No need to check if we already know that the TSC is not
-	 * synchronized or if we have no TSC.
-	 */
-	if (unsynchronized_tsc())
-		return;
-
	/*
	 * Set the maximum number of test runs to
	 *  1 if the CPU does not provide the TSC_ADJUST MSR
@@ -368,16 +360,9 @@ void check_tsc_sync_source(int cpu)
	else
		atomic_set(&test_runs, 3);
 retry:
-	/*
-	 * Wait for the target to start or to skip the test:
-	 */
-	while (atomic_read(&start_count) != cpus - 1) {
-		if (atomic_read(&skip_test) > 0) {
-			atomic_set(&skip_test, 0);
-			return;
-		}
+	/* Wait for the target to start. */
+	while (atomic_read(&start_count) != cpus - 1)
		cpu_relax();
-	}

	/*
	 * Trigger the target to continue into the measurement too:
@@ -397,14 +382,14 @@ retry:
	if (!nr_warps) {
		atomic_set(&test_runs, 0);

-		pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
+		pr_debug("TSC synchronization [CPU#%d -> CPU#%u]: passed\n",
			smp_processor_id(), cpu);

	} else if (atomic_dec_and_test(&test_runs) || random_warps) {
		/* Force it to 0 if random warps brought us here */
		atomic_set(&test_runs, 0);

-		pr_warn("TSC synchronization [CPU#%d -> CPU#%d]:\n",
+		pr_warn("TSC synchronization [CPU#%d -> CPU#%u]:\n",
			smp_processor_id(), cpu);
		pr_warn("Measured %Ld cycles TSC warp between CPUs, "
			"turning off TSC clock.\n", max_warp);
@@ -457,11 +442,12 @@ void check_tsc_sync_target(void)
	 * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
	 * register might have been wreckaged by the BIOS..
	 */
-	if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
-		atomic_inc(&skip_test);
+	if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable)
		return;
-	}

+	/* Kick the control CPU into the TSC synchronization function */
+	smp_call_function_single(cpumask_first(cpu_online_mask), check_tsc_sync_source,
+				 (unsigned long *)(unsigned long)cpu, 0);
 retry:
	/*
	 * Register this CPU's participation and wait for the
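The "Wait for the target to start." loop in the hunks above relies on a
simple counter rendezvous: each participant bumps start_count and spins
until everyone has arrived, and only then does the warp measurement begin.
A standalone C11 sketch of that idea, with userspace atomics and threads
standing in for the kernel's atomic_t and cpu_relax() (an illustration of
the pattern, not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int start_count;

/* Each side checks in, then spins until all parties have arrived. */
static void rendezvous(int parties)
{
	atomic_fetch_add(&start_count, 1);
	while (atomic_load(&start_count) != parties)
		;	/* spin; the kernel would use cpu_relax() here */
}

static void *target(void *unused)
{
	rendezvous(2);
	printf("target: rendezvous complete, measurement may start\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, target, NULL);
	rendezvous(2);	/* source side */
	printf("source: rendezvous complete, measurement may start\n");
	pthread_join(t, NULL);
	return 0;
}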