Merge tag 'smp-core-2023-10-29-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull SMP and CPU hotplug updates from Thomas Gleixner:

 - Switch the smp_call_function*() @csd argument to the call_single_data_t
   type, which is a cache-line aligned typedef of the underlying struct
   __call_single_data. This ensures that the call data does not cross a
   cache line, which avoids bouncing an extra cache line for the SMP
   function call.

 - Prevent offlining of the last housekeeping CPU when CPU isolation is
   active. Offlining the last housekeeping CPU makes no sense in general,
   and it also caused the scheduler to panic due to the empty CPU mask
   when rebuilding the scheduler domains.

 - Remove an unused CPU hotplug state.

* tag 'smp-core-2023-10-29-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  cpu/hotplug: Don't offline the last non-isolated CPU
  cpu/hotplug: Remove unused cpuhp_state CPUHP_AP_X86_VDSO_VMA_ONLINE
  smp: Change function signatures to use call_single_data_t
commit c891e98ab3
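For readers unfamiliar with the type: call_single_data_t is not a new structure, it is an alignment-carrying typedef of the same struct. A minimal sketch of the idea, simplified from include/linux/smp.h (field details abbreviated, not the verbatim kernel definition):

struct __call_single_data {
        struct __call_single_node node;   /* llist node + CSD lock flags */
        smp_call_func_t func;             /* function to run on the target CPU */
        void *info;                       /* argument handed to func */
};

/*
 * Same struct, but the typedef adds an alignment equal to the object's
 * size.  Instances declared as call_single_data_t therefore never
 * straddle a cache-line boundary, so queueing and running a remote call
 * touches one cache line instead of two.
 */
typedef struct __call_single_data call_single_data_t
        __aligned(sizeof(struct __call_single_data));

Changing the function signatures from struct __call_single_data * to call_single_data_t * does not alter the calls themselves; it documents that callers are expected to declare their CSDs with the aligned type.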
include/linux/cpuhotplug.h

@@ -204,7 +204,6 @@ enum cpuhp_state {
         CPUHP_AP_KVM_ONLINE,
         CPUHP_AP_SCHED_WAIT_EMPTY,
         CPUHP_AP_SMPBOOT_THREADS,
-        CPUHP_AP_X86_VDSO_VMA_ONLINE,
         CPUHP_AP_IRQ_AFFINITY_ONLINE,
         CPUHP_AP_BLK_MQ_ONLINE,
         CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
include/linux/smp.h

@@ -53,7 +53,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
                            void *info, bool wait, const struct cpumask *mask);

-int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);

 /*
  * Cpus stopping functions in panic. All have default weak definitions.
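As a usage note (a hedged sketch, not code from this series): the caller already owns the CSD object, and only the declaration needs the aligned typedef. The callback and helper names below are illustrative only.

#include <linux/smp.h>
#include <linux/printk.h>

/* Illustrative callback: runs on the target CPU, typically in IPI context. */
static void example_csd_func(void *info)
{
        pr_info("csd callback on CPU %d\n", smp_processor_id());
}

/* Declared with the aligned typedef so the object stays in one cache line. */
static call_single_data_t example_csd;

/* One-time setup, e.g. from module init. */
static void example_csd_setup(void)
{
        INIT_CSD(&example_csd, example_csd_func, NULL);
}

/* Fire-and-forget call on @cpu.  The CSD must not be reused until the
 * previous callback has completed; the async call returns -EBUSY if the
 * CSD is still locked. */
static int example_kick_cpu(int cpu)
{
        return smp_call_function_single_async(cpu, &example_csd);
}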
include/trace/events/csd.h

@@ -12,7 +12,7 @@ TRACE_EVENT(csd_queue_cpu,
         TP_PROTO(const unsigned int cpu,
                  unsigned long callsite,
                  smp_call_func_t func,
-                 struct __call_single_data *csd),
+                 call_single_data_t *csd),

         TP_ARGS(cpu, callsite, func, csd),

@@ -39,7 +39,7 @@ TRACE_EVENT(csd_queue_cpu,
  */
 DECLARE_EVENT_CLASS(csd_function,

-        TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+        TP_PROTO(smp_call_func_t func, call_single_data_t *csd),

         TP_ARGS(func, csd),

@@ -57,12 +57,12 @@ DECLARE_EVENT_CLASS(csd_function,
 );

 DEFINE_EVENT(csd_function, csd_function_entry,
-        TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+        TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
         TP_ARGS(func, csd)
 );

 DEFINE_EVENT(csd_function, csd_function_exit,
-        TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+        TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
         TP_ARGS(func, csd)
 );
kernel/cpu.c
@@ -1515,11 +1515,14 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
         /*
          * Ensure that the control task does not run on the to be offlined
          * CPU to prevent a deadlock against cfs_b->period_timer.
+         * Also keep at least one housekeeping cpu onlined to avoid generating
+         * an empty sched_domain span.
          */
-        cpu = cpumask_any_but(cpu_online_mask, cpu);
-        if (cpu >= nr_cpu_ids)
-                return -EBUSY;
-        return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
+        for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
+                if (cpu != work.cpu)
+                        return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
+        }
+        return -EBUSY;
 }

 static int cpu_down(unsigned int cpu, enum cpuhp_state target)
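Restated outside the diff: the CPU chosen to run the offlining work must itself be an online housekeeping CPU, so the request is rejected once the CPU being removed is the last such CPU. A hedged helper sketch mirroring that check (this function does not exist in the kernel; the name is illustrative):

#include <linux/cpumask.h>
#include <linux/sched/isolation.h>

/* Would taking @cpu offline leave no online housekeeping CPU behind?
 * Mirrors the selection loop added to cpu_down_maps_locked(). */
static bool offline_would_empty_housekeeping(unsigned int cpu)
{
        unsigned int other;

        for_each_cpu_and(other, cpu_online_mask,
                         housekeeping_cpumask(HK_TYPE_DOMAIN)) {
                if (other != cpu)
                        return false;   /* another housekeeping CPU remains */
        }
        return true;    /* @cpu is the last online housekeeping CPU */
}

When CPU isolation is not configured, housekeeping_cpumask(HK_TYPE_DOMAIN) effectively covers all CPUs, so the behaviour matches the old "any other online CPU" check.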
kernel/smp.c
@@ -127,7 +127,7 @@ send_call_function_ipi_mask(struct cpumask *mask)
 }

 static __always_inline void
-csd_do_func(smp_call_func_t func, void *info, struct __call_single_data *csd)
+csd_do_func(smp_call_func_t func, void *info, call_single_data_t *csd)
 {
         trace_csd_function_entry(func, csd);
         func(info);
@@ -174,7 +174,7 @@ module_param(csd_lock_timeout, ulong, 0444);
 static atomic_t csd_bug_count = ATOMIC_INIT(0);

 /* Record current CSD work for current CPU, NULL to erase. */
-static void __csd_lock_record(struct __call_single_data *csd)
+static void __csd_lock_record(call_single_data_t *csd)
 {
         if (!csd) {
                 smp_mb(); /* NULL cur_csd after unlock. */
@@ -189,13 +189,13 @@ static void __csd_lock_record(call_single_data_t *csd)
         /* Or before unlock, as the case may be. */
 }

-static __always_inline void csd_lock_record(struct __call_single_data *csd)
+static __always_inline void csd_lock_record(call_single_data_t *csd)
 {
         if (static_branch_unlikely(&csdlock_debug_enabled))
                 __csd_lock_record(csd);
 }

-static int csd_lock_wait_getcpu(struct __call_single_data *csd)
+static int csd_lock_wait_getcpu(call_single_data_t *csd)
 {
         unsigned int csd_type;

@@ -210,7 +210,7 @@ static int csd_lock_wait_getcpu(call_single_data_t *csd)
  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
  * so waiting on other types gets much less information.
  */
-static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
+static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
 {
         int cpu = -1;
         int cpux;
@@ -276,7 +276,7 @@ static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static void __csd_lock_wait(struct __call_single_data *csd)
+static void __csd_lock_wait(call_single_data_t *csd)
 {
         int bug_id = 0;
         u64 ts0, ts1;
@@ -290,7 +290,7 @@ static void __csd_lock_wait(call_single_data_t *csd)
         smp_acquire__after_ctrl_dep();
 }

-static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
 {
         if (static_branch_unlikely(&csdlock_debug_enabled)) {
                 __csd_lock_wait(csd);
@@ -300,17 +300,17 @@ static __always_inline void csd_lock_wait(call_single_data_t *csd)
         smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 #else
-static void csd_lock_record(struct __call_single_data *csd)
+static void csd_lock_record(call_single_data_t *csd)
 {
 }

-static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
 {
         smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 #endif

-static __always_inline void csd_lock(struct __call_single_data *csd)
+static __always_inline void csd_lock(call_single_data_t *csd)
 {
         csd_lock_wait(csd);
         csd->node.u_flags |= CSD_FLAG_LOCK;
@@ -323,7 +323,7 @@ static __always_inline void csd_lock(call_single_data_t *csd)
         smp_wmb();
 }

-static __always_inline void csd_unlock(struct __call_single_data *csd)
+static __always_inline void csd_unlock(call_single_data_t *csd)
 {
         WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

@@ -376,7 +376,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static int generic_exec_single(int cpu, struct __call_single_data *csd)
+static int generic_exec_single(int cpu, call_single_data_t *csd)
 {
         if (cpu == smp_processor_id()) {
                 smp_call_func_t func = csd->func;
@@ -667,7 +667,7 @@ EXPORT_SYMBOL(smp_call_function_single);
  *
  * Return: %0 on success or negative errno value on error
  */
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 {
         int err = 0;

kernel/up.c

@@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);

-int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 {
         unsigned long flags;
