smp,cpumask: introduce on_each_cpu_cond_mask
Introduce a variant of on_each_cpu_cond that iterates only over the
CPUs in a cpumask, in order to avoid making callbacks for every single
CPU in the system when we only need to test a subset.

Cc: npiggin@gmail.com
Cc: mingo@kernel.org
Cc: will.deacon@arm.com
Cc: songliubraving@fb.com
Cc: kernel-team@fb.com
Cc: hpa@zytor.com
Cc: luto@kernel.org
Signed-off-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20180926035844.1420-5-riel@surriel.com
commit 7d49b28a80 (parent c3f7f2c7eb)
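For orientation before the diff: a minimal usage sketch of the new API. Everything here except on_each_cpu_cond_mask() itself (the per-CPU flag, the callbacks, the tracked mask) is hypothetical, invented for illustration:

#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-CPU state; not part of this patch. */
static DEFINE_PER_CPU(bool, needs_flush);

/* Condition callback: should this CPU receive the IPI? */
static bool flush_needed(int cpu, void *info)
{
	return per_cpu(needs_flush, cpu);
}

/* Work to run on each CPU that passed the condition. */
static void do_flush(void *info)
{
	this_cpu_write(needs_flush, false);
}

/*
 * Only the CPUs in @tracked are tested with flush_needed(); with
 * plain on_each_cpu_cond() every online CPU would be tested.
 */
static void flush_tracked_cpus(const struct cpumask *tracked)
{
	on_each_cpu_cond_mask(flush_needed, do_flush, NULL, true,
			      GFP_ATOMIC, tracked);
}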
include/linux/smp.h
@@ -53,6 +53,10 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 		smp_call_func_t func, void *info, bool wait,
 		gfp_t gfp_flags);
 
+void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
+		smp_call_func_t func, void *info, bool wait,
+		gfp_t gfp_flags, const struct cpumask *mask);
+
 int smp_call_function_single_async(int cpu, call_single_data_t *csd);
 
 #ifdef CONFIG_SMP
kernel/smp.c | 17
@@ -669,9 +669,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * You must not call this function with disabled interrupts or
  * from a hardware interrupt handler or from a bottom half handler.
  */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
 			smp_call_func_t func, void *info, bool wait,
-			gfp_t gfp_flags)
+			gfp_t gfp_flags, const struct cpumask *mask)
 {
 	cpumask_var_t cpus;
 	int cpu, ret;
@@ -680,7 +680,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 
 	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
 		preempt_disable();
-		for_each_online_cpu(cpu)
+		for_each_cpu(cpu, mask)
 			if (cond_func(cpu, info))
 				__cpumask_set_cpu(cpu, cpus);
 		on_each_cpu_mask(cpus, func, info, wait);
@@ -692,7 +692,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 		 * just have to IPI them one by one.
 		 */
 		preempt_disable();
-		for_each_online_cpu(cpu)
+		for_each_cpu(cpu, mask)
 			if (cond_func(cpu, info)) {
 				ret = smp_call_function_single(cpu, func,
 							       info, wait);
@@ -701,6 +701,15 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL(on_each_cpu_cond_mask);
+
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+			smp_call_func_t func, void *info, bool wait,
+			gfp_t gfp_flags)
+{
+	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
+			      cpu_online_mask);
+}
 EXPORT_SYMBOL(on_each_cpu_cond);
 
 static void do_nothing(void *unused)
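As the last hunk shows, the old entry point becomes a thin wrapper, so existing callers keep their behaviour. A sketch of the resulting equivalence (the callback arguments stand in for a real caller's):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/smp.h>

/* Sketch: after this patch the two calls below do identical work. */
static void legacy_vs_mask(bool (*cond)(int cpu, void *info),
			   smp_call_func_t func, void *info)
{
	/* Old interface: condition tested on every online CPU. */
	on_each_cpu_cond(cond, func, info, true, GFP_KERNEL);

	/* Same thing with the mask spelled out explicitly. */
	on_each_cpu_cond_mask(cond, func, info, true, GFP_KERNEL,
			      cpu_online_mask);
}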
kernel/up.c | 14
@@ -68,9 +68,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * Preemption is disabled here to make sure the cond_func is called under the
  * same condtions in UP and SMP.
  */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
 			smp_call_func_t func, void *info, bool wait,
-			gfp_t gfp_flags)
+			gfp_t gfp_flags, const struct cpumask *mask)
 {
 	unsigned long flags;
 
@@ -82,6 +82,14 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 	}
 	preempt_enable();
 }
+EXPORT_SYMBOL(on_each_cpu_cond_mask);
+
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+			smp_call_func_t func, void *info, bool wait,
+			gfp_t gfp_flags)
+{
+	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
+}
 EXPORT_SYMBOL(on_each_cpu_cond);
 
 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
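One caveat that falls out of the two implementations: the SMP version walks the mask with for_each_cpu(), so a real cpumask is required there; NULL is only safe on UP, where the mask is never consulted (which is why the wrapper above can pass it). A hedged sketch of the portable pattern, with a hypothetical caller:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/smp.h>

/* Hypothetical caller illustrating the mask requirement. */
static void call_on_subset(const struct cpumask *subset,
			   bool (*cond)(int cpu, void *info),
			   smp_call_func_t func, void *info)
{
	/*
	 * Portable: a concrete mask. On SMP it is walked with
	 * for_each_cpu(); on UP it is ignored. Passing NULL, as the
	 * UP wrapper does internally, would oops on SMP.
	 */
	on_each_cpu_cond_mask(cond, func, info, true, GFP_KERNEL, subset);
}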