workqueue: Fold rebind_worker() within rebind_workers()

!CONFIG_SMP builds complain about rebind_worker() being unused. Its only
user, rebind_workers(), is indeed only defined for CONFIG_SMP, so just fold
the two lines back up there.
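
For illustration, a minimal sketch of the pattern that trips the warning
(hypothetical names, not from this patch): a static helper whose only
caller is compiled out when CONFIG_SMP is not set, so a -Wall build reports
it as defined but not used.

/* Hypothetical repro: with CONFIG_SMP unset, the only caller below is
 * compiled out and the compiler warns that unused_helper() is defined
 * but not used [-Wunused-function].
 */
static void unused_helper(void)
{
}

#ifdef CONFIG_SMP
static void smp_only_caller(void)
{
	unused_helper();
}
#endif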

Link: http://lore.kernel.org/r/20230113143102.2e94d74f@canb.auug.org.au
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
@@ -1990,12 +1990,6 @@ static void unbind_worker(struct worker *worker)
 	WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
 }
 
-static void rebind_worker(struct worker *worker, struct worker_pool *pool)
-{
-	kthread_set_per_cpu(worker->task, pool->cpu);
-	WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask) < 0);
-}
-
 static void wake_dying_workers(struct list_head *cull_list)
 {
 	struct worker *worker, *tmp;
@@ -5192,8 +5186,11 @@ static void rebind_workers(struct worker_pool *pool)
 	 * of all workers first and then clear UNBOUND. As we're called
 	 * from CPU_ONLINE, the following shouldn't fail.
	 */
-	for_each_pool_worker(worker, pool)
-		rebind_worker(worker, pool);
+	for_each_pool_worker(worker, pool) {
+		kthread_set_per_cpu(worker->task, pool->cpu);
+		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
+						  pool->attrs->cpumask) < 0);
+	}
 
 	raw_spin_lock_irq(&pool->lock);