workqueue: factor out initial worker creation into create_and_start_worker()

get_unbound_pool(), workqueue_cpu_up_callback() and init_workqueues()
have similar code pieces to create and start the initial worker.  Factor
those out into create_and_start_worker().

This patch doesn't introduce any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
Tejun Heo 2013-03-13 19:47:39 -07:00
parent bc3a1afc92
commit ebf44d16ec

View File

@@ -1792,6 +1792,26 @@ static void start_worker(struct worker *worker)
 	wake_up_process(worker->task);
 }
 
/**
* create_and_start_worker - create and start a worker for a pool
* @pool: the target pool
*
* Create and start a new worker for @pool.
*/
static int create_and_start_worker(struct worker_pool *pool)
{
struct worker *worker;
worker = create_worker(pool);
if (worker) {
spin_lock_irq(&pool->lock);
start_worker(worker);
spin_unlock_irq(&pool->lock);
}
return worker ? 0 : -ENOMEM;
}
 /**
  * destroy_worker - destroy a workqueue worker
  * @worker: worker to be destroyed
@@ -3542,7 +3562,6 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	static DEFINE_MUTEX(create_mutex);
 	u32 hash = wqattrs_hash(attrs);
 	struct worker_pool *pool;
-	struct worker *worker;
 
 	mutex_lock(&create_mutex);
@@ -3568,14 +3587,9 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 		goto fail;
 
 	/* create and start the initial worker */
-	worker = create_worker(pool);
-	if (!worker)
+	if (create_and_start_worker(pool) < 0)
 		goto fail;
-
-	spin_lock_irq(&pool->lock);
-	start_worker(worker);
-	spin_unlock_irq(&pool->lock);
 
 	/* install */
 	spin_lock_irq(&workqueue_lock);
 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
@@ -4148,18 +4162,10 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
 		for_each_cpu_worker_pool(pool, cpu) {
-			struct worker *worker;
-
 			if (pool->nr_workers)
 				continue;
-
-			worker = create_worker(pool);
-			if (!worker)
+			if (create_and_start_worker(pool) < 0)
 				return NOTIFY_BAD;
-
-			spin_lock_irq(&pool->lock);
-			start_worker(worker);
-			spin_unlock_irq(&pool->lock);
 		}
 		break;
@@ -4409,15 +4415,8 @@ static int __init init_workqueues(void)
 		struct worker_pool *pool;
 
 		for_each_cpu_worker_pool(pool, cpu) {
-			struct worker *worker;
-
 			pool->flags &= ~POOL_DISASSOCIATED;
-
-			worker = create_worker(pool);
-			BUG_ON(!worker);
-			spin_lock_irq(&pool->lock);
-			start_worker(worker);
-			spin_unlock_irq(&pool->lock);
+			BUG_ON(create_and_start_worker(pool) < 0);
 		}
 	}