mirror of https://github.com/torvalds/linux.git
bdi: Move cgroup bdi_writeback to a dedicated low concurrency workqueue
From 0aa2e9b921d6db71150633ff290199554f0842a8 Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@kernel.org>
Date: Wed, 23 May 2018 10:29:00 -0700

cgwb_release() punts the actual release to cgwb_release_workfn() on
system_wq.  Depending on the number of cgroups or block devices, there
can be a lot of cgwb_release_workfn() in flight at the same time.

We're periodically seeing close to 256 kworkers getting stuck with the
following stack trace and over time the entire system gets stuck.

  [<ffffffff810ee40c>] _synchronize_rcu_expedited.constprop.72+0x2fc/0x330
  [<ffffffff810ee634>] synchronize_rcu_expedited+0x24/0x30
  [<ffffffff811ccf23>] bdi_unregister+0x53/0x290
  [<ffffffff811cd1e9>] release_bdi+0x89/0xc0
  [<ffffffff811cd645>] wb_exit+0x85/0xa0
  [<ffffffff811cdc84>] cgwb_release_workfn+0x54/0xb0
  [<ffffffff810a68d0>] process_one_work+0x150/0x410
  [<ffffffff810a71fd>] worker_thread+0x6d/0x520
  [<ffffffff810ad3dc>] kthread+0x12c/0x160
  [<ffffffff81969019>] ret_from_fork+0x29/0x40
  [<ffffffffffffffff>] 0xffffffffffffffff

The events leading to the lockup are...

1. A lot of cgwb_release_workfn() is queued at the same time and all
   system_wq kworkers are assigned to execute them.

2. They all end up calling synchronize_rcu_expedited().  One of them
   wins and tries to perform the expedited synchronization.

3. However, that involves queueing rcu_exp_work to system_wq and
   waiting for it.  Because #1 is holding all available kworkers on
   system_wq, rcu_exp_work can't be executed.  cgwb_release_workfn()
   is waiting for synchronize_rcu_expedited() which in turn is waiting
   for cgwb_release_workfn() to free up some of the kworkers.

We shouldn't be scheduling hundreds of cgwb_release_workfn() at the
same time.  There's nothing to be gained from that.  This patch
updates the cgwb release path to use a dedicated percpu workqueue
with @max_active of 1.

While this resolves the problem at hand, it might be a good idea to
isolate rcu_exp_work to its own workqueue too as it can be used from
various paths and is prone to this sort of indirect A-A deadlock.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: stable@vger.kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent 6df133a149
commit f183464684
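As background (not part of the patch), the sketch below illustrates the dedicated low-concurrency workqueue pattern that the diff applies: a workqueue allocated with flags of 0 and @max_active of 1, so queued release items run with at most one item in flight per CPU instead of saturating system_wq. All "example_*" names are hypothetical; only the workqueue API calls (alloc_workqueue(), queue_work(), DECLARE_WORK(), destroy_workqueue()) are real kernel interfaces.

/* Hypothetical module, for illustration only. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_release_wq;

static void example_release_workfn(struct work_struct *work)
{
	/* Slow teardown that may block, e.g. waiting on an RCU grace period. */
}
static DECLARE_WORK(example_release_work, example_release_workfn);

static int __init example_init(void)
{
	/* flags == 0, max_active == 1: at most one item in flight per CPU. */
	example_release_wq = alloc_workqueue("example_release", 0, 1);
	if (!example_release_wq)
		return -ENOMEM;

	/* Queue work on the dedicated wq rather than on system_wq. */
	queue_work(example_release_wq, &example_release_work);
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_release_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The patch itself does nothing more elaborate than this: the second hunk below reroutes the existing wb->release_work from schedule_work() (i.e. system_wq) to such a dedicated queue.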
@@ -412,6 +412,7 @@ static void wb_exit(struct bdi_writeback *wb)
  * protected.
  */
 static DEFINE_SPINLOCK(cgwb_lock);
+static struct workqueue_struct *cgwb_release_wq;
 
 /**
  * wb_congested_get_create - get or create a wb_congested
@@ -522,7 +523,7 @@ static void cgwb_release(struct percpu_ref *refcnt)
 {
 	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
 						refcnt);
-	schedule_work(&wb->release_work);
+	queue_work(cgwb_release_wq, &wb->release_work);
 }
 
 static void cgwb_kill(struct bdi_writeback *wb)
@@ -784,6 +785,21 @@ static void cgwb_bdi_register(struct backing_dev_info *bdi)
 	spin_unlock_irq(&cgwb_lock);
 }
 
+static int __init cgwb_init(void)
+{
+	/*
+	 * There can be many concurrent release work items overwhelming
+	 * system_wq.  Put them in a separate wq and limit concurrency.
+	 * There's no point in executing many of these in parallel.
+	 */
+	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
+	if (!cgwb_release_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+subsys_initcall(cgwb_init);
+
 #else	/* CONFIG_CGROUP_WRITEBACK */
 
 static int cgwb_bdi_init(struct backing_dev_info *bdi)