mirror of
https://github.com/torvalds/linux.git
synced 2024-12-24 11:51:27 +00:00
Revert "softirq: Let ksoftirqd do its job"
This reverts the following commits: 4cd13c21b2
("softirq: Let ksoftirqd do its job"), 3c53776e29
("Mark HI and TASKLET softirq synchronous"), 1342d8080f
("softirq: Don't skip softirq execution when softirq thread is parking") in a single change to avoid known bad intermediate states introduced by a patch series reverting them individually. Due to the mentioned commit, when the ksoftirqd threads take charge of softirq processing, the system can experience high latencies. In the past a few workarounds have been implemented for specific side-effects of the initial ksoftirqd enforcement commit: commit 1ff688209e
("watchdog: core: make sure the watchdog_worker is not deferred") commit 8d5755b3f7
("watchdog: softdog: fire watchdog even if softirqs do not get to run") commit 217f697436
("net: busy-poll: allow preemption in sk_busy_loop()") commit 3c53776e29
("Mark HI and TASKLET softirq synchronous") But the latency problem still exists in real-life workloads, see the link below. The reverted commit intended to solve a live-lock scenario that can now be addressed with the NAPI threaded mode, introduced with commit 29863d41bb
("net: implement threaded-able napi poll loop support"), which is nowadays in a pretty stable status. While a complete solution to put softirq processing under nice resource control would be preferable, that has proven to be a very hard task. In the short term, remove the main pain point, and also simplify a bit the current softirq implementation. Signed-off-by: Paolo Abeni <pabeni@redhat.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Tested-by: Jason Xing <kerneljasonxing@gmail.com> Reviewed-by: Jakub Kicinski <kuba@kernel.org> Reviewed-by: Eric Dumazet <edumazet@google.com> Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Cc: "Paul E. McKenney" <paulmck@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: netdev@vger.kernel.org Link: https://lore.kernel.org/netdev/305d7742212cbe98621b16be782b0562f1012cb6.camel@redhat.com Link: https://lore.kernel.org/r/57e66b364f1b6f09c9bc0316742c3b14f4ce83bd.1683526542.git.pabeni@redhat.com
This commit is contained in:
parent
ac9a78681b
commit
d15121be74
@ -80,21 +80,6 @@ static void wakeup_softirqd(void)
|
||||
wake_up_process(tsk);
|
||||
}
|
||||
|
||||
/*
|
||||
* If ksoftirqd is scheduled, we do not want to process pending softirqs
|
||||
* right now. Let ksoftirqd handle this at its own rate, to get fairness,
|
||||
* unless we're doing some of the synchronous softirqs.
|
||||
*/
|
||||
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
|
||||
static bool ksoftirqd_running(unsigned long pending)
|
||||
{
|
||||
struct task_struct *tsk = __this_cpu_read(ksoftirqd);
|
||||
|
||||
if (pending & SOFTIRQ_NOW_MASK)
|
||||
return false;
|
||||
return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
DEFINE_PER_CPU(int, hardirqs_enabled);
|
||||
DEFINE_PER_CPU(int, hardirq_context);
|
||||
@ -236,7 +221,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
|
||||
goto out;
|
||||
|
||||
pending = local_softirq_pending();
|
||||
if (!pending || ksoftirqd_running(pending))
|
||||
if (!pending)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
@ -432,9 +417,6 @@ static inline bool should_wake_ksoftirqd(void)
|
||||
|
||||
static inline void invoke_softirq(void)
|
||||
{
|
||||
if (ksoftirqd_running(local_softirq_pending()))
|
||||
return;
|
||||
|
||||
if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
|
||||
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
|
||||
/*
|
||||
@ -468,7 +450,7 @@ asmlinkage __visible void do_softirq(void)
|
||||
|
||||
pending = local_softirq_pending();
|
||||
|
||||
if (pending && !ksoftirqd_running(pending))
|
||||
if (pending)
|
||||
do_softirq_own_stack();
|
||||
|
||||
local_irq_restore(flags);
|
||||
|
Loading…
Reference in New Issue
Block a user