Mirror of https://github.com/torvalds/linux.git, synced 2024-11-13 23:51:39 +00:00 (commit 670310dfba).
Pull irq core updates from Thomas Gleixner:
 "A rather large update for the interrupt core code and the irq chip
  drivers:

   - Add a new bitmap matrix allocator and supporting changes, which is
     used to replace the x86 vector allocator which comes with separate
     pull request. This allows to replace the convoluted nested loop
     allocation function in x86 with a facility which supports the
     recently added property of managed interrupts proper and allows to
     switch to a best effort vector reservation scheme, which addresses
     problems with vector exhaustion.

   - A large update to the ARM GIC-V3-ITS driver adding support for
     range selectors.

   - New interrupt controllers:
       - Meson and Meson8 GPIO
       - BCM7271 L2
       - Socionext EXIU

     If you expected that this will stop at some point, I have to
     disappoint you. There are new ones posted already. Sigh!

   - STM32 interrupt controller support for new platforms.

   - A pile of fixes, cleanups and updates to the MIPS GIC driver

   - The usual small fixes, cleanups and updates all over the place.
     Most visible one is to move the irq chip drivers Kconfig switches
     into a separate Kconfig menu"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  genirq: Fix type of shifting literal 1 in __setup_irq()
  irqdomain: Drop pointless NULL check in virq_debug_show_one
  genirq/proc: Return proper error code when irq_set_affinity() fails
  irq/work: Use llist_for_each_entry_safe
  irqchip: mips-gic: Print warning if inherited GIC base is used
  irqchip/mips-gic: Add pr_fmt and reword pr_* messages
  irqchip/stm32: Move the wakeup on interrupt mask
  irqchip/stm32: Fix initial values
  irqchip/stm32: Add stm32h7 support
  dt-bindings/interrupt-controllers: Add compatible string for stm32h7
  irqchip/stm32: Add multi-bank management
  irqchip/stm32: Select GENERIC_IRQ_CHIP
  irqchip/exiu: Add support for Socionext Synquacer EXIU controller
  dt-bindings: Add description of Socionext EXIU interrupt controller
  irqchip/gic-v3-its: Fix VPE activate callback return value
  irqchip: mips-gic: Make IPI bitmaps static
  irqchip: mips-gic: Share register writes in gic_set_type()
  irqchip: mips-gic: Remove gic_vpes variable
  irqchip: mips-gic: Use num_possible_cpus() to reserve IPIs
  irqchip: mips-gic: Configure EIC when CPUs come online
  ...
196 lines · 4.4 KiB · C
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
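
/*
 * Usage sketch: a caller embeds a struct irq_work, points it at a
 * callback and queues it from NMI or hardirq context; the callback
 * later runs in hardirq context on the same CPU.  The example_* names
 * below are hypothetical and only illustrate the API.
 */
static void example_irq_work_func(struct irq_work *work)
{
        /* Runs in hardirq context once the self-IPI or tick fires. */
        pr_info("irq_work callback ran\n");
}

static struct irq_work example_work = {
        .func   = example_irq_work_func,
};

static __maybe_unused void example_raise_from_nmi(void)
{
        /* NMI-safe: claims the entry and adds it to a lockless list. */
        irq_work_queue(&example_work);
}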

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        unsigned long flags, oflags, nflags;

        /*
         * Start with our best wish as a premise but only trust any
         * flag value after cmpxchg() result.
         */
        flags = work->flags & ~IRQ_WORK_PENDING;
        for (;;) {
                nflags = flags | IRQ_WORK_FLAGS;
                oflags = cmpxchg(&work->flags, flags, nflags);
                if (oflags == flags)
                        break;
                if (oflags & IRQ_WORK_PENDING)
                        return false;
                flags = oflags;
                cpu_relax();
        }

        return true;
}
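
/*
 * Flag lifecycle, as implemented by irq_work_claim() above and
 * irq_work_run_list() below:
 *
 *   0 (free)                 -> irq_work_claim() sets PENDING | BUSY
 *   PENDING | BUSY (queued)  -> the runner clears PENDING before func()
 *   BUSY (func() running)    -> the work can be claimed and queued again
 *   0 (free)                 -> the runner clears BUSY with cmpxchg(),
 *                               unless the work was re-claimed meanwhile
 */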

void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}
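
/*
 * Sketch of an architecture override (illustrative only): a "non-lame"
 * architecture raises a self-IPI whose handler ends up calling
 * irq_work_run().  The helper arch_send_self_ipi() and the vector name
 * are hypothetical stand-ins for the arch-specific machinery.
 */
#if 0
void arch_irq_work_raise(void)
{
        arch_send_self_ipi(EXAMPLE_IRQ_WORK_VECTOR);    /* hypothetical */
}
#endif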

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

#ifdef CONFIG_SMP

        /* Arch remote IPI send/receive backends aren't NMI safe */
        WARN_ON_ONCE(in_nmi());

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
                arch_send_call_function_single_ipi(cpu);

#else /* #ifdef CONFIG_SMP */
        irq_work_queue(work);
#endif /* #else #ifdef CONFIG_SMP */

        return true;
}
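
/*
 * Remote-queue sketch (illustrative, hypothetical example_* names):
 * kick a specific online CPU; the return value says whether this call
 * queued the work or found it already pending somewhere.
 */
static __maybe_unused void example_kick_cpu(int cpu)
{
        if (!irq_work_queue_on(&example_work, cpu))
                pr_debug("irq_work was already pending\n");
}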

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();

        /* If the work is "lazy", handle it from next tick if any */
        if (work->flags & IRQ_WORK_LAZY) {
                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                    tick_nohz_tick_stopped())
                        arch_irq_work_raise();
        } else {
                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
                        arch_irq_work_raise();
        }

        preempt_enable();

        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
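
/*
 * Lazy-work sketch (illustrative, hypothetical example_* names): a work
 * item flagged IRQ_WORK_LAZY is normally left for the next timer tick
 * and only raises an interrupt when the tick is stopped; printk uses
 * this to defer klogd wakeups.
 */
static __maybe_unused struct irq_work example_lazy_work = {
        .flags  = IRQ_WORK_LAZY,
        .func   = example_irq_work_func,
};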
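
/*
 * Used by the nohz/tick code to decide whether the tick can be stopped:
 * pending lazy work always keeps the tick alive, while pending raised
 * work only does so on architectures without a dedicated irq_work
 * interrupt.
 */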
bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}

static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;
        unsigned long flags;

        BUG_ON(!irqs_disabled());

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, llnode) {
                /*
                 * Clear the PENDING bit; after this point the @work
                 * can be re-used.
                 * Make it immediately visible so that other CPUs trying
                 * to claim that work don't rely on us to handle their data
                 * while we are in the middle of the func.
                 */
                flags = work->flags & ~IRQ_WORK_PENDING;
                xchg(&work->flags, flags);

                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
                 */
                (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
        }
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);
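
/*
 * Called from the timer tick: run the raised list here only on
 * architectures that lack a dedicated irq_work interrupt, then give
 * lazy work its chance to run.
 */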
void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work; ensure the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();

        while (work->flags & IRQ_WORK_BUSY)
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
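
/*
 * Teardown sketch (illustrative, hypothetical example_* names): wait for
 * a possibly pending or running callback before the object embedding the
 * irq_work goes away.
 */
static __maybe_unused void example_teardown(struct irq_work *work)
{
        irq_work_sync(work);    /* spin until the callback has finished */
        /* now safe to free or reuse the containing object */
}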