2011-02-09 12:16:52 +00:00
|
|
|
/*
|
|
|
|
* Internal header to deal with irq_desc->status which will be renamed
|
|
|
|
* to irq_desc->settings.
|
|
|
|
*/
|
|
|
|
enum {
|
|
|
|
_IRQ_DEFAULT_INIT_FLAGS = IRQ_DEFAULT_INIT_FLAGS,
|
2011-02-08 16:11:03 +00:00
|
|
|
_IRQ_PER_CPU = IRQ_PER_CPU,
|
2011-02-08 16:28:12 +00:00
|
|
|
_IRQ_LEVEL = IRQ_LEVEL,
|
2011-02-09 13:44:17 +00:00
|
|
|
_IRQ_NOPROBE = IRQ_NOPROBE,
|
|
|
|
_IRQ_NOREQUEST = IRQ_NOREQUEST,
|
2011-04-06 21:01:44 +00:00
|
|
|
_IRQ_NOTHREAD = IRQ_NOTHREAD,
|
2011-02-09 13:44:17 +00:00
|
|
|
_IRQ_NOAUTOEN = IRQ_NOAUTOEN,
|
|
|
|
_IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT,
|
2011-02-08 16:11:03 +00:00
|
|
|
_IRQ_NO_BALANCING = IRQ_NO_BALANCING,
|
2011-02-09 13:44:17 +00:00
|
|
|
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
|
genirq: Add support for per-cpu dev_id interrupts
The ARM GIC interrupt controller offers per CPU interrupts (PPIs),
which are usually used to connect local timers to each core. Each CPU
has its own private interface to the GIC, and only sees the PPIs that
are directly connect to it.
While these timers are separate devices and have a separate interrupt
line to a core, they all use the same IRQ number.
For these devices, request_irq() is not the right API as it assumes
that an IRQ number is visible by a number of CPUs (through the
affinity setting), but makes it very awkward to express that an IRQ
number can be handled by all CPUs, and yet be a different interrupt
line on each CPU, requiring a different dev_id cookie to be passed
back to the handler.
The *_percpu_irq() functions is designed to overcome these
limitations, by providing a per-cpu dev_id vector:
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id);
void free_percpu_irq(unsigned int, void __percpu *);
int setup_percpu_irq(unsigned int irq, struct irqaction *new);
void remove_percpu_irq(unsigned int irq, struct irqaction *act);
void enable_percpu_irq(unsigned int irq);
void disable_percpu_irq(unsigned int irq);
The API has a number of limitations:
- no interrupt sharing
- no threading
- common handler across all the CPUs
Once the interrupt is requested using setup_percpu_irq() or
request_percpu_irq(), it must be enabled by each core that wishes its
local interrupt to be delivered.
Based on an initial patch by Thomas Gleixner.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/1316793788-14500-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2011-09-23 16:03:06 +00:00
|
|
|
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
|
2013-11-06 11:30:07 +00:00
|
|
|
_IRQ_IS_POLLED = IRQ_IS_POLLED,
|
2015-10-09 21:28:58 +00:00
|
|
|
_IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
|
2011-02-08 16:11:03 +00:00
|
|
|
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
|
2011-02-09 12:16:52 +00:00
|
|
|
};
/*
 * Poison the public flag names: after this point any direct use of the
 * old IRQ_* / IRQF_MODIFY_MASK names in core code fails to compile
 * (expanding to the undefined symbol GOT_YOU_MORON), forcing use of
 * the _IRQ_* shadow values via the accessors below.
 */
#define IRQ_PER_CPU		GOT_YOU_MORON
#define IRQ_NO_BALANCING	GOT_YOU_MORON
#define IRQ_LEVEL		GOT_YOU_MORON
#define IRQ_NOPROBE		GOT_YOU_MORON
#define IRQ_NOREQUEST		GOT_YOU_MORON
#define IRQ_NOTHREAD		GOT_YOU_MORON
#define IRQ_NOAUTOEN		GOT_YOU_MORON
#define IRQ_NESTED_THREAD	GOT_YOU_MORON
#define IRQ_PER_CPU_DEVID	GOT_YOU_MORON
#define IRQ_IS_POLLED		GOT_YOU_MORON
#define IRQ_DISABLE_UNLAZY	GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK	GOT_YOU_MORON
static inline void
|
|
|
|
irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK);
|
|
|
|
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
|
2011-02-08 16:11:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
return desc->status_use_accessors & _IRQ_PER_CPU;
|
2011-02-08 16:11:03 +00:00
|
|
|
}
|
|
|
|
|
genirq: Add support for per-cpu dev_id interrupts
The ARM GIC interrupt controller offers per CPU interrupts (PPIs),
which are usually used to connect local timers to each core. Each CPU
has its own private interface to the GIC, and only sees the PPIs that
are directly connect to it.
While these timers are separate devices and have a separate interrupt
line to a core, they all use the same IRQ number.
For these devices, request_irq() is not the right API as it assumes
that an IRQ number is visible by a number of CPUs (through the
affinity setting), but makes it very awkward to express that an IRQ
number can be handled by all CPUs, and yet be a different interrupt
line on each CPU, requiring a different dev_id cookie to be passed
back to the handler.
The *_percpu_irq() functions is designed to overcome these
limitations, by providing a per-cpu dev_id vector:
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id);
void free_percpu_irq(unsigned int, void __percpu *);
int setup_percpu_irq(unsigned int irq, struct irqaction *new);
void remove_percpu_irq(unsigned int irq, struct irqaction *act);
void enable_percpu_irq(unsigned int irq);
void disable_percpu_irq(unsigned int irq);
The API has a number of limitations:
- no interrupt sharing
- no threading
- common handler across all the CPUs
Once the interrupt is requested using setup_percpu_irq() or
request_percpu_irq(), it must be enabled by each core that wishes its
local interrupt to be delivered.
Based on an initial patch by Thomas Gleixner.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/1316793788-14500-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2011-09-23 16:03:06 +00:00
|
|
|
static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc)
|
|
|
|
{
|
|
|
|
return desc->status_use_accessors & _IRQ_PER_CPU_DEVID;
|
|
|
|
}
|
|
|
|
|
2011-02-08 16:11:03 +00:00
|
|
|
static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
desc->status_use_accessors |= _IRQ_PER_CPU;
|
2011-02-08 16:11:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
desc->status_use_accessors |= _IRQ_NO_BALANCING;
|
2011-02-08 16:11:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
return desc->status_use_accessors & _IRQ_NO_BALANCING;
|
2011-02-08 16:11:03 +00:00
|
|
|
}
|
2011-02-08 16:28:12 +00:00
|
|
|
|
|
|
|
static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK;
|
2011-02-08 16:28:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
desc->status_use_accessors &= ~IRQ_TYPE_SENSE_MASK;
|
|
|
|
desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK;
|
2011-02-08 16:28:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool irq_settings_is_level(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
return desc->status_use_accessors & _IRQ_LEVEL;
|
2011-02-08 16:28:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void irq_settings_clr_level(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
desc->status_use_accessors &= ~_IRQ_LEVEL;
|
2011-02-08 16:28:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void irq_settings_set_level(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
desc->status_use_accessors |= _IRQ_LEVEL;
|
2011-02-08 16:28:12 +00:00
|
|
|
}
|
2011-02-09 13:44:17 +00:00
|
|
|
|
|
|
|
static inline bool irq_settings_can_request(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
return !(desc->status_use_accessors & _IRQ_NOREQUEST);
|
2011-02-09 13:44:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void irq_settings_clr_norequest(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
desc->status_use_accessors &= ~_IRQ_NOREQUEST;
|
2011-02-09 13:44:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void irq_settings_set_norequest(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
desc->status_use_accessors |= _IRQ_NOREQUEST;
|
2011-02-09 13:44:17 +00:00
|
|
|
}
|
|
|
|
|
2011-04-06 21:01:44 +00:00
|
|
|
static inline bool irq_settings_can_thread(struct irq_desc *desc)
|
|
|
|
{
|
|
|
|
return !(desc->status_use_accessors & _IRQ_NOTHREAD);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void irq_settings_clr_nothread(struct irq_desc *desc)
|
|
|
|
{
|
|
|
|
desc->status_use_accessors &= ~_IRQ_NOTHREAD;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void irq_settings_set_nothread(struct irq_desc *desc)
|
|
|
|
{
|
|
|
|
desc->status_use_accessors |= _IRQ_NOTHREAD;
|
|
|
|
}
|
|
|
|
|
2011-02-09 13:44:17 +00:00
|
|
|
static inline bool irq_settings_can_probe(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
return !(desc->status_use_accessors & _IRQ_NOPROBE);
|
2011-02-09 13:44:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void irq_settings_clr_noprobe(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
desc->status_use_accessors &= ~_IRQ_NOPROBE;
|
2011-02-09 13:44:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void irq_settings_set_noprobe(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
desc->status_use_accessors |= _IRQ_NOPROBE;
|
2011-02-09 13:44:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
return desc->status_use_accessors & _IRQ_MOVE_PCNTXT;
|
2011-02-09 13:44:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool irq_settings_can_autoenable(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
return !(desc->status_use_accessors & _IRQ_NOAUTOEN);
|
2011-02-09 13:44:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
|
|
|
|
{
|
2011-03-28 11:32:20 +00:00
|
|
|
return desc->status_use_accessors & _IRQ_NESTED_THREAD;
|
2011-02-09 13:44:17 +00:00
|
|
|
}
|
2013-11-06 11:30:07 +00:00
|
|
|
|
|
|
|
static inline bool irq_settings_is_polled(struct irq_desc *desc)
|
|
|
|
{
|
|
|
|
return desc->status_use_accessors & _IRQ_IS_POLLED;
|
|
|
|
}
|
2015-10-09 21:28:58 +00:00
|
|
|
|
|
|
|
static inline bool irq_settings_disable_unlazy(struct irq_desc *desc)
|
|
|
|
{
|
|
|
|
return desc->status_use_accessors & _IRQ_DISABLE_UNLAZY;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void irq_settings_clr_disable_unlazy(struct irq_desc *desc)
|
|
|
|
{
|
|
|
|
desc->status_use_accessors &= ~_IRQ_DISABLE_UNLAZY;
|
|
|
|
}
|