/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifdef __KERNEL__
#ifndef _ASM_POWERPC_IRQ_H
#define _ASM_POWERPC_IRQ_H

/*
 * PowerPC interrupt (IRQ) definitions.
 */

#include <linux/irqdomain.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/radix-tree.h>

#include <asm/types.h>
#include <linux/atomic.h>

extern atomic_t ppc_n_lost_interrupts;

/* This number is used when no interrupt has been assigned */
#define NO_IRQ (0)
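/*
 * Illustrative sketch (not part of this header, np being some assumed
 * struct device_node pointer): callers have traditionally compared a
 * freshly mapped interrupt against NO_IRQ to detect a failed mapping:
 *
 *	unsigned int virq = irq_of_parse_and_map(np, 0);
 *	if (virq == NO_IRQ)
 *		return -ENODEV;
 *
 * Since NO_IRQ is 0 here, "if (!virq)" is equivalent.
 */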

/* Total number of virtual IRQs (virqs) in the platform */
#define NR_IRQS CONFIG_NR_IRQS

/* Number of legacy (ISA) interrupts, used by the generic IRQ code */
#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS
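/*
 * virq_to_hw() translates a Linux virtual interrupt number back into
 * the hardware interrupt number it was mapped from. A minimal usage
 * sketch (illustrative only, not taken from this header):
 *
 *	irq_hw_number_t hwirq = virq_to_hw(virq);
 *	pr_debug("virq %u -> hwirq %lu\n", virq, (unsigned long)hwirq);
 */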
extern irq_hw_number_t virq_to_hw(unsigned int virq);

static __inline__ int irq_canonicalize(int irq)
{
	return irq;
}
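/*
 * Note on the helper above: powerpc needs no IRQ canonicalization, so
 * irq_canonicalize() is simply the identity. The hook exists because
 * generic kernel code calls it unconditionally; on some architectures
 * (x86, for example) it remaps the cascade IRQ 2 to IRQ 9.
 */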

/* Whether to spread interrupts across all online CPUs (platform policy) */
extern int distribute_irqs;

struct pt_regs;

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
/*
* Per-cpu stacks for handling critical, debug and machine check
* level interrupts.
*/
extern void *critirq_ctx[NR_CPUS];
extern void *dbgirq_ctx[NR_CPUS];
extern void *mcheckirq_ctx[NR_CPUS];
#endif /* CONFIG_BOOKE || CONFIG_40x */

/*
* Per-cpu stacks for handling hard and soft interrupts.
*/
extern void *hardirq_ctx[NR_CPUS];
extern void *softirq_ctx[NR_CPUS];

extern void do_IRQ(struct pt_regs *regs);
extern void __init init_IRQ(void);
extern void __do_irq(struct pt_regs *regs);
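/*
 * Rough call flow (a simplified sketch of intent, not a verbatim copy
 * of the implementation): the low-level exception vector enters
 * do_IRQ(), which is expected to switch onto the per-CPU hardirq_ctx
 * stack when not already on it, then call __do_irq() to fetch the
 * interrupt source from the controller and dispatch it:
 *
 *	exception vector -> do_IRQ(regs) -> [switch to hardirq_ctx[cpu]]
 *	                                 -> __do_irq(regs)
 *	                                 -> generic_handle_irq(irq)
 */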
int irq_choose_cpu(const struct cpumask *mask);
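/*
 * Illustrative use of irq_choose_cpu() (an assumption about typical
 * callers, not something this header mandates): interrupt controller
 * code passes an interrupt's affinity mask and gets back one online
 * CPU to target, e.g.:
 *
 *	int cpu = irq_choose_cpu(irq_data_get_affinity_mask(d));
 *	// ...then program the controller to route the source to "cpu"
 */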

#endif /* _ASM_POWERPC_IRQ_H */
#endif /* __KERNEL__ */