mirror of
https://github.com/torvalds/linux.git
[SPARC32]: Take enable_irq/disable_irq out of line.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 32231a66b4
commit 0f516813ce
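The change replaces the BTFIXUP-based inline wrappers that used to sit in the public header with real, out-of-line functions, so the exported symbols become ordinary functions rather than btfixup slots. A minimal sketch of the resulting pattern, simplified from the hunks below (surrounding declarations omitted):

    /* kernel-private header: btfixup dispatch stays inline, under new names */
    static inline void __disable_irq(unsigned int irq)
    {
            BTFIXUP_CALL(disable_irq)(irq);
    }

    /* irq.c: the public entry point is now a real function that modules link
     * against, so EXPORT_SYMBOL() sits next to its definition.
     */
    void disable_irq(unsigned int irq)
    {
            __disable_irq(irq);
    }
    EXPORT_SYMBOL(disable_irq);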
@@ -270,7 +270,7 @@ void free_irq(unsigned int irq, void *dev_id)
 	kfree(action);
 
 	if (!sparc_irq[cpu_irq].action)
-		disable_irq(irq);
+		__disable_irq(irq);
 
 out_unlock:
 	spin_unlock_irqrestore(&irq_action_lock, flags);
@@ -466,7 +466,7 @@ int request_fast_irq(unsigned int irq,
 
 	sparc_irq[cpu_irq].action = action;
 
-	enable_irq(irq);
+	__enable_irq(irq);
 
 	ret = 0;
 out_unlock:
@@ -546,7 +546,7 @@ int request_irq(unsigned int irq,
 
 	*actionp = action;
 
-	enable_irq(irq);
+	__enable_irq(irq);
 
 	ret = 0;
 out_unlock:
@@ -557,6 +557,25 @@ out:
 
 EXPORT_SYMBOL(request_irq);
 
+void disable_irq_nosync(unsigned int irq)
+{
+	return __disable_irq(irq);
+}
+EXPORT_SYMBOL(disable_irq_nosync);
+
+void disable_irq(unsigned int irq)
+{
+	return __disable_irq(irq);
+}
+EXPORT_SYMBOL(disable_irq);
+
+void enable_irq(unsigned int irq)
+{
+	return __enable_irq(irq);
+}
+
+EXPORT_SYMBOL(enable_irq);
+
 /* We really don't need these at all on the Sparc.  We only have
  * stubs here because they are exported to modules.
  */
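The `return __disable_irq(irq);` form returns a void expression, which gcc accepts in a function returning void, so the new wrappers behave as plain pass-through calls. From a driver's point of view nothing changes at the call site; a hypothetical module-side snippet (function name and usage invented for illustration):

    /* hypothetical driver code: calls are unchanged, but disable_irq() and
     * enable_irq() now resolve to the exported out-of-line functions above
     * instead of to inline BTFIXUP_CALL() expansions in the public header.
     */
    static void example_quiesce(unsigned int irq)
    {
            disable_irq(irq);
            /* ... reprogram the device while its interrupt is masked ... */
            enable_irq(irq);
    }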
@@ -1,11 +1,31 @@
 #include <asm/btfixup.h>
 
+/* Dave Redman (djhr@tadpole.co.uk)
+ * changed these to function pointers.. it saves cycles and will allow
+ * the irq dependencies to be split into different files at a later date
+ * sun4c_irq.c, sun4m_irq.c etc so we could reduce the kernel size.
+ * Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Changed these to btfixup entities... It saves cycles :)
+ */
+
+BTFIXUPDEF_CALL(void, disable_irq, unsigned int)
+BTFIXUPDEF_CALL(void, enable_irq, unsigned int)
 BTFIXUPDEF_CALL(void, disable_pil_irq, unsigned int)
 BTFIXUPDEF_CALL(void, enable_pil_irq, unsigned int)
 BTFIXUPDEF_CALL(void, clear_clock_irq, void)
 BTFIXUPDEF_CALL(void, clear_profile_irq, int)
 BTFIXUPDEF_CALL(void, load_profile_irq, int, unsigned int)
 
+static inline void __disable_irq(unsigned int irq)
+{
+	BTFIXUP_CALL(disable_irq)(irq);
+}
+
+static inline void __enable_irq(unsigned int irq)
+{
+	BTFIXUP_CALL(enable_irq)(irq);
+}
+
 static inline void disable_pil_irq(unsigned int irq)
 {
 	BTFIXUP_CALL(disable_pil_irq)(irq);
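As the comment above notes, these entries started life as function pointers and were later converted to btfixup entities, which patch the call sites at boot rather than indirecting through a pointer at run time. A rough plain-C analogy of what BTFIXUPDEF_CALL()/BTFIXUP_CALL() provide here (an illustration only, not the actual btfixup implementation; the pointer name is invented):

    /* illustration: a per-platform hook filled in at boot by the sun4c/sun4m/
     * sun4d setup code; btfixup achieves the same dispatch by rewriting the
     * call instruction itself instead of loading a pointer.
     */
    static void (*sparc_platform_disable_irq)(unsigned int irq);

    static inline void __disable_irq(unsigned int irq)
    {
            sparc_platform_disable_irq(irq);
    }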
@@ -154,8 +154,6 @@ EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
 #else
 EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id));
 #endif
-EXPORT_SYMBOL(BTFIXUP_CALL(enable_irq));
-EXPORT_SYMBOL(BTFIXUP_CALL(disable_irq));
 EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea));
 EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea));
 EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl));
@@ -190,7 +190,7 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
 	kfree(action);
 
 	if (!(*actionp))
-		disable_irq(irq);
+		__disable_irq(irq);
 
 out_unlock:
 	spin_unlock_irqrestore(&irq_action_lock, flags);
@@ -348,7 +348,7 @@ int sun4d_request_irq(unsigned int irq,
 	else
 		*actionp = action;
 
-	enable_irq(irq);
+	__enable_irq(irq);
 
 	ret = 0;
 out_unlock:
@@ -64,7 +64,7 @@ void claim_ticker14(irq_handler_t handler,
 
 	/* first we copy the obp handler instructions
 	 */
-	disable_irq(irq_nr);
+	__disable_irq(irq_nr);
 	if (!handler)
 		return;
 
@@ -81,6 +81,6 @@ void claim_ticker14(irq_handler_t handler,
 			NULL)) {
 		install_linux_ticker();
 		load_profile_irq(cpu, timeout);
-		enable_irq(irq_nr);
+		__enable_irq(irq_nr);
 	}
 }
@@ -7,41 +7,15 @@
 #ifndef _SPARC_IRQ_H
 #define _SPARC_IRQ_H
 
 #include <linux/linkage.h>
 #include <linux/threads.h>     /* For NR_CPUS */
 #include <linux/interrupt.h>
 
 #include <asm/system.h>     /* For SUN4M_NCPUS */
 #include <asm/btfixup.h>
 
 #define NR_IRQS    16
 
 #define irq_canonicalize(irq)	(irq)
 
-/* Dave Redman (djhr@tadpole.co.uk)
- * changed these to function pointers.. it saves cycles and will allow
- * the irq dependencies to be split into different files at a later date
- * sun4c_irq.c, sun4m_irq.c etc so we could reduce the kernel size.
- * Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- * Changed these to btfixup entities... It saves cycles :)
- */
-BTFIXUPDEF_CALL(void, disable_irq, unsigned int)
-BTFIXUPDEF_CALL(void, enable_irq, unsigned int)
-
-static inline void disable_irq_nosync(unsigned int irq)
-{
-	BTFIXUP_CALL(disable_irq)(irq);
-}
-
-static inline void disable_irq(unsigned int irq)
-{
-	BTFIXUP_CALL(disable_irq)(irq);
-}
-
-static inline void enable_irq(unsigned int irq)
-{
-	BTFIXUP_CALL(enable_irq)(irq);
-}
+extern void disable_irq_nosync(unsigned int irq);
+extern void disable_irq(unsigned int irq);
+extern void enable_irq(unsigned int irq);
 
 extern int request_fast_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, __const__ char *devname);
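The public header now only declares the functions; the trailing context shows the request_fast_irq() prototype it keeps. A hypothetical call site against that prototype (the IRQ number, handler name and device name are made up for illustration):

    /* hypothetical caller: values and names are illustrative only */
    static irqreturn_t example_fast_handler(int irq, void *dev_id)
    {
            /* acknowledge the device here */
            return IRQ_HANDLED;
    }

    static int example_attach(void)
    {
            return request_fast_irq(3, example_fast_handler, 0, "example");
    }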