forked from Minki/linux
powerpc/irq: Replace #ifdefs by IS_ENABLED()
Replace #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG and #ifdef CONFIG_PERF_EVENTS by IS_ENABLED() in hw_irq.h and plpar_wrappers.h.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c1ded642f8d9002767f8fed48ed6d1e76254ed73.1652862729.git.christophe.leroy@csgroup.eu
This commit is contained in:
parent
ef5b570d37
commit
78ffe6a7e2
|
@ -123,7 +123,6 @@ static inline notrace unsigned long irq_soft_mask_return(void)
|
|||
*/
|
||||
static inline notrace void irq_soft_mask_set(unsigned long mask)
|
||||
{
|
||||
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
|
||||
/*
|
||||
* The irq mask must always include the STD bit if any are set.
|
||||
*
|
||||
|
@ -138,8 +137,8 @@ static inline notrace void irq_soft_mask_set(unsigned long mask)
|
|||
* unmasks to be replayed, among other things. For now, take
|
||||
* the simple approach.
|
||||
*/
|
||||
WARN_ON(mask && !(mask & IRQS_DISABLED));
|
||||
#endif
|
||||
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
|
||||
WARN_ON(mask && !(mask & IRQS_DISABLED));
|
||||
|
||||
WRITE_ONCE(local_paca->irq_soft_mask, mask);
|
||||
barrier();
|
||||
|
@ -324,11 +323,13 @@ bool power_pmu_wants_prompt_pmi(void);
|
|||
*/
|
||||
static inline bool should_hard_irq_enable(void)
|
||||
{
|
||||
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
|
||||
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
|
||||
WARN_ON(mfmsr() & MSR_EE);
|
||||
#endif
|
||||
#ifdef CONFIG_PERF_EVENTS
|
||||
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
|
||||
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
|
||||
WARN_ON(mfmsr() & MSR_EE);
|
||||
}
|
||||
|
||||
if (!IS_ENABLED(CONFIG_PERF_EVENTS))
|
||||
return false;
|
||||
/*
|
||||
* If the PMU is not running, there is not much reason to enable
|
||||
* MSR[EE] in irq handlers because any interrupts would just be
|
||||
|
@ -343,9 +344,6 @@ static inline bool should_hard_irq_enable(void)
|
|||
return false;
|
||||
|
||||
return true;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -353,11 +351,11 @@ static inline bool should_hard_irq_enable(void)
|
|||
*/
|
||||
static inline void do_hard_irq_enable(void)
|
||||
{
|
||||
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
|
||||
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
|
||||
WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
|
||||
WARN_ON(mfmsr() & MSR_EE);
|
||||
#endif
|
||||
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
|
||||
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
|
||||
WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
|
||||
WARN_ON(mfmsr() & MSR_EE);
|
||||
}
|
||||
/*
|
||||
* This allows PMI interrupts (and watchdog soft-NMIs) through.
|
||||
* There is no other reason to enable this way.
|
||||
|
|
|
@ -43,11 +43,10 @@ static inline long extended_cede_processor(unsigned long latency_hint)
|
|||
set_cede_latency_hint(latency_hint);
|
||||
|
||||
rc = cede_processor();
|
||||
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
|
||||
|
||||
/* Ensure that H_CEDE returns with IRQs on */
|
||||
if (WARN_ON(!(mfmsr() & MSR_EE)))
|
||||
if (WARN_ON(IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && !(mfmsr() & MSR_EE)))
|
||||
__hard_irq_enable();
|
||||
#endif
|
||||
|
||||
set_cede_latency_hint(old_latency_hint);
|
||||
|
||||
|
|
Loading…
Reference in New Issue
Block a user