xtensa: clean up WSR*/RSR*/get_sr/set_sr

WSR and RSR are too generic and collide with other macro definitions in
the kernel causing warnings in allmodconfig builds. Drop WSR and RSR
macros and WSR_* and RSR_* variants. Change get_sr and set_sr to
xtensa_get_sr and xtensa_set_sr. Fix up users.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Commit: cad6fade6e
Parent: c066cc8af9
Author: Max Filippov
Date:   2018-11-27 16:27:47 -08:00
11 changed files with 50 additions and 69 deletions
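
For illustration only (not part of the patch; the function name below is hypothetical, and <asm/processor.h> plus <asm/regs.h> are assumed to be included): a minimal before/after sketch of the conversion, modeled on the IBREAKENABLE accesses in hw_breakpoint.c further down. Note that RSR() stored its result through a named variable, while xtensa_get_sr() is a statement expression that returns the value.

	/* Sketch of a converted caller; 'reg' is the ibreak slot index. */
	static void enable_ibreak_slot(int reg)
	{
		unsigned long ibreakenable;

		/* was: RSR(ibreakenable, SREG_IBREAKENABLE); */
		ibreakenable = xtensa_get_sr(SREG_IBREAKENABLE);

		/* was: WSR(ibreakenable | (1 << reg), SREG_IBREAKENABLE); */
		xtensa_set_sr(ibreakenable | (1 << reg), SREG_IBREAKENABLE);
	}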

arch/xtensa/include/asm/coprocessor.h

@@ -12,7 +12,6 @@
#ifndef _XTENSA_COPROCESSOR_H
#define _XTENSA_COPROCESSOR_H
#include <linux/stringify.h>
#include <variant/core.h>
#include <variant/tie.h>
#include <asm/types.h>
@@ -90,19 +89,6 @@
#ifndef __ASSEMBLY__
#if XCHAL_HAVE_CP
#define RSR_CPENABLE(x) do { \
__asm__ __volatile__("rsr %0, cpenable" : "=a" (x)); \
} while(0);
#define WSR_CPENABLE(x) do { \
__asm__ __volatile__("wsr %0, cpenable; rsync" :: "a" (x)); \
} while(0);
#endif /* XCHAL_HAVE_CP */
/*
* Additional registers.
* We define three types of additional registers:
@@ -162,12 +148,6 @@ extern void coprocessor_flush(struct thread_info*, int);
extern void coprocessor_release_all(struct thread_info*);
extern void coprocessor_flush_all(struct thread_info*);
static inline void coprocessor_clear_cpenable(void)
{
unsigned long i = 0;
WSR_CPENABLE(i);
}
#endif /* XTENSA_HAVE_COPROCESSORS */
#endif /* !__ASSEMBLY__ */

arch/xtensa/include/asm/irqflags.h

@@ -12,6 +12,7 @@
#ifndef _XTENSA_IRQFLAGS_H
#define _XTENSA_IRQFLAGS_H
#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/processor.h>

arch/xtensa/include/asm/processor.h

@@ -13,6 +13,7 @@
#include <variant/core.h>
#include <linux/compiler.h>
#include <linux/stringify.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/regs.h>
@@ -212,11 +213,18 @@ extern unsigned long get_wchan(struct task_struct *p);
/* Special register access. */
#define WSR(v,sr) __asm__ __volatile__ ("wsr %0,"__stringify(sr) :: "a"(v));
#define RSR(v,sr) __asm__ __volatile__ ("rsr %0,"__stringify(sr) : "=a"(v));
#define xtensa_set_sr(x, sr) \
({ \
unsigned int v = (unsigned int)(x); \
__asm__ __volatile__ ("wsr %0, "__stringify(sr) :: "a"(v)); \
})
#define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);})
#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; })
#define xtensa_get_sr(sr) \
({ \
unsigned int v; \
__asm__ __volatile__ ("rsr %0, "__stringify(sr) : "=a"(v)); \
v; \
})
#ifndef XCHAL_HAVE_EXTERN_REGS
#define XCHAL_HAVE_EXTERN_REGS 0
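
A hedged usage sketch of the accessors defined above (not part of the patch; the helper name is hypothetical and <asm/processor.h> is assumed to be included). The interrupt/intenable/intclear accesses mirror the converted callers in traps.c and the irqchip drivers further down; the register-name arguments are stringified, not evaluated as C identifiers.

	/* Hypothetical helper, for illustration only. */
	static inline unsigned int ack_pending_interrupts(void)
	{
		unsigned int pending = xtensa_get_sr(interrupt) &
				       xtensa_get_sr(intenable);

		/* Writing 1s to INTCLEAR clears the matching software/
		 * edge-triggered pending bits in INTERRUPT. */
		xtensa_set_sr(pending, intclear);
		return pending;
	}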

arch/xtensa/include/asm/thread_info.h

@@ -11,6 +11,7 @@
#ifndef _XTENSA_THREAD_INFO_H
#define _XTENSA_THREAD_INFO_H
#include <linux/stringify.h>
#include <asm/kmem_layout.h>
#define CURRENT_SHIFT KERNEL_STACK_SHIFT

arch/xtensa/include/asm/timex.h

@@ -10,7 +10,6 @@
#define _XTENSA_TIMEX_H
#include <asm/processor.h>
#include <linux/stringify.h>
#if XCHAL_NUM_TIMERS > 0 && \
XTENSA_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
@@ -40,33 +39,24 @@ void local_timer_setup(unsigned cpu);
* Register access.
*/
#define WSR_CCOUNT(r) asm volatile ("wsr %0, ccount" :: "a" (r))
#define RSR_CCOUNT(r) asm volatile ("rsr %0, ccount" : "=a" (r))
#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) :: "a"(r))
#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) : "=a"(r))
static inline unsigned long get_ccount (void)
{
unsigned long ccount;
RSR_CCOUNT(ccount);
return ccount;
return xtensa_get_sr(ccount);
}
static inline void set_ccount (unsigned long ccount)
{
WSR_CCOUNT(ccount);
xtensa_set_sr(ccount, ccount);
}
static inline unsigned long get_linux_timer (void)
{
unsigned ccompare;
RSR_CCOMPARE(LINUX_TIMER, ccompare);
return ccompare;
return xtensa_get_sr(SREG_CCOMPARE + LINUX_TIMER);
}
static inline void set_linux_timer (unsigned long ccompare)
{
WSR_CCOMPARE(LINUX_TIMER, ccompare);
xtensa_set_sr(ccompare, SREG_CCOMPARE + LINUX_TIMER);
}
#endif /* _XTENSA_TIMEX_H */
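
Aside on the hunk above (not part of the patch): because __stringify() macro-expands its argument before stringifying it, the new accessors also accept arithmetic expressions over the numeric SREG_* constants, and the assembler evaluates the resulting string. A rough expansion, assuming SREG_CCOMPARE == 240 and LINUX_TIMER == 1 (values used here only for illustration; the helper name is hypothetical):

	static inline unsigned long get_linux_timer_expanded(void)
	{
		unsigned int v;

		/* xtensa_get_sr(SREG_CCOMPARE + LINUX_TIMER) after preprocessing */
		__asm__ __volatile__ ("rsr %0, 240 + 1" : "=a"(v));
		return v;
	}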

arch/xtensa/kernel/hw_breakpoint.c

@@ -101,30 +101,30 @@ static void xtensa_wsr(unsigned long v, u8 sr)
switch (sr) {
#if XCHAL_NUM_IBREAK > 0
case SREG_IBREAKA + 0:
WSR(v, SREG_IBREAKA + 0);
xtensa_set_sr(v, SREG_IBREAKA + 0);
break;
#endif
#if XCHAL_NUM_IBREAK > 1
case SREG_IBREAKA + 1:
WSR(v, SREG_IBREAKA + 1);
xtensa_set_sr(v, SREG_IBREAKA + 1);
break;
#endif
#if XCHAL_NUM_DBREAK > 0
case SREG_DBREAKA + 0:
WSR(v, SREG_DBREAKA + 0);
xtensa_set_sr(v, SREG_DBREAKA + 0);
break;
case SREG_DBREAKC + 0:
WSR(v, SREG_DBREAKC + 0);
xtensa_set_sr(v, SREG_DBREAKC + 0);
break;
#endif
#if XCHAL_NUM_DBREAK > 1
case SREG_DBREAKA + 1:
WSR(v, SREG_DBREAKA + 1);
xtensa_set_sr(v, SREG_DBREAKA + 1);
break;
case SREG_DBREAKC + 1:
WSR(v, SREG_DBREAKC + 1);
xtensa_set_sr(v, SREG_DBREAKC + 1);
break;
#endif
}
@@ -150,8 +150,8 @@ static void set_ibreak_regs(int reg, struct perf_event *bp)
unsigned long ibreakenable;
xtensa_wsr(info->address, SREG_IBREAKA + reg);
RSR(ibreakenable, SREG_IBREAKENABLE);
WSR(ibreakenable | (1 << reg), SREG_IBREAKENABLE);
ibreakenable = xtensa_get_sr(SREG_IBREAKENABLE);
xtensa_set_sr(ibreakenable | (1 << reg), SREG_IBREAKENABLE);
}
static void set_dbreak_regs(int reg, struct perf_event *bp)
@@ -214,8 +214,9 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
/* Breakpoint */
i = free_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
if (i >= 0) {
RSR(ibreakenable, SREG_IBREAKENABLE);
WSR(ibreakenable & ~(1 << i), SREG_IBREAKENABLE);
ibreakenable = xtensa_get_sr(SREG_IBREAKENABLE);
xtensa_set_sr(ibreakenable & ~(1 << i),
SREG_IBREAKENABLE);
}
} else {
/* Watchpoint */

arch/xtensa/kernel/process.c

@@ -87,7 +87,7 @@ void coprocessor_release_all(struct thread_info *ti)
}
ti->cpenable = cpenable;
coprocessor_clear_cpenable();
xtensa_set_sr(0, cpenable);
preempt_enable();
}
@@ -99,16 +99,16 @@ void coprocessor_flush_all(struct thread_info *ti)
preempt_disable();
RSR_CPENABLE(old_cpenable);
old_cpenable = xtensa_get_sr(cpenable);
cpenable = ti->cpenable;
WSR_CPENABLE(cpenable);
xtensa_set_sr(cpenable, cpenable);
for (i = 0; i < XCHAL_CP_MAX; i++) {
if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
coprocessor_flush(ti, i);
cpenable >>= 1;
}
WSR_CPENABLE(old_cpenable);
xtensa_set_sr(old_cpenable, cpenable);
preempt_enable();
}
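
Reading aid for the hunk above (not part of the patch; the helper name is hypothetical): in xtensa_set_sr(cpenable, cpenable) the first cpenable is the local variable and the second, being stringified, names the CPENABLE special register. Roughly, after preprocessing:

	static inline void restore_cpenable_expanded(unsigned long cpenable)
	{
		/* xtensa_set_sr(cpenable, cpenable) after preprocessing */
		unsigned int v = (unsigned int)(cpenable);		/* the C variable */
		__asm__ __volatile__ ("wsr %0, cpenable" :: "a"(v));	/* the SR name */
	}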

arch/xtensa/kernel/setup.c

@@ -318,9 +318,9 @@ static inline int mem_reserve(unsigned long start, unsigned long end)
void __init setup_arch(char **cmdline_p)
{
pr_info("config ID: %08x:%08x\n",
get_sr(SREG_EPC), get_sr(SREG_EXCSAVE));
if (get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 ||
get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1)
xtensa_get_sr(SREG_EPC), xtensa_get_sr(SREG_EXCSAVE));
if (xtensa_get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 ||
xtensa_get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1)
pr_info("built for config ID: %08x:%08x\n",
XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1);
@@ -596,7 +596,7 @@ c_show(struct seq_file *f, void *slot)
num_online_cpus(),
cpumask_pr_args(cpu_online_mask),
XCHAL_BUILD_UNIQUE_ID,
get_sr(SREG_EPC), get_sr(SREG_EXCSAVE),
xtensa_get_sr(SREG_EPC), xtensa_get_sr(SREG_EXCSAVE),
XCHAL_HAVE_BE ? "big" : "little",
ccount_freq/1000000,
(ccount_freq/10000) % 100,

arch/xtensa/kernel/traps.c

@@ -213,8 +213,8 @@ extern void do_IRQ(int, struct pt_regs *);
static inline void check_valid_nmi(void)
{
unsigned intread = get_sr(interrupt);
unsigned intenable = get_sr(intenable);
unsigned intread = xtensa_get_sr(interrupt);
unsigned intenable = xtensa_get_sr(intenable);
BUG_ON(intread & intenable &
~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
@@ -271,8 +271,8 @@ void do_interrupt(struct pt_regs *regs)
irq_enter();
for (;;) {
unsigned intread = get_sr(interrupt);
unsigned intenable = get_sr(intenable);
unsigned intread = xtensa_get_sr(interrupt);
unsigned intenable = xtensa_get_sr(intenable);
unsigned int_at_level = intread & intenable;
unsigned level;

drivers/irqchip/irq-xtensa-mx.c

@@ -62,7 +62,7 @@ void secondary_init_irq(void)
__this_cpu_write(cached_irq_mask,
XCHAL_INTTYPE_MASK_EXTERN_EDGE |
XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
xtensa_set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
}
@@ -77,7 +77,7 @@ static void xtensa_mx_irq_mask(struct irq_data *d)
} else {
mask = __this_cpu_read(cached_irq_mask) & ~mask;
__this_cpu_write(cached_irq_mask, mask);
set_sr(mask, intenable);
xtensa_set_sr(mask, intenable);
}
}
@@ -92,7 +92,7 @@ static void xtensa_mx_irq_unmask(struct irq_data *d)
} else {
mask |= __this_cpu_read(cached_irq_mask);
__this_cpu_write(cached_irq_mask, mask);
set_sr(mask, intenable);
xtensa_set_sr(mask, intenable);
}
}
@@ -108,12 +108,12 @@ static void xtensa_mx_irq_disable(struct irq_data *d)
static void xtensa_mx_irq_ack(struct irq_data *d)
{
set_sr(1 << d->hwirq, intclear);
xtensa_set_sr(1 << d->hwirq, intclear);
}
static int xtensa_mx_irq_retrigger(struct irq_data *d)
{
set_sr(1 << d->hwirq, intset);
xtensa_set_sr(1 << d->hwirq, intset);
return 1;
}

drivers/irqchip/irq-xtensa-pic.c

@@ -44,13 +44,13 @@ static const struct irq_domain_ops xtensa_irq_domain_ops = {
static void xtensa_irq_mask(struct irq_data *d)
{
cached_irq_mask &= ~(1 << d->hwirq);
set_sr(cached_irq_mask, intenable);
xtensa_set_sr(cached_irq_mask, intenable);
}
static void xtensa_irq_unmask(struct irq_data *d)
{
cached_irq_mask |= 1 << d->hwirq;
set_sr(cached_irq_mask, intenable);
xtensa_set_sr(cached_irq_mask, intenable);
}
static void xtensa_irq_enable(struct irq_data *d)
@@ -65,12 +65,12 @@ static void xtensa_irq_disable(struct irq_data *d)
static void xtensa_irq_ack(struct irq_data *d)
{
set_sr(1 << d->hwirq, intclear);
xtensa_set_sr(1 << d->hwirq, intclear);
}
static int xtensa_irq_retrigger(struct irq_data *d)
{
set_sr(1 << d->hwirq, intset);
xtensa_set_sr(1 << d->hwirq, intset);
return 1;
}