[SPARC64]: Add __read_mostly support.

Signed-off-by: David S. Miller <davem@davemloft.net>
Author: David S. Miller <davem@davemloft.net>
Date:   2005-07-10 15:45:11 -07:00
Commit: d369ddd2fc
Parent: 9126dfde9e

4 changed files with 17 additions and 21 deletions
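
What the annotation buys: __read_mostly puts a variable into the .data.read_mostly section, so data that is written rarely (typically only during boot) but read on hot paths is packed together instead of sharing cache lines with frequently written data, which would otherwise bounce between CPUs on SMP. A minimal, compilable userspace sketch of the mechanism; the macro body is the one from include/linux/cache.h, while the variable and function names are only illustrative, not part of this commit:

	/* The attribute the kernel macro expands to on x86 and, after this
	 * commit, on sparc64: place the variable in .data.read_mostly. */
	#define __read_mostly __attribute__((__section__(".data.read_mostly")))

	/* Written once at startup, then only read on the hot path. */
	static unsigned long tick_offset __read_mostly;

	static unsigned long add_offset(unsigned long tick)
	{
		return tick + tick_offset;	/* read-only access */
	}

	int main(void)
	{
		tick_offset = 42;		/* one-time "boot" write */
		return (int)add_offset(0);
	}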

--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c

@@ -45,8 +45,8 @@ extern void calibrate_delay(void);
 /* Please don't make this stuff initdata!!! --DaveM */
 static unsigned char boot_cpu_id;
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
+cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
 static cpumask_t smp_commenced_mask;
 static cpumask_t cpu_callout_map;
@@ -155,7 +155,7 @@ void cpu_panic(void)
 	panic("SMP bolixed\n");
 }
-static unsigned long current_tick_offset;
+static unsigned long current_tick_offset __read_mostly;
 /* This tick register synchronization scheme is taken entirely from
  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
@@ -1193,8 +1193,8 @@ void smp_send_stop(void)
 {
 }
-unsigned long __per_cpu_base;
-unsigned long __per_cpu_shift;
+unsigned long __per_cpu_base __read_mostly;
+unsigned long __per_cpu_shift __read_mostly;
 EXPORT_SYMBOL(__per_cpu_base);
 EXPORT_SYMBOL(__per_cpu_shift);
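
The variables touched above fit the intended profile: the cpumasks and the per-cpu base/shift are filled in while the CPUs are brought up and are essentially only read afterwards by the SMP hot paths. A hedged, kernel-context sketch of the selection rule being applied (the names are hypothetical, not from this commit):

	#include <linux/cache.h>	/* provides __read_mostly */

	/* Good candidate: set while bringing CPUs up, only read afterwards. */
	static unsigned int boot_freq_khz __read_mostly;

	/* Poor candidate: bumped on every cross-call.  Tagging it would drop a
	 * hot written cache line into .data.read_mostly and defeat the point,
	 * so it stays in ordinary .data. */
	static unsigned long xcall_count;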

--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c

@@ -73,7 +73,7 @@ static __initdata struct sparc64_tick_ops dummy_tick_ops = {
 	.get_tick = dummy_get_tick,
 };
-struct sparc64_tick_ops *tick_ops = &dummy_tick_ops;
+struct sparc64_tick_ops *tick_ops __read_mostly = &dummy_tick_ops;
 #define TICK_PRIV_BIT (1UL << 63)
@@ -195,7 +195,7 @@ static unsigned long tick_add_tick(unsigned long adj, unsigned long offset)
 	return new_tick;
 }
-static struct sparc64_tick_ops tick_operations = {
+static struct sparc64_tick_ops tick_operations __read_mostly = {
 	.init_tick = tick_init_tick,
 	.get_tick = tick_get_tick,
 	.get_compare = tick_get_compare,
@@ -276,7 +276,7 @@ static unsigned long stick_add_compare(unsigned long adj)
 	return new_compare;
 }
-static struct sparc64_tick_ops stick_operations = {
+static struct sparc64_tick_ops stick_operations __read_mostly = {
 	.init_tick = stick_init_tick,
 	.get_tick = stick_get_tick,
 	.get_compare = stick_get_compare,
@@ -422,7 +422,7 @@ static unsigned long hbtick_add_compare(unsigned long adj)
 	return val;
 }
-static struct sparc64_tick_ops hbtick_operations = {
+static struct sparc64_tick_ops hbtick_operations __read_mostly = {
 	.init_tick = hbtick_init_tick,
 	.get_tick = hbtick_get_tick,
 	.get_compare = hbtick_get_compare,
@@ -437,10 +437,9 @@ static struct sparc64_tick_ops hbtick_operations = {
  * NOTE: On SUN5 systems the ticker interrupt comes in using 2
  * interrupts, one at level14 and one with softint bit 0.
  */
-unsigned long timer_tick_offset;
-unsigned long timer_tick_compare;
+unsigned long timer_tick_offset __read_mostly;
 
-static unsigned long timer_ticks_per_nsec_quotient;
+static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
 #define TICK_SIZE (tick_nsec / 1000)
@@ -464,7 +463,7 @@ static inline void timer_check_rtc(void)
 static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 {
-	unsigned long ticks, pstate;
+	unsigned long ticks, compare, pstate;
 	write_seqlock(&xtime_lock);
@@ -483,14 +482,14 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 			     : "=r" (pstate)
 			     : "i" (PSTATE_IE));
-		timer_tick_compare = tick_ops->add_compare(timer_tick_offset);
+		compare = tick_ops->add_compare(timer_tick_offset);
 		ticks = tick_ops->get_tick();
 		/* Restore PSTATE_IE. */
 		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 				     : /* no outputs */
 				     : "r" (pstate));
-	} while (time_after_eq(ticks, timer_tick_compare));
+	} while (time_after_eq(ticks, compare));
 	timer_check_rtc();
@@ -506,11 +505,6 @@ void timer_tick_interrupt(struct pt_regs *regs)
 	do_timer(regs);
-	/*
-	 * Only keep timer_tick_offset uptodate, but don't set TICK_CMPR.
-	 */
-	timer_tick_compare = tick_ops->get_compare() + timer_tick_offset;
 	timer_check_rtc();
 	write_sequnlock(&xtime_lock);
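
Note that the time.c hunks do more than add annotations: timer_tick_compare was a global rewritten on every timer interrupt, the opposite of read-mostly, so instead of tagging it the commit deletes it and keeps the value in a local inside timer_interrupt(). A compilable, purely illustrative reduction of that shape (the helper names are hypothetical stand-ins for the tick_ops calls):

	static unsigned long fake_add_compare(unsigned long off) { return off + 1; }
	static unsigned long fake_get_tick(void) { return 0; }

	static void handler(unsigned long offset)
	{
		unsigned long ticks, compare;

		do {
			compare = fake_add_compare(offset);	/* local: no shared write */
			ticks = fake_get_tick();
		} while (ticks >= compare);	/* stand-in for time_after_eq() */
	}

	int main(void)
	{
		handler(10);
		return 0;
	}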

--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S

@@ -32,6 +32,8 @@ SECTIONS
   .data1 : { *(.data1) }
   . = ALIGN(64);
   .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+  . = ALIGN(64);
+  .data.read_mostly : { *(.data.read_mostly) }
   _edata = .;
   PROVIDE (edata = .);
   .fixup : { *(.fixup) }

--- a/include/linux/cache.h
+++ b/include/linux/cache.h

@@ -13,7 +13,7 @@
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 #endif
-#ifdef CONFIG_X86
+#if defined(CONFIG_X86) || defined(CONFIG_SPARC64)
 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
 #else
 #define __read_mostly
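
With this last hunk, __read_mostly expands to the section attribute on x86 and sparc64 and to nothing on every other architecture, so shared code can use the annotation unconditionally; it is a placement hint, never a functional change. A small compilable sketch of that shape, with the CONFIG_* test reduced to one hypothetical macro:

	/* Reduced model of include/linux/cache.h after this commit. */
	#ifdef MY_ARCH_HAS_READ_MOSTLY
	#define __read_mostly __attribute__((__section__(".data.read_mostly")))
	#else
	#define __read_mostly	/* expands to nothing */
	#endif

	static int answer __read_mostly = 42;

	int main(void)
	{
		return answer;	/* behaves the same either way */
	}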