Mirror of https://github.com/torvalds/linux.git
use the new percpu interface for shared data
Currently most of the per-cpu data that is accessed by different cpus has a
____cacheline_aligned_in_smp attribute.  Move all this data to the new per-cpu
shared data section: .data.percpu.shared_aligned.

This separates the percpu data which is referenced frequently by other cpus
from the local-only percpu data.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit f34e3b61f2
parent 5fb7dc37dc
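For context, a minimal sketch of what the DEFINE_PER_CPU_SHARED_ALIGNED interface could look like, assuming the .data.percpu.shared_aligned section named in the commit message; the interface itself comes from the parent commit, so the exact definition in the tree may differ:

/* Hypothetical sketch, not copied verbatim from the tree: on SMP the variable
 * goes into the shared-aligned percpu section and is padded to a cacheline;
 * on UP there is no cross-cpu traffic, so it degrades to a plain DEFINE_PER_CPU.
 */
#ifdef CONFIG_SMP
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	__attribute__((__section__(".data.percpu.shared_aligned")))	\
	__typeof__(type) per_cpu__##name				\
	____cacheline_aligned_in_smp
#else
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU(type, name)
#endif

With an interface like this, definition sites no longer spell out ____cacheline_aligned_in_smp (or ____cacheline_internodealigned_in_smp) by hand, which is exactly the mechanical change in the hunks below.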
@@ -42,5 +42,5 @@ EXPORT_SYMBOL(init_task);
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
@@ -21,7 +21,7 @@
 #include <asm/apic.h>
 #include <asm/uaccess.h>
 
-DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
@@ -82,7 +82,7 @@ static volatile struct call_data_struct *call_data;
 #define IPI_KDUMP_CPU_STOP 3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
-static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
 
 extern void cpu_halt (void);
 
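To make the comment in the hunk above concrete: ipi_operation earns its own cacheline because remote cpus write into it. A hypothetical illustration, not part of this commit (example_send_ipi is a made-up name; per_cpu() and set_bit() are the standard kernel primitives):

#include <linux/bitops.h>
#include <linux/percpu.h>

/* The variable from the hunk above, now in the shared-aligned section. */
static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);

/* Illustration only: sending an IPI means a *different* cpu sets a bit in the
 * destination cpu's ipi_operation word, so that word must not share a
 * cacheline with purely local percpu data. */
static void example_send_ipi(int dest_cpu, int op)
{
	set_bit(op, (unsigned long *)&per_cpu(ipi_operation, dest_cpu));
	/* ... platform-specific interrupt delivery elided ... */
}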
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(init_task);
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
 /* Copies of the original ist values from the tss are only accessed during
  * debugging, no special alignment required.
@@ -301,7 +301,7 @@ struct rq {
 	struct lock_class_key rq_lock_key;
 };
 
-static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)