percpu: cleanup percpu array definitions
Currently, the following three different ways to define percpu arrays
are in use.

1. DEFINE_PER_CPU(elem_type[array_len], array_name);
2. DEFINE_PER_CPU(elem_type, array_name[array_len]);
3. DEFINE_PER_CPU(elem_type, array_name)[array_len];

Unify to #1 which correctly separates the roles of the two parameters
and thus allows more flexibility in the way percpu variables are
defined.

[ Impact: cleanup ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: linux-mm@kvack.org
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: David S. Miller <davem@davemloft.net>
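As a rough illustration of the difference (a sketch, not part of the patch:
the EXAMPLE_SLOTS length, the example_counts variable and the example_bump()
helper are made up; DEFINE_PER_CPU() and per_cpu() are the real interfaces
from <linux/percpu.h>):

#include <linux/percpu.h>

#define EXAMPLE_SLOTS 4

/* Form #1, the style this patch unifies on: the complete array type is
 * the first argument, the bare variable name is the second. */
static DEFINE_PER_CPU(unsigned int [EXAMPLE_SLOTS], example_counts);

/*
 * The two styles being removed declare the same object, but either move
 * the array length into the name argument or append it after the macro:
 *
 *   static DEFINE_PER_CPU(unsigned int, example_counts[EXAMPLE_SLOTS]);
 *   static DEFINE_PER_CPU(unsigned int, example_counts)[EXAMPLE_SLOTS];
 */

/* Access is unaffected by the definition style: per_cpu() yields the
 * whole per-CPU array, which is then indexed normally. */
static void example_bump(int cpu, int idx)
{
	per_cpu(example_counts, cpu)[idx]++;
}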
commit 204fba4aa3
parent fe87f94f34
@@ -58,7 +58,7 @@ static struct local_tlb_flush_counts {
 	unsigned int count;
 } __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];

-static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+static DEFINE_PER_CPU(unsigned short [NR_CPUS], shadow_flush_counts) ____cacheline_aligned;

 #define IPI_CALL_FUNC		0
 #define IPI_CPU_STOP		1
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
 EXPORT_PER_CPU_SYMBOL(__sn_hub_info);

-DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
+DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
 EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);

 DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
@@ -31,7 +31,7 @@ struct stab_entry {

 #define NR_STAB_CACHE_ENTRIES 8
 static DEFINE_PER_CPU(long, stab_cache_ptr);
-static DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);
+static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache);

 /*
  * Create a segment table entry for the given esid/vsid pair.
@@ -37,7 +37,7 @@
  */

 #define MSG_COUNT 4
-static DEFINE_PER_CPU(unsigned int, ps3_ipi_virqs[MSG_COUNT]);
+static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs);

 static void do_message_pass(int target, int msg)
 {
@@ -30,8 +30,8 @@
 #include <asm/apic.h>
 #include <asm/desc.h>

-static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
-static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
 static DEFINE_PER_CPU(int, cpu_priv_count);

 static DEFINE_MUTEX(cpu_debug_lock);
@@ -69,7 +69,7 @@ struct threshold_bank {
 	struct threshold_block	*blocks;
 	cpumask_var_t		cpus;
 };
-static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
+static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);

 #ifdef CONFIG_SMP
 static unsigned char shared_bank[NR_BANKS] = {
@@ -862,7 +862,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 	x86_pmu_disable_counter(hwc, idx);
 }

-static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
+static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], prev_left);

 /*
  * Set the next IRQ period, based on the hwc->period_left value.
@@ -47,10 +47,10 @@
 static DEFINE_SPINLOCK(irq_mapping_update_lock);

 /* IRQ <-> VIRQ mapping. */
-static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
+static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

 /* IRQ <-> IPI mapping */
-static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
+static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

 /* Interrupt types. */
 enum xen_irq_type {
@@ -19,7 +19,7 @@
 #include <linux/module.h>
 #include <linux/quicklist.h>

-DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);

 #define FRACTION_OF_NODE_MEM	16

@@ -2086,8 +2086,8 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
  */
 #define NR_KMEM_CACHE_CPU 100

-static DEFINE_PER_CPU(struct kmem_cache_cpu,
-			kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
+static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
+		      kmem_cache_cpu);

 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
 static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
@@ -37,7 +37,7 @@ __initcall(init_syncookies);
 #define COOKIEBITS 24	/* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)

-static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS];
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], cookie_scratch);

 static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
		       u32 count, int c)
@@ -74,7 +74,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 	return child;
 }

-static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS];
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], cookie_scratch);

 static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
		       __be16 sport, __be16 dport, u32 count, int c)
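All of the hunks above are the same mechanical conversion. As a rough sketch
of why form #1 is the one that generalizes (a simplified stand-in macro below,
not the kernel's real DEFINE_PER_CPU(), which also handles alignment and the
DECLARE/EXPORT variants): GCC's __typeof__ accepts an array type, so keeping
the whole type in the first parameter still yields an array definition, while
the second parameter stays a bare identifier the macro is free to decorate.

/* Simplified stand-in for illustration only. */
#define SKETCH_DEFINE_PER_CPU(type, name) \
	__attribute__((section(".data.percpu"))) __typeof__(type) name

/* Form #1: __typeof__(unsigned long [4]) is itself an array type, and
 * "sketch_counts" is a plain token the macro can place in a section,
 * prefix, or pair with matching DECLARE/EXPORT macros. */
static SKETCH_DEFINE_PER_CPU(unsigned long [4], sketch_counts);

/* Form #3, by contrast, only works while the expansion happens to end
 * with the variable name, since the "[4]" is pasted on afterwards:
 *
 *   static SKETCH_DEFINE_PER_CPU(unsigned long, sketch_counts)[4];
 */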