commit 98a79d6a50 — cpumask: centralize cpu_online_map and cpu_possible_map

Impact: cleanup

Each SMP arch defines these themselves. Move them to a central location.

Twists:

1) Some archs (m32, parisc, s390) set possible_map to all 1, so we add a
   CONFIG_INIT_ALL_POSSIBLE for this rather than break them.

2) mips and sparc32 '#define cpu_possible_map phys_cpu_present_map'.
   Those archs simply have phys_cpu_present_map replaced everywhere.

3) Alpha defined cpu_possible_map to cpu_present_map; this is tricky,
   so I just manipulate them both in sync.

4) IA64, cris and m32r have gratuitous 'extern cpumask_t cpu_possible_map'
   declarations.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Reviewed-by: Grant Grundler <grundler@parisc-linux.org>
Tested-by: Tony Luck <tony.luck@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Mike Travis <travis@sgi.com>
Cc: ink@jurassic.park.msu.ru
Cc: rmk@arm.linux.org.uk
Cc: starvik@axis.com
Cc: tony.luck@intel.com
Cc: takata@linux-m32r.org
Cc: ralf@linux-mips.org
Cc: grundler@parisc-linux.org
Cc: paulus@samba.org
Cc: schwidefsky@de.ibm.com
Cc: lethal@linux-sh.org
Cc: wli@holomorphy.com
Cc: davem@davemloft.net
Cc: jdike@addtoit.com
Cc: mingo@redhat.com
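The commit replaces per-arch definitions of these masks with a single generic definition. Below is a minimal sketch of the kind of central definition described in twist (1); the exact placement (assumed here to be kernel/cpu.c) and the surrounding declarations are an assumption based on the commit message, not a verbatim copy of the patch:

/*
 * Sketch (assumption, not the literal patch): the possible map is now
 * defined once in generic code.  Archs such as m32r, parisc and s390,
 * which used to start with every CPU marked possible, select
 * CONFIG_INIT_ALL_POSSIBLE instead of providing their own definition.
 */
#include <linux/cpumask.h>
#include <linux/module.h>

#ifdef CONFIG_INIT_ALL_POSSIBLE
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
#else
cpumask_t cpu_possible_map __read_mostly;
#endif
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

With a central definition like this in place, the 'extern cpumask_t cpu_possible_map' declaration that twist (4) calls gratuitous becomes redundant in this m32r header, since <linux/cpumask.h> already declares the map.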
120 lines · 3.6 KiB · C
#ifndef _ASM_M32R_SMP_H
#define _ASM_M32R_SMP_H

#ifdef CONFIG_SMP
#ifndef __ASSEMBLY__

#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <asm/m32r.h>

#define PHYSID_ARRAY_SIZE 1

struct physid_mask
{
        unsigned long mask[PHYSID_ARRAY_SIZE];
};

typedef struct physid_mask physid_mask_t;

#define physid_set(physid, map) set_bit(physid, (map).mask)
#define physid_clear(physid, map) clear_bit(physid, (map).mask)
#define physid_isset(physid, map) test_bit(physid, (map).mask)
#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask)

#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
#define physids_clear(map) bitmap_zero((map).mask, MAX_APICS)
#define physids_complement(dst, src) bitmap_complement((dst).mask, (src).mask, MAX_APICS)
#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS)
#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS)
#define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
#define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
#define physids_coerce(map) ((map).mask[0])

#define physids_promote(physids) \
        ({ \
                physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
                __physid_mask.mask[0] = physids; \
                __physid_mask; \
        })

#define physid_mask_of_physid(physid) \
        ({ \
                physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
                physid_set(physid, __physid_mask); \
                __physid_mask; \
        })

#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }

extern physid_mask_t phys_cpu_present_map;

/*
 * Some lowlevel functions might want to know about
 * the real CPU ID <-> CPU # mapping.
 */
extern volatile int cpu_2_physid[NR_CPUS];
#define cpu_to_physid(cpu_id) cpu_2_physid[cpu_id]

#define raw_smp_processor_id() (current_thread_info()->cpu)

extern cpumask_t cpu_callout_map;

static __inline__ int hard_smp_processor_id(void)
{
        return (int)*(volatile long *)M32R_CPUID_PORTL;
}

static __inline__ int cpu_logical_map(int cpu)
{
        return cpu;
}

static __inline__ int cpu_number_map(int cpu)
{
        return cpu;
}

static __inline__ unsigned int num_booting_cpus(void)
{
        return cpus_weight(cpu_callout_map);
}

extern void smp_send_timer(void);
extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi(cpumask_t mask);

#endif /* not __ASSEMBLY__ */

#define NO_PROC_ID (0xff) /* No processor magic marker */

#define PROC_CHANGE_PENALTY (15) /* Schedule penalty */

/*
 * M32R-mp IPI
 */
#define RESCHEDULE_IPI (M32R_IRQ_IPI0-M32R_IRQ_IPI0)
#define INVALIDATE_TLB_IPI (M32R_IRQ_IPI1-M32R_IRQ_IPI0)
#define CALL_FUNCTION_IPI (M32R_IRQ_IPI2-M32R_IRQ_IPI0)
#define LOCAL_TIMER_IPI (M32R_IRQ_IPI3-M32R_IRQ_IPI0)
#define INVALIDATE_CACHE_IPI (M32R_IRQ_IPI4-M32R_IRQ_IPI0)
#define CPU_BOOT_IPI (M32R_IRQ_IPI5-M32R_IRQ_IPI0)
#define CALL_FUNC_SINGLE_IPI (M32R_IRQ_IPI6-M32R_IRQ_IPI0)

#define IPI_SHIFT (0)
#define NR_IPIS (8)

#else /* CONFIG_SMP */

#define hard_smp_processor_id() 0

#endif /* CONFIG_SMP */

#endif /* _ASM_M32R_SMP_H */