Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/lliubbo/blackfin
Pull blackfin updates from Bob Liu:
 "One kbuild and a smp build fix."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/lliubbo/blackfin:
  kbuild: add symbol prefix arg to kallsyms
  blackfin: smp: adapt to generic smp helpers
commit 2a1497c3c4
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -38,6 +38,7 @@ config BLACKFIN
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_PROBE
 	select IRQ_PER_CPU if SMP
+	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
 	select GENERIC_SMP_IDLE_THREAD
 	select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
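Selecting USE_GENERIC_SMP_HELPERS (only when SMP is on) is what allows the smp.c hunks below to drop blackfin's private smp_call_function()/smp_call_function_single() in favor of the generic implementations in kernel/smp.c, which are compiled in under this symbol.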
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -20,7 +20,6 @@ endif
 KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
 KBUILD_CFLAGS_MODULE += -mlong-calls
 LDFLAGS += -m elf32bfin
-KALLSYMS += --symbol-prefix=_
 
 KBUILD_DEFCONFIG := BF537-STAMP_defconfig
 
--- a/arch/blackfin/include/asm/smp.h
+++ b/arch/blackfin/include/asm/smp.h
@@ -18,6 +18,8 @@
 #define raw_smp_processor_id()	blackfin_core_id()
 
 extern void bfin_relocate_coreb_l1_mem(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
 asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
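These two declarations are the entire arch-side surface of the generic SMP call helpers: kernel/smp.c queues the work and invokes these hooks, the arch only delivers a doorbell interrupt, and the interrupt handler drains the queue through the generic entry points. A minimal single-threaded userspace sketch of that division of labor (the *_model names are illustrative, not from the patch):

#include <stdio.h>

struct call_single_data {
	void (*func)(void *info);
	void *info;
};

/* stand-in for the per-CPU work queue that kernel/smp.c owns */
static struct call_single_data pending_csd;

/* arch hook: in this patch it just sets a bit and raises IRQ_SUPPLE_1 */
static void arch_send_call_function_single_ipi_model(int cpu)
{
	printf("doorbell IPI -> CPU %d\n", cpu);
}

/* what generic_smp_call_function_single_interrupt() does, in miniature:
 * drain the queue and run the queued function */
static void ipi_handler_model(void)
{
	pending_csd.func(pending_csd.info);
}

/* what the generic smp_call_function_single() does, in miniature:
 * queue the work, then ask the arch to ring the doorbell */
static void smp_call_function_single_model(int cpu, void (*func)(void *),
					   void *info)
{
	pending_csd.func = func;
	pending_csd.info = info;
	arch_send_call_function_single_ipi_model(cpu);
	ipi_handler_model();	/* the real IPI would run this on @cpu */
}

static void say_hello(void *info)
{
	printf("hello from %s\n", (char *)info);
}

int main(void)
{
	smp_call_function_single_model(1, say_hello, "the target cpu");
	return 0;
}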
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
 
 struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
 
-#define BFIN_IPI_TIMER 0
-#define BFIN_IPI_RESCHEDULE 1
-#define BFIN_IPI_CALL_FUNC 2
-#define BFIN_IPI_CPU_STOP 3
+enum ipi_message_type {
+	BFIN_IPI_TIMER,
+	BFIN_IPI_RESCHEDULE,
+	BFIN_IPI_CALL_FUNC,
+	BFIN_IPI_CALL_FUNC_SINGLE,
+	BFIN_IPI_CPU_STOP,
+};
 
 struct blackfin_flush_data {
 	unsigned long start;
@@ -60,35 +63,20 @@ struct blackfin_flush_data {
 
 void *secondary_stack;
 
-struct smp_call_struct {
-	void (*func)(void *info);
-	void *info;
-	int wait;
-	cpumask_t *waitmask;
-};
-
 static struct blackfin_flush_data smp_flush_data;
 
 static DEFINE_SPINLOCK(stop_lock);
 
-struct ipi_message {
-	unsigned long type;
-	struct smp_call_struct call_struct;
-};
-
-/* A magic number - stress test shows this is safe for common cases */
-#define BFIN_IPI_MSGQ_LEN 5
-
-/* Simple FIFO buffer, overflow leads to panic */
-struct ipi_message_queue {
-	spinlock_t lock;
+struct ipi_data {
 	unsigned long count;
-	unsigned long head; /* head of the queue */
-	struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
+	unsigned long bits;
 };
 
-static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
+static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
 
 static void ipi_cpu_stop(unsigned int cpu)
 {
@@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
 	blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
-static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
-{
-	int wait;
-	void (*func)(void *info);
-	void *info;
-	func = msg->call_struct.func;
-	info = msg->call_struct.info;
-	wait = msg->call_struct.wait;
-	func(info);
-	if (wait) {
-#ifdef __ARCH_SYNC_CORE_DCACHE
-		/*
-		 * 'wait' usually means synchronization between CPUs.
-		 * Invalidate D cache in case shared data was changed
-		 * by func() to ensure cache coherence.
-		 */
-		resync_core_dcache();
-#endif
-		cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
-	}
-}
-
 /* Use IRQ_SUPPLE_0 to request reschedule.
  * When returning from interrupt to user space,
  * there is chance to reschedule */
@@ -172,152 +138,95 @@ void ipi_timer(void)
 
 static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
-	struct ipi_message *msg;
-	struct ipi_message_queue *msg_queue;
+	struct ipi_data *bfin_ipi_data;
 	unsigned int cpu = smp_processor_id();
-	unsigned long flags;
+	unsigned long pending;
+	unsigned long msg;
 
 	platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
-	msg_queue = &__get_cpu_var(ipi_msg_queue);
-
-	spin_lock_irqsave(&msg_queue->lock, flags);
-
-	while (msg_queue->count) {
-		msg = &msg_queue->ipi_message[msg_queue->head];
-		switch (msg->type) {
-		case BFIN_IPI_TIMER:
-			ipi_timer();
-			break;
-		case BFIN_IPI_RESCHEDULE:
-			scheduler_ipi();
-			break;
-		case BFIN_IPI_CALL_FUNC:
-			ipi_call_function(cpu, msg);
-			break;
-		case BFIN_IPI_CPU_STOP:
-			ipi_cpu_stop(cpu);
-			break;
-		default:
-			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
-			       cpu, msg->type);
-			break;
-		}
-		msg_queue->head++;
-		msg_queue->head %= BFIN_IPI_MSGQ_LEN;
-		msg_queue->count--;
+	bfin_ipi_data = &__get_cpu_var(bfin_ipi);
+
+	while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+		msg = 0;
+		do {
+			msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
+			switch (msg) {
+			case BFIN_IPI_TIMER:
+				ipi_timer();
+				break;
+			case BFIN_IPI_RESCHEDULE:
+				scheduler_ipi();
+				break;
+			case BFIN_IPI_CALL_FUNC:
+				generic_smp_call_function_interrupt();
+				break;
+			case BFIN_IPI_CALL_FUNC_SINGLE:
+				generic_smp_call_function_single_interrupt();
+				break;
+			case BFIN_IPI_CPU_STOP:
+				ipi_cpu_stop(cpu);
+				break;
+			}
+		} while (msg < BITS_PER_LONG);
+
+		smp_mb();
 	}
-	spin_unlock_irqrestore(&msg_queue->lock, flags);
 	return IRQ_HANDLED;
 }
 
-static void ipi_queue_init(void)
+static void bfin_ipi_init(void)
 {
 	unsigned int cpu;
-	struct ipi_message_queue *msg_queue;
+	struct ipi_data *bfin_ipi_data;
 	for_each_possible_cpu(cpu) {
-		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		spin_lock_init(&msg_queue->lock);
-		msg_queue->count = 0;
-		msg_queue->head = 0;
+		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+		bfin_ipi_data->bits = 0;
+		bfin_ipi_data->count = 0;
 	}
 }
 
-static inline void smp_send_message(cpumask_t callmap, unsigned long type,
-					void (*func) (void *info), void *info, int wait)
+void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 {
 	unsigned int cpu;
-	struct ipi_message_queue *msg_queue;
-	struct ipi_message *msg;
-	unsigned long flags, next_msg;
-	cpumask_t waitmask; /* waitmask is shared by all cpus */
-
-	cpumask_copy(&waitmask, &callmap);
-	for_each_cpu(cpu, &callmap) {
-		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		spin_lock_irqsave(&msg_queue->lock, flags);
-		if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
-			next_msg = (msg_queue->head + msg_queue->count)
-					% BFIN_IPI_MSGQ_LEN;
-			msg = &msg_queue->ipi_message[next_msg];
-			msg->type = type;
-			if (type == BFIN_IPI_CALL_FUNC) {
-				msg->call_struct.func = func;
-				msg->call_struct.info = info;
-				msg->call_struct.wait = wait;
-				msg->call_struct.waitmask = &waitmask;
-			}
-			msg_queue->count++;
-		} else
-			panic("IPI message queue overflow\n");
-		spin_unlock_irqrestore(&msg_queue->lock, flags);
+	struct ipi_data *bfin_ipi_data;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	for_each_cpu(cpu, cpumask) {
+		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+		smp_mb();
+		set_bit(msg, &bfin_ipi_data->bits);
+		bfin_ipi_data->count++;
 		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
 	}
 
-	if (wait) {
-		while (!cpumask_empty(&waitmask))
-			blackfin_dcache_invalidate_range(
-				(unsigned long)(&waitmask),
-				(unsigned long)(&waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
-		/*
-		 * Invalidate D cache in case shared data was changed by
-		 * other processors to ensure cache coherence.
-		 */
-		resync_core_dcache();
-#endif
-	}
+	local_irq_restore(flags);
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	cpumask_t callmap;
-
-	preempt_disable();
-	cpumask_copy(&callmap, cpu_online_mask);
-	cpumask_clear_cpu(smp_processor_id(), &callmap);
-	if (!cpumask_empty(&callmap))
-		smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-	preempt_enable();
-
-	return 0;
+	send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL_GPL(smp_call_function);
 
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-				int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	unsigned int cpu = cpuid;
-	cpumask_t callmap;
-
-	if (cpu_is_offline(cpu))
-		return 0;
-	cpumask_clear(&callmap);
-	cpumask_set_cpu(cpu, &callmap);
-
-	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-	return 0;
+	send_ipi(mask, BFIN_IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void smp_send_reschedule(int cpu)
 {
-	cpumask_t callmap;
-	/* simply trigger an ipi */
-
-	cpumask_clear(&callmap);
-	cpumask_set_cpu(cpu, &callmap);
-
-	smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
+	send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
 
 	return;
 }
 
 void smp_send_msg(const struct cpumask *mask, unsigned long type)
 {
-	smp_send_message(*mask, type, NULL, NULL, 0);
+	send_ipi(mask, type);
 }
 
 void smp_timer_broadcast(const struct cpumask *mask)
@@ -333,7 +242,7 @@ void smp_send_stop(void)
 	cpumask_copy(&callmap, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &callmap);
 	if (!cpumask_empty(&callmap))
-		smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+		send_ipi(&callmap, BFIN_IPI_CPU_STOP);
 
 	preempt_enable();
 
@@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	platform_prepare_cpus(max_cpus);
-	ipi_queue_init();
+	bfin_ipi_init();
 	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
 	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
 }
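The heart of the smp.c rewrite is replacing the per-CPU locked FIFO (ipi_msg_queue) with one unsigned long of pending-message bits per CPU: a sender sets the message's bit with set_bit() and raises IRQ_SUPPLE_1, and the handler claims the whole word at once with xchg() and dispatches every set bit, so no spinlock is needed on the hot path. A minimal single-threaded userspace model of that pattern using C11 atomics (the *_model names are illustrative, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

enum ipi_message_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

/* one word per CPU in the real code; a single shared word here */
static _Atomic unsigned long ipi_bits;

/* sender side: set_bit() + platform_send_ipi_cpu() in the patch */
static void send_ipi_model(enum ipi_message_type msg)
{
	atomic_fetch_or(&ipi_bits, 1UL << msg);
	/* the real sender now raises IRQ_SUPPLE_1 on the target CPU */
}

/* receiver side: claim every pending message at once, then dispatch */
static void ipi_handler_model(void)
{
	unsigned long pending;
	unsigned int msg;

	/* like xchg(&bfin_ipi_data->bits, 0) in the new handler */
	while ((pending = atomic_exchange(&ipi_bits, 0UL)) != 0) {
		for (msg = 0; msg < 8 * sizeof(pending); msg++) {
			if (!(pending & (1UL << msg)))
				continue;
			switch (msg) {
			case IPI_RESCHEDULE:
				printf("reschedule\n");
				break;
			case IPI_CALL_FUNC:
				printf("run queued calls\n");
				break;
			default:
				printf("message %u\n", msg);
				break;
			}
		}
	}
}

int main(void)
{
	send_ipi_model(IPI_RESCHEDULE);
	send_ipi_model(IPI_CALL_FUNC);
	ipi_handler_model();
	return 0;
}

One detail the sketch does not mirror exactly: the patched handler scans with find_next_bit(&pending, BITS_PER_LONG, msg + 1) starting from msg = 0, i.e. from bit 1 upward, whereas the sketch scans from bit 0.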
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -74,8 +74,13 @@ kallsyms()
 	info KSYM ${2}
 	local kallsymopt;
 
+	if [ -n "${CONFIG_SYMBOL_PREFIX}" ]; then
+		kallsymopt="${kallsymopt} \
+			      --symbol-prefix=${CONFIG_SYMBOL_PREFIX}"
+	fi
+
 	if [ -n "${CONFIG_KALLSYMS_ALL}" ]; then
-		kallsymopt=--all-symbols
+		kallsymopt="${kallsymopt} --all-symbols"
 	fi
 
 	local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
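Paired with the arch/blackfin/Makefile hunk above, this removes the hardcoded `KALLSYMS += --symbol-prefix=_` from the blackfin build: the link script now derives the option from CONFIG_SYMBOL_PREFIX, so any architecture whose toolchain prepends an underscore to C symbols gets a correctly prefixed kallsyms table without arch-specific plumbing. The second change makes `--all-symbols` append to kallsymopt rather than overwrite it, so both options can be passed together.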