forked from Minki/linux
SPARC: SMP: Remove call to ipi_call_lock_irq()/ipi_call_unlock_irq()
ipi_call_lock/unlock() lock resp. unlock call_function.lock. This lock
protects only the call_function data structure itself, but it's
completely unrelated to cpu_online_mask. The mask to which the IPIs are
sent is calculated before call_function.lock is taken in
smp_call_function_many(), so the locking around set_cpu_online() is
pointless and can be removed.

Delay irq enable to after set_cpu_online().

[ tglx: Massaged changelog ]

Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
Cc: ralf@linux-mips.org
Cc: sshtylyov@mvista.com
Cc: david.daney@cavium.com
Cc: nikunj@linux.vnet.ibm.com
Cc: paulmck@linux.vnet.ibm.com
Cc: axboe@kernel.dk
Cc: peterz@infradead.org
Cc: sparclinux@vger.kernel.org
Link: http://lkml.kernel.org/r/20120529082732.GA4250@zhy
Acked-by: "David S. Miller" <davem@davemloft.net>
Acked-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
459165e250
commit
bc68330095
@@ -103,8 +103,6 @@ void __cpuinit smp_callin(void)
|
||||
if (cheetah_pcache_forced_on)
|
||||
cheetah_enable_pcache();
|
||||
|
||||
local_irq_enable();
|
||||
|
||||
callin_flag = 1;
|
||||
__asm__ __volatile__("membar #Sync\n\t"
|
||||
"flush %%g6" : : : "memory");
|
||||
@@ -124,9 +122,8 @@ void __cpuinit smp_callin(void)
|
||||
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
|
||||
rmb();
|
||||
|
||||
ipi_call_lock_irq();
|
||||
set_cpu_online(cpuid, true);
|
||||
ipi_call_unlock_irq();
|
||||
local_irq_enable();
|
||||
|
||||
/* idle thread is expected to have preempt disabled */
|
||||
preempt_disable();
|
||||
@@ -1308,9 +1305,7 @@ int __cpu_disable(void)
|
||||
mdelay(1);
|
||||
local_irq_disable();
|
||||
|
||||
ipi_call_lock();
|
||||
set_cpu_online(cpu, false);
|
||||
ipi_call_unlock();
|
||||
|
||||
cpu_map_rebuild();
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user