mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 22:21:40 +00:00
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Avoid array overflow if there are too many cpus in SRAT table
  [IA64] Remove unlikely from cpu_is_offline
  [IA64] irq_ia64, use set_irq_chip
  [IA64] perfmon: Change vmalloc to vzalloc and drop memset.
  [IA64] eliminate race condition in smp_flush_tlb_mm
This commit is contained in:
commit
ecacc6c70c
@ -477,6 +477,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
|
||||
if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
|
||||
return;
|
||||
|
||||
if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) {
|
||||
printk_once(KERN_WARNING
|
||||
"node_cpuid[%d] is too small, may not be able to use all cpus\n",
|
||||
ARRAY_SIZE(node_cpuid));
|
||||
return;
|
||||
}
|
||||
pxm = get_processor_proximity_domain(pa);
|
||||
|
||||
/* record this node in proximity bitmap */
|
||||
|
@ -633,7 +633,7 @@ ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
|
||||
BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
|
||||
desc = irq_desc + irq;
|
||||
desc->status |= IRQ_PER_CPU;
|
||||
desc->chip = &irq_type_ia64_lsapic;
|
||||
set_irq_chip(irq, &irq_type_ia64_lsapic);
|
||||
if (action)
|
||||
setup_irq(irq, action);
|
||||
set_irq_handler(irq, handle_percpu_irq);
|
||||
|
@ -829,10 +829,9 @@ pfm_rvmalloc(unsigned long size)
|
||||
unsigned long addr;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
mem = vmalloc(size);
|
||||
mem = vzalloc(size);
|
||||
if (mem) {
|
||||
//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
|
||||
memset(mem, 0, size);
|
||||
addr = (unsigned long)mem;
|
||||
while (size > 0) {
|
||||
pfm_reserve_page(addr);
|
||||
|
@ -293,6 +293,7 @@ smp_flush_tlb_all (void)
|
||||
void
|
||||
smp_flush_tlb_mm (struct mm_struct *mm)
|
||||
{
|
||||
cpumask_var_t cpus;
|
||||
preempt_disable();
|
||||
/* this happens for the common case of a single-threaded fork(): */
|
||||
if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
|
||||
@ -301,9 +302,15 @@ smp_flush_tlb_mm (struct mm_struct *mm)
|
||||
preempt_enable();
|
||||
return;
|
||||
}
|
||||
|
||||
smp_call_function_many(mm_cpumask(mm),
|
||||
(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
|
||||
if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
|
||||
smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
|
||||
mm, 1);
|
||||
} else {
|
||||
cpumask_copy(cpus, mm_cpumask(mm));
|
||||
smp_call_function_many(cpus,
|
||||
(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
|
||||
free_cpumask_var(cpus);
|
||||
}
|
||||
local_irq_disable();
|
||||
local_finish_flush_tlb_mm(mm);
|
||||
local_irq_enable();
|
||||
|
@ -168,7 +168,7 @@ timer_interrupt (int irq, void *dev_id)
|
||||
{
|
||||
unsigned long new_itm;
|
||||
|
||||
if (unlikely(cpu_is_offline(smp_processor_id()))) {
|
||||
if (cpu_is_offline(smp_processor_id())) {
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user