x86/apic: Eliminate cpu_mask_to_apicid() operation

Since there are only two locations where cpu_mask_to_apicid() is
called, remove the operation and use only cpu_mask_to_apicid_and()
instead.
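
The conversion is safe at the call sites because passing the same cpumask
for both arguments of cpu_mask_to_apicid_and() reproduces what
cpu_mask_to_apicid() did: the intersection of a mask with itself is the mask,
and the "and" variant already intersects with cpu_online_mask internally.
A minimal user-space sketch of the idea (not kernel code; the online mask,
the helper names and the use of the CPU number as its own APIC ID are
illustrative simplifications):

#include <assert.h>
#include <stdio.h>

static unsigned long online = 0x0fUL;		/* pretend CPUs 0-3 are online */

/* Model of the removed op: first online CPU in @mask is the destination. */
static int old_mask_to_apicid(unsigned long mask, unsigned int *apicid)
{
	unsigned long m = mask & online;

	if (!m)
		return -1;
	*apicid = (unsigned int)__builtin_ctzl(m);	/* CPU number stands in for its APIC ID */
	return 0;
}

/* Model of the surviving op: first online CPU in @mask & @andmask. */
static int mask_to_apicid_and(unsigned long mask, unsigned long andmask,
			      unsigned int *apicid)
{
	return old_mask_to_apicid(mask & andmask, apicid);
}

int main(void)
{
	unsigned long mask = 0x06UL;		/* CPUs 1 and 2 requested */
	unsigned int a, b;

	assert(old_mask_to_apicid(mask, &a) == 0);
	assert(mask_to_apicid_and(mask, mask, &b) == 0);	/* same mask twice */
	assert(a == b);
	printf("destination apicid: %u == %u\n", a, b);
	return 0;
}

This mirrors the two converted callers in the diff below,
setup_timer_IRQ0_pin() and arch_enable_uv_irq(), which now simply pass
their cpumask twice.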

Signed-off-by: Alexander Gordeev <agordeev@redhat.com>
Suggested-and-acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/20120614074935.GE3383@dhcp-26-207.brq.redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Alexander Gordeev, 2012-06-14 09:49:35 +02:00 (committed by Ingo Molnar)
parent cac4afbc3d
commit a5a391561b
15 changed files with 25 additions and 105 deletions
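
The flat (logical delivery) inline helpers are merged the same way, as the
hunks immediately below show: the surviving flat_cpu_mask_to_apicid_and()
intersects both cpumasks, the online mask and APIC_ALL_CPUS in one expression
and returns the resulting bitmap as the logical destination. A user-space
sketch of that behaviour (again illustrative only, not kernel code):

#include <assert.h>
#include <stdio.h>

#define APIC_ALL_CPUS	0xffUL

static unsigned long online = 0x0fUL;		/* pretend CPUs 0-3 are online */

/* Model of the merged flat helper: logical mode returns the whole mask. */
static int flat_to_apicid_and(unsigned long mask, unsigned long andmask,
			      unsigned int *apicid)
{
	unsigned long m = mask & andmask & online & APIC_ALL_CPUS;

	if (!m)
		return -1;
	*apicid = (unsigned int)m;
	return 0;
}

int main(void)
{
	unsigned long mask = 0x36UL;		/* CPUs 1, 2, 4 and 5 requested */
	unsigned int dest;

	/* Same mask twice == what the removed single-mask helper returned. */
	assert(flat_to_apicid_and(mask, mask, &dest) == 0);
	assert(dest == (mask & online & APIC_ALL_CPUS));	/* only CPUs 1 and 2 survive */
	printf("logical destination: 0x%x\n", dest);
	return 0;
}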


@@ -331,8 +331,6 @@ struct apic {
unsigned long (*set_apic_id)(unsigned int id);
unsigned long apic_id_mask;
int (*cpu_mask_to_apicid)(const struct cpumask *cpumask,
unsigned int *apicid);
int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid);
@@ -594,9 +592,15 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
#endif
static inline int
__flat_cpu_mask_to_apicid(unsigned long cpu_mask, unsigned int *apicid)
flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid)
{
cpu_mask = cpu_mask & APIC_ALL_CPUS & cpumask_bits(cpu_online_mask)[0];
unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
cpumask_bits(andmask)[0] &
cpumask_bits(cpu_online_mask)[0] &
APIC_ALL_CPUS;
if (likely(cpu_mask)) {
*apicid = (unsigned int)cpu_mask;
return 0;
@@ -605,27 +609,6 @@ __flat_cpu_mask_to_apicid(unsigned long cpu_mask, unsigned int *apicid)
}
}
static inline int
flat_cpu_mask_to_apicid(const struct cpumask *cpumask,
unsigned int *apicid)
{
return __flat_cpu_mask_to_apicid(cpumask_bits(cpumask)[0], apicid);
}
static inline int
flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid)
{
unsigned long mask1 = cpumask_bits(cpumask)[0];
unsigned long mask2 = cpumask_bits(andmask)[0];
return __flat_cpu_mask_to_apicid(mask1 & mask2, apicid);
}
extern int
default_cpu_mask_to_apicid(const struct cpumask *cpumask,
unsigned int *apicid);
extern int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,


@@ -2123,23 +2123,6 @@ void default_init_apic_ldr(void)
apic_write(APIC_LDR, val);
}
static inline int __default_cpu_to_apicid(int cpu, unsigned int *apicid)
{
if (likely((unsigned int)cpu < nr_cpu_ids)) {
*apicid = per_cpu(x86_cpu_to_apicid, cpu);
return 0;
} else {
return -EINVAL;
}
}
int default_cpu_mask_to_apicid(const struct cpumask *cpumask,
unsigned int *apicid)
{
int cpu = cpumask_first_and(cpumask, cpu_online_mask);
return __default_cpu_to_apicid(cpu, apicid);
}
int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid)
@@ -2151,7 +2134,12 @@ int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
break;
}
return __default_cpu_to_apicid(cpu, apicid);
if (likely((unsigned int)cpu < nr_cpu_ids)) {
*apicid = per_cpu(x86_cpu_to_apicid, cpu);
return 0;
} else {
return -EINVAL;
}
}
/*


@@ -191,7 +191,6 @@ static struct apic apic_flat = {
.set_apic_id = set_apic_id,
.apic_id_mask = 0xFFu << 24,
.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
.send_IPI_mask = flat_send_IPI_mask,
@@ -308,7 +307,6 @@ static struct apic apic_physflat = {
.set_apic_id = set_apic_id,
.apic_id_mask = 0xFFu << 24,
.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = physflat_send_IPI_mask,


@@ -159,7 +159,6 @@ struct apic apic_noop = {
.set_apic_id = NULL,
.apic_id_mask = 0x0F << 24,
.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
.send_IPI_mask = noop_send_IPI_mask,


@@ -234,7 +234,6 @@ static struct apic apic_numachip __refconst = {
.set_apic_id = set_apic_id,
.apic_id_mask = 0xffU << 24,
.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = numachip_send_IPI_mask,


@@ -188,7 +188,6 @@ static struct apic apic_bigsmp = {
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = bigsmp_send_IPI_mask,


@@ -525,7 +525,7 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
return 1;
}
static int
static inline int
es7000_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
{
unsigned int round = 0;
@@ -643,7 +643,6 @@ static struct apic __refdata apic_es7000_cluster = {
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
.cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
.send_IPI_mask = es7000_send_IPI_mask,
@@ -710,7 +709,6 @@ static struct apic __refdata apic_es7000 = {
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
.cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
.send_IPI_mask = es7000_send_IPI_mask,


@@ -1492,7 +1492,8 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
* We use logical delivery to get the timer IRQ
* to the first CPU.
*/
if (unlikely(apic->cpu_mask_to_apicid(apic->target_cpus(), &dest)))
if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(),
apic->target_cpus(), &dest)))
dest = BAD_APICID;
entry.dest_mode = apic->irq_dest_mode;


@@ -406,13 +406,6 @@ static inline int numaq_check_phys_apicid_present(int phys_apicid)
* We use physical apicids here, not logical, so just return the default
* physical broadcast to stop people from breaking us
*/
static int
numaq_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *apicid)
{
*apicid = 0x0F;
return 0;
}
static int
numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
@@ -499,7 +492,6 @@ static struct apic __refdata apic_numaq = {
.set_apic_id = NULL,
.apic_id_mask = 0x0F << 24,
.cpu_mask_to_apicid = numaq_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and,
.send_IPI_mask = numaq_send_IPI_mask,


@@ -108,7 +108,6 @@ static struct apic apic_default = {
.set_apic_id = NULL,
.apic_id_mask = 0x0F << 24,
.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
.send_IPI_mask = default_send_IPI_mask_logical,


@@ -263,7 +263,7 @@ static int summit_check_phys_apicid_present(int physical_apicid)
return 1;
}
static int
static inline int
summit_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
{
unsigned int round = 0;
@@ -516,7 +516,6 @@ static struct apic apic_summit = {
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
.cpu_mask_to_apicid = summit_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and,
.send_IPI_mask = summit_send_IPI_mask,


@@ -96,22 +96,6 @@ static void x2apic_send_IPI_all(int vector)
__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}
static int
x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *apicid)
{
int cpu = cpumask_first_and(cpumask, cpu_online_mask);
int i;
if (cpu >= nr_cpu_ids)
return -EINVAL;
*apicid = 0;
for_each_cpu_and(i, cpumask, per_cpu(cpus_in_cluster, cpu))
*apicid |= per_cpu(x86_cpu_to_logical_apicid, i);
return 0;
}
static int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
@@ -270,7 +254,6 @@ static struct apic apic_x2apic_cluster = {
.set_apic_id = x2apic_set_apic_id,
.apic_id_mask = 0xFFFFFFFFu,
.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
.send_IPI_mask = x2apic_send_IPI_mask,


@@ -123,7 +123,6 @@ static struct apic apic_x2apic_phys = {
.set_apic_id = x2apic_set_apic_id,
.apic_id_mask = 0xFFFFFFFFu,
.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = x2apic_send_IPI_mask,


@@ -269,27 +269,6 @@ static void uv_init_apic_ldr(void)
{
}
static inline int __uv_cpu_to_apicid(int cpu, unsigned int *apicid)
{
if (likely((unsigned int)cpu < nr_cpu_ids)) {
*apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
return 0;
} else {
return -EINVAL;
}
}
static int
uv_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *apicid)
{
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
int cpu = cpumask_first_and(cpumask, cpu_online_mask);
return __uv_cpu_to_apicid(cpu, apicid);
}
static int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
@@ -306,7 +285,12 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
break;
}
return __uv_cpu_to_apicid(cpu, apicid);
if (likely((unsigned int)cpu < nr_cpu_ids)) {
*apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
return 0;
} else {
return -EINVAL;
}
}
static unsigned int x2apic_get_apic_id(unsigned long x)
@@ -384,7 +368,6 @@ static struct apic __refdata apic_x2apic_uv_x = {
.set_apic_id = set_apic_id,
.apic_id_mask = 0xFFFFFFFFu,
.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
.send_IPI_mask = uv_send_IPI_mask,


@@ -144,7 +144,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
if (err != 0)
return err;
err = apic->cpu_mask_to_apicid(eligible_cpu, &dest);
err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
if (err != 0)
return err;