cpumask: IA64: Introduce cpumask_of_{node,pcibus} to replace {node,pcibus}_to_cpumask

Impact: New APIs

The old node_to_cpumask/node_to_pcibus returned a cpumask_t: these
return a pointer to a struct cpumask.  Part of removing cpumasks from
the stack.

We can also use the new for_each_cpu_and() to avoid a temporary cpumask,
and a gratuitous test in sn_topology_show.

(Includes fix from KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>)

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Commit: fbb776c3ca (parent: 86c6f274f5)
Author: Rusty Russell, 2008-12-26 22:23:40 +10:30
4 changed files with 30 additions and 29 deletions

View File

@@ -34,6 +34,7 @@
  * Returns a bitmask of CPUs on Node 'node'.
  */
 #define node_to_cpumask(node) (node_to_cpu_mask[node])
+#define cpumask_of_node(node) (&node_to_cpu_mask[node])

 /*
  * Returns the number of the node containing Node 'nid'.
@@ -45,7 +46,7 @@
 /*
  * Returns the number of the first CPU on Node 'node'.
  */
-#define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node)))
+#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))

 /*
  * Determines the node for a given pci bus
@@ -121,6 +122,10 @@ extern void arch_fix_phys_package_id(int num, u32 slot);
 		node_to_cpumask(pcibus_to_node(bus)) \
 	)
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
+				 cpu_all_mask :				\
+				 cpumask_from_node(pcibus_to_node(bus)))
+
 #include <asm-generic/topology.h>

 #endif /* _ASM_IA64_TOPOLOGY_H */

View File

@@ -1001,7 +1001,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 	node = pxm_to_node(pxm);

 	if (node >= MAX_NUMNODES || !node_online(node) ||
-	    cpus_empty(node_to_cpumask(node)))
+	    cpumask_empty(cpumask_of_node(node)))
 		return AE_OK;

 	/* We know a gsi to node mapping! */

View File

@@ -695,32 +695,31 @@ get_target_cpu (unsigned int gsi, int irq)
 #ifdef CONFIG_NUMA
 	{
 		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
-		cpumask_t cpu_mask;
+		const struct cpumask *cpu_mask;

 		iosapic_index = find_iosapic(gsi);
 		if (iosapic_index < 0 ||
 		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
 			goto skip_numa_setup;

-		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-		cpus_and(cpu_mask, cpu_mask, domain);
-		for_each_cpu_mask(numa_cpu, cpu_mask) {
-			if (!cpu_online(numa_cpu))
-				cpu_clear(numa_cpu, cpu_mask);
+		cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
+		num_cpus = 0;
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
+			if (cpu_online(numa_cpu))
+				num_cpus++;
 		}
-
-		num_cpus = cpus_weight(cpu_mask);
 		if (!num_cpus)
 			goto skip_numa_setup;

 		/* Use irq assignment to distribute across cpus in node */
 		cpu_index = irq % num_cpus;
-		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
-			numa_cpu = next_cpu(numa_cpu, cpu_mask);
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain)
+			if (cpu_online(numa_cpu) && i++ >= cpu_index)
+				break;

-		if (numa_cpu != NR_CPUS)
+		if (numa_cpu < nr_cpu_ids)
 			return cpu_physical_id(numa_cpu);
 	}
 skip_numa_setup:
@@ -731,7 +730,7 @@ skip_numa_setup:
 	 * case of NUMA.)
 	 */
 	do {
-		if (++cpu >= NR_CPUS)
+		if (++cpu >= nr_cpu_ids)
 			cpu = 0;
 	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));

View File

@@ -385,7 +385,6 @@ static int sn_topology_show(struct seq_file *s, void *d)
 	int j;
 	const char *slabname;
 	int ordinal;
-	cpumask_t cpumask;
 	char slice;
 	struct cpuinfo_ia64 *c;
 	struct sn_hwperf_port_info *ptdata;
@@ -473,23 +472,21 @@ static int sn_topology_show(struct seq_file *s, void *d)
 		 * CPUs on this node, if any
 		 */
 		if (!SN_HWPERF_IS_IONODE(obj)) {
-			cpumask = node_to_cpumask(ordinal);
-			for_each_online_cpu(i) {
-				if (cpu_isset(i, cpumask)) {
-					slice = 'a' + cpuid_to_slice(i);
-					c = cpu_data(i);
-					seq_printf(s, "cpu %d %s%c local"
-						" freq %luMHz, arch ia64",
-						i, obj->location, slice,
-						c->proc_freq / 1000000);
-					for_each_online_cpu(j) {
-						seq_printf(s, j ? ":%d" : ", dist %d",
-							node_distance(
+			for_each_cpu_and(i, cpu_online_mask,
+					 cpumask_of_node(ordinal)) {
+				slice = 'a' + cpuid_to_slice(i);
+				c = cpu_data(i);
+				seq_printf(s, "cpu %d %s%c local"
+					   " freq %luMHz, arch ia64",
+					   i, obj->location, slice,
+					   c->proc_freq / 1000000);
+				for_each_online_cpu(j) {
+					seq_printf(s, j ? ":%d" : ", dist %d",
+						   node_distance(
 						cpu_to_node(i),
 						cpu_to_node(j)));
-					}
-					seq_putc(s, '\n');
 				}
+				seq_putc(s, '\n');
 			}
 		}