numa: make "nr_node_ids" unsigned int
Number of NUMA nodes can't be negative.

This saves a few bytes on x86_64:

	add/remove: 0/0 grow/shrink: 4/21 up/down: 27/-265 (-238)
	Function                     old     new   delta
	hv_synic_alloc.cold           88     110     +22
	prealloc_shrinker            260     262      +2
	bootstrap                    249     251      +2
	sched_init_numa             1566    1567      +1
	show_slab_objects            778     777      -1
	s_show                      1201    1200      -1
	kmem_cache_init              346     345      -1
	__alloc_workqueue_key       1146    1145      -1
	mem_cgroup_css_alloc        1614    1612      -2
	__do_sys_swapon             4702    4699      -3
	__list_lru_init              655     651      -4
	nic_probe                   2379    2374      -5
	store_user_store             118     111      -7
	red_zone_store               106      99      -7
	poison_store                 106      99      -7
	wq_numa_init                 348     338     -10
	__kmem_cache_empty            75      65     -10
	task_numa_free               186     173     -13
	merge_across_nodes_store     351     336     -15
	irq_create_affinity_masks   1261    1246     -15
	do_numa_crng_init            343     321     -22
	task_numa_fault             4760    4737     -23
	swapfile_init                179     156     -23
	hv_synic_alloc               536     492     -44
	apply_wqattrs_prepare        746     695     -51

Link: http://lkml.kernel.org/r/20190201223029.GA15820@avx2
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
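The size table above is in the format produced by the kernel's scripts/bloat-o-meter, which compares symbol sizes between two builds; assuming before/after vmlinux images, a comparable table comes from:

	./scripts/bloat-o-meter vmlinux.old vmlinux.new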
parent d342a0b386
commit b9726c26dc
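Why the change saves bytes: on x86_64, mixing a signed int into 64-bit size or index arithmetic forces the compiler to emit a sign extension (movslq, which carries a REX prefix), whereas an unsigned int is zero-extended for free by an ordinary 32-bit mov. A minimal userspace sketch of the pattern (names here are illustrative, not from the patch):

#include <stddef.h>

extern int nr_nodes_signed;		/* old-style declaration */
extern unsigned int nr_nodes_unsigned;	/* new-style declaration */

size_t table_bytes_signed(size_t elem_size)
{
	return elem_size * nr_nodes_signed;	/* movslq + imul */
}

size_t table_bytes_unsigned(size_t elem_size)
{
	return elem_size * nr_nodes_unsigned;	/* plain mov (zero-extends) + imul */
}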
@@ -120,7 +120,7 @@ static void __init setup_node_to_cpumask_map(void)
 	}
 
 	/* cpumask_of_node() will now work */
-	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
+	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
 }
 
 /*
@@ -84,7 +84,7 @@ static void __init setup_node_to_cpumask_map(void)
 		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
 
 	/* cpumask_of_node() will now work */
-	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
+	dbg("Node to cpumask map for %u nodes\n", nr_node_ids);
 }
 
 static int __init fake_numa_create_new_node(unsigned long end_pfn,
@@ -171,7 +171,7 @@ void __init setup_per_cpu_areas(void)
 	unsigned long delta;
 	int rc;
 
-	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%d\n",
+	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n",
 		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
 	/*
@@ -123,7 +123,7 @@ void __init setup_node_to_cpumask_map(void)
 		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
 
 	/* cpumask_of_node() will now work */
-	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
+	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
 }
 
 static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
@@ -866,7 +866,7 @@ const struct cpumask *cpumask_of_node(int node)
 {
 	if (node >= nr_node_ids) {
 		printk(KERN_WARNING
-			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
+			"cpumask_of_node(%d): node > nr_node_ids(%u)\n",
 			node, nr_node_ids);
 		dump_stack();
 		return cpu_none_mask;
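A side effect of the new type worth noting: in the `node >= nr_node_ids` check above, the signed `node` is converted to unsigned by the usual arithmetic conversions, so a negative node wraps to a huge value and is rejected by the same bounds check. A small standalone sketch of that comparison behavior (demo names are hypothetical):

#include <stdio.h>

static unsigned int nr_node_ids_demo = 4;	/* stand-in for nr_node_ids */

static int node_in_range(int node)
{
	/* node is converted to unsigned: -1 becomes UINT_MAX and fails */
	return !(node >= nr_node_ids_demo);
}

int main(void)
{
	printf("%d %d %d\n",
	       node_in_range(0),	/* 1: valid */
	       node_in_range(4),	/* 0: past the end */
	       node_in_range(-1));	/* 0: negative wraps and is caught */
	return 0;
}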
@@ -444,7 +444,7 @@ static inline int next_memory_node(int nid)
 	return next_node(nid, node_states[N_MEMORY]);
 }
 
-extern int nr_node_ids;
+extern unsigned int nr_node_ids;
 extern int nr_online_nodes;
 
 static inline void node_set_online(int nid)
@@ -485,7 +485,7 @@ static inline int num_node_state(enum node_states state)
 #define first_online_node	0
 #define first_memory_node	0
 #define next_online_node(nid)	(MAX_NUMNODES)
-#define nr_node_ids		1
+#define nr_node_ids		1U
 #define nr_online_nodes	1
 
 #define node_set_online(node)	   node_set_state((node), N_ONLINE)
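The single-node stub becomes 1U so the macro has the same type (unsigned int) as the real variable; otherwise callers using the %u formats introduced by this patch would trip -Wformat on MAX_NUMNODES == 1 builds. A small userspace sketch of the idea (the MULTI_NODE macro is invented for the demo):

#include <stdio.h>

#ifdef MULTI_NODE
extern unsigned int nr_node_ids;	/* real per-boot value */
#else
#define nr_node_ids 1U			/* stub must match the variable's type */
#endif

int main(void)
{
	/* %u is correct in both configurations only because the stub is 1U */
	printf("nodes: %u\n", nr_node_ids);
	return 0;
}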
@@ -601,7 +601,6 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
 		    struct lock_class_key *key, struct shrinker *shrinker)
 {
 	int i;
-	size_t size = sizeof(*lru->node) * nr_node_ids;
 	int err = -ENOMEM;
 
 #ifdef CONFIG_MEMCG_KMEM
@@ -612,7 +611,7 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
 #endif
 	memcg_get_cache_ids();
 
-	lru->node = kzalloc(size, GFP_KERNEL);
+	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
 	if (!lru->node)
 		goto out;
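The kzalloc(size, ...) to kcalloc(n, size, ...) conversions above are a related cleanup: both zero the allocation, but kcalloc takes the element count and element size separately and fails the allocation if the multiplication would overflow, instead of silently wrapping. A userspace analogue using calloc(), with stand-in names:

#include <stdio.h>
#include <stdlib.h>

struct lru_node_demo { long nr_items; };	/* stand-in element type */

int main(void)
{
	unsigned int nr_nodes = 4;		/* stand-in for nr_node_ids */
	struct lru_node_demo *nodes;

	/* count and size kept separate, so overflow can be detected */
	nodes = calloc(nr_nodes, sizeof(*nodes));
	if (!nodes)
		return 1;

	printf("allocated %u nodes\n", nr_nodes);
	free(nodes);
	return 0;
}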
@@ -4429,7 +4429,7 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
 	struct mem_cgroup *memcg;
-	size_t size;
+	unsigned int size;
 	int node;
 
 	size = sizeof(struct mem_cgroup);
@@ -289,7 +289,7 @@ EXPORT_SYMBOL(movable_zone);
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #if MAX_NUMNODES > 1
-int nr_node_ids __read_mostly = MAX_NUMNODES;
+unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
 int nr_online_nodes __read_mostly = 1;
 EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
@@ -677,12 +677,11 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
 	struct alien_cache **alc_ptr;
-	size_t memsize = sizeof(void *) * nr_node_ids;
 	int i;
 
 	if (limit > 1)
 		limit = 12;
-	alc_ptr = kzalloc_node(memsize, gfp, node);
+	alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
 	if (!alc_ptr)
 		return NULL;
@@ -4262,7 +4262,7 @@ void __init kmem_cache_init(void)
 	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
 				  slub_cpu_dead);
 
-	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
+	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
 		cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
@@ -2713,7 +2713,7 @@ static struct swap_info_struct *alloc_swap_info(void)
 	struct swap_info_struct *p;
 	unsigned int type;
 	int i;
-	int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node);
+	unsigned int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node);
 
 	p = kvzalloc(size, GFP_KERNEL);
 	if (!p)
@@ -374,7 +374,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
  */
 int prealloc_shrinker(struct shrinker *shrinker)
 {
-	size_t size = sizeof(*shrinker->nr_deferred);
+	unsigned int size = sizeof(*shrinker->nr_deferred);
 
 	if (shrinker->flags & SHRINKER_NUMA_AWARE)
 		size *= nr_node_ids;