mm/slab: factor out kmem_cache_node initialization code
It can be reused in other places, so factor it out. A following patch will use it.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
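For illustration, here is a minimal sketch of how the factored-out helper might be reused by a follow-up patch. The setup_all_cache_nodes() wrapper and its loop are invented for this sketch and are not part of the commit; only init_cache_node() comes from the patch below, and the snippet assumes normal mm/slab.c kernel context:

/*
 * Hypothetical sketch only: walk every online node and reuse
 * init_cache_node() instead of open-coding kmem_cache_node setup.
 * setup_all_cache_nodes() is an invented name for illustration.
 */
static int setup_all_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
{
        int node, ret;

        for_each_online_node(node) {
                ret = init_cache_node(cachep, node, gfp);
                if (ret)
                        return ret;
        }

        return 0;
}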
commit ded0ecf611
parent a5aa63a5f7
mm/slab.c (74 lines changed)
@@ -848,6 +848,46 @@ static inline gfp_t gfp_exact_node(gfp_t flags)
 }
 #endif
 
+static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
+{
+        struct kmem_cache_node *n;
+
+        /*
+         * Set up the kmem_cache_node for cpu before we can
+         * begin anything. Make sure some other cpu on this
+         * node has not already allocated this
+         */
+        n = get_node(cachep, node);
+        if (n) {
+                spin_lock_irq(&n->list_lock);
+                n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
+                                cachep->num;
+                spin_unlock_irq(&n->list_lock);
+
+                return 0;
+        }
+
+        n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
+        if (!n)
+                return -ENOMEM;
+
+        kmem_cache_node_init(n);
+        n->next_reap = jiffies + REAPTIMEOUT_NODE +
+                    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
+
+        n->free_limit =
+                (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
+
+        /*
+         * The kmem_cache_nodes don't come and go as CPUs
+         * come and go. slab_mutex is sufficient
+         * protection here.
+         */
+        cachep->node[node] = n;
+
+        return 0;
+}
+
 /*
  * Allocates and initializes node for a node on each slab cache, used for
  * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
@@ -859,39 +899,15 @@ static inline gfp_t gfp_exact_node(gfp_t flags)
  */
 static int init_cache_node_node(int node)
 {
+        int ret;
         struct kmem_cache *cachep;
-        struct kmem_cache_node *n;
-        const size_t memsize = sizeof(struct kmem_cache_node);
 
         list_for_each_entry(cachep, &slab_caches, list) {
-                /*
-                 * Set up the kmem_cache_node for cpu before we can
-                 * begin anything. Make sure some other cpu on this
-                 * node has not already allocated this
-                 */
-                n = get_node(cachep, node);
-                if (!n) {
-                        n = kmalloc_node(memsize, GFP_KERNEL, node);
-                        if (!n)
-                                return -ENOMEM;
-                        kmem_cache_node_init(n);
-                        n->next_reap = jiffies + REAPTIMEOUT_NODE +
-                            ((unsigned long)cachep) % REAPTIMEOUT_NODE;
-
-                        /*
-                         * The kmem_cache_nodes don't come and go as CPUs
-                         * come and go. slab_mutex is sufficient
-                         * protection here.
-                         */
-                        cachep->node[node] = n;
-                }
-
-                spin_lock_irq(&n->list_lock);
-                n->free_limit =
-                        (1 + nr_cpus_node(node)) *
-                        cachep->batchcount + cachep->num;
-                spin_unlock_irq(&n->list_lock);
+                ret = init_cache_node(cachep, node, GFP_KERNEL);
+                if (ret)
+                        return ret;
         }
+
         return 0;
 }
 