commit 54be820019

Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux

Pull slab update from Pekka Enberg:
 "Highlights:

  - Fix for boot-time problems on some architectures due to
    init_lock_keys() not respecting kmalloc_caches boundaries
    (Christoph Lameter)

  - CONFIG_SLUB_CPU_PARTIAL requested by RT folks (Joonsoo Kim)

  - Fix for excessive slab freelist draining (Wanpeng Li)

  - SLUB and SLOB cleanups and fixes (various people)"

I ended up editing the branch, and this avoids two commits at the end
that were immediately reverted, and I instead just applied the oneliner
fix in between myself.

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  slub: Check for page NULL before doing the node_match check
  mm/slab: Give s_next and s_stop slab-specific names
  slob: Check for NULL pointer before calling ctor()
  slub: Make cpu partial slab support configurable
  slab: add kmalloc() to kernel API documentation
  slab: fix init_lock_keys
  slob: use DIV_ROUND_UP where possible
  slub: do not put a slab to cpu partial list when cpu_partial is 0
  mm/slub: Use node_nr_slabs and node_nr_objs in get_slabinfo
  mm/slub: Drop unnecessary nr_partials
  mm/slab: Fix /proc/slabinfo unwriteable for slab
  mm/slab: Sharing s_next and s_stop between slab and slub
  mm/slab: Fix drain freelist excessively
  slob: Rework #ifdeffery in slab.h
  mm, slab: moved kmem_cache_alloc_node comment to correct place
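The centrepiece is CONFIG_SLUB_CPU_PARTIAL: throughout the mm/slub.c hunks below,
open-coded kmem_cache_debug() checks are replaced by a kmem_cache_has_cpu_partial()
helper, so the per-cpu partial list paths compile away when the option is off. A
minimal userspace sketch of that pattern follows; the kmem_cache stub, the flag
value and main() are illustrative stand-ins, not kernel code.

    /*
     * Illustration of the CONFIG_SLUB_CPU_PARTIAL gating pattern used in the
     * mm/slub.c hunks below.  Builds as a plain userspace program; comment
     * out the define to mimic CONFIG_SLUB_CPU_PARTIAL=n.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define CONFIG_SLUB_CPU_PARTIAL

    #define SLAB_DEBUG_FLAGS 0x1UL      /* stand-in for the real debug flag mask */

    struct kmem_cache {
        unsigned int cpu_partial;       /* objects kept on per-cpu partial lists */
        unsigned long flags;
    };

    static inline bool kmem_cache_debug(struct kmem_cache *s)
    {
        return s->flags & SLAB_DEBUG_FLAGS;
    }

    /*
     * One helper answers the question everywhere: per-cpu partial lists are
     * used only when the option is compiled in and the cache is not a debug
     * cache.  In the patch, callers such as __slab_free() and
     * kmem_cache_open() branch on this instead of on kmem_cache_debug().
     */
    static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
    {
    #ifdef CONFIG_SLUB_CPU_PARTIAL
        return !kmem_cache_debug(s);
    #else
        return false;
    #endif
    }

    int main(void)
    {
        struct kmem_cache cache = { .cpu_partial = 30, .flags = 0 };

        if (kmem_cache_has_cpu_partial(&cache))
            printf("per-cpu partial lists enabled (cpu_partial=%u)\n",
                   cache.cpu_partial);
        else
            printf("per-cpu partial lists disabled\n");
        return 0;
    }

With the define removed the helper constant-folds to false, kmem_cache_open()
sets cpu_partial to 0, and put_cpu_partial()/unfreeze_partials() become empty
bodies via #ifdef, which is how the patch below disables the feature.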
diff --git a/include/linux/slab.h b/include/linux/slab.h
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -169,11 +169,7 @@ struct kmem_cache {
     struct list_head list;  /* List of all slab caches on the system */
 };
 
-#define KMALLOC_MAX_SIZE (1UL << 30)
-
-#include <linux/slob_def.h>
-
-#else /* CONFIG_SLOB */
+#endif /* CONFIG_SLOB */
 
 /*
  * Kmalloc array related definitions
@@ -195,7 +191,9 @@ struct kmem_cache {
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW   5
 #endif
-#else
+#endif
+
+#ifdef CONFIG_SLUB
 /*
  * SLUB allocates up to order 2 pages directly and otherwise
  * passes the request to the page allocator.
@@ -207,6 +205,19 @@ struct kmem_cache {
 #endif
 #endif
 
+#ifdef CONFIG_SLOB
+/*
+ * SLOB passes all page size and larger requests to the page allocator.
+ * No kmalloc array is necessary since objects of different sizes can
+ * be allocated from the same page.
+ */
+#define KMALLOC_SHIFT_MAX   30
+#define KMALLOC_SHIFT_HIGH  PAGE_SHIFT
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW   3
+#endif
+#endif
+
 /* Maximum allocatable size */
 #define KMALLOC_MAX_SIZE    (1UL << KMALLOC_SHIFT_MAX)
 /* Maximum size for which we actually use a slab cache */
@@ -221,6 +232,7 @@ struct kmem_cache {
 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
 #endif
 
+#ifndef CONFIG_SLOB
 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 #ifdef CONFIG_ZONE_DMA
 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
@@ -275,13 +287,18 @@ static __always_inline int kmalloc_index(size_t size)
     /* Will never be reached. Needed because the compiler may complain */
     return -1;
 }
+#endif /* !CONFIG_SLOB */
 
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
-#elif defined(CONFIG_SLUB)
+#endif
+
+#ifdef CONFIG_SLUB
 #include <linux/slub_def.h>
-#else
-#error "Unknown slab allocator"
+#endif
+
+#ifdef CONFIG_SLOB
+#include <linux/slob_def.h>
 #endif
 
 /*
@@ -291,6 +308,7 @@ static __always_inline int kmalloc_index(size_t size)
  */
 static __always_inline int kmalloc_size(int n)
 {
+#ifndef CONFIG_SLOB
     if (n > 2)
         return 1 << n;
 
@@ -299,10 +317,9 @@ static __always_inline int kmalloc_size(int n)
 
     if (n == 2 && KMALLOC_MIN_SIZE <= 64)
         return 192;
-
+#endif
     return 0;
 }
-#endif /* !CONFIG_SLOB */
 
 /*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
@@ -356,9 +373,8 @@ int cache_show(struct kmem_cache *s, struct seq_file *m);
 void print_slabinfo_header(struct seq_file *m);
 
 /**
- * kmalloc_array - allocate memory for an array.
- * @n: number of elements.
- * @size: element size.
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate.
  *
  * The @flags argument may be one of:
@@ -405,6 +421,17 @@ void print_slabinfo_header(struct seq_file *m);
  * There are other flags available as well, but these are not intended
  * for general use, and so are not documented here. For a full list of
  * potential flags, always refer to linux/gfp.h.
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags);
+
+/**
+ * kmalloc_array - allocate memory for an array.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate (see kmalloc).
  */
 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
@@ -428,7 +455,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 /**
  * kmalloc_node - allocate memory from a specific node
  * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
+ * @flags: the type of memory to allocate (see kmalloc).
  * @node: node to allocate from.
  *
  * kmalloc() for non-local nodes, used to allocate from a specific node
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -18,14 +18,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
     return __kmalloc_node(size, flags, node);
 }
 
-/**
- * kmalloc - allocate memory
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
- *
- * kmalloc is the normal method of allocating memory
- * in the kernel.
- */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
     return __kmalloc_node(size, flags, NUMA_NO_NODE);
diff --git a/init/Kconfig b/init/Kconfig
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1596,6 +1596,17 @@ config SLOB
 
 endchoice
 
+config SLUB_CPU_PARTIAL
+    default y
+    depends on SLUB
+    bool "SLUB per cpu partial cache"
+    help
+      Per cpu partial caches accellerate objects allocation and freeing
+      that is local to a processor at the price of more indeterminism
+      in the latency of the free. On overflow these caches will be cleared
+      which requires the taking of locks that may cause latency spikes.
+      Typically one would choose no for a realtime system.
+
 config MMAP_ALLOW_UNINITIALIZED
     bool "Allow mmapped anonymous memory to be uninitialized"
     depends on EXPERT && !MMU
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -565,7 +565,7 @@ static void init_node_lock_keys(int q)
     if (slab_state < UP)
         return;
 
-    for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+    for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
         struct kmem_cache_node *n;
         struct kmem_cache *cache = kmalloc_caches[i];
 
@@ -1180,6 +1180,12 @@ static int init_cache_node_node(int node)
     return 0;
 }
 
+static inline int slabs_tofree(struct kmem_cache *cachep,
+                                struct kmem_cache_node *n)
+{
+    return (n->free_objects + cachep->num - 1) / cachep->num;
+}
+
 static void __cpuinit cpuup_canceled(long cpu)
 {
     struct kmem_cache *cachep;
@@ -1241,7 +1247,7 @@ free_array_cache:
         n = cachep->node[node];
         if (!n)
             continue;
-        drain_freelist(cachep, n, n->free_objects);
+        drain_freelist(cachep, n, slabs_tofree(cachep, n));
     }
 }
 
@@ -1408,7 +1414,7 @@ static int __meminit drain_cache_node_node(int node)
         if (!n)
             continue;
 
-        drain_freelist(cachep, n, n->free_objects);
+        drain_freelist(cachep, n, slabs_tofree(cachep, n));
 
         if (!list_empty(&n->slabs_full) ||
             !list_empty(&n->slabs_partial)) {
@@ -2532,7 +2538,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
         if (!n)
             continue;
 
-        drain_freelist(cachep, n, n->free_objects);
+        drain_freelist(cachep, n, slabs_tofree(cachep, n));
 
         ret += !list_empty(&n->slabs_full) ||
             !list_empty(&n->slabs_partial);
@@ -3338,18 +3344,6 @@ done:
     return obj;
 }
 
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
 static __always_inline void *
 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
            unsigned long caller)
@@ -3643,6 +3637,17 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 #ifdef CONFIG_NUMA
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
     void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
@@ -4431,20 +4436,10 @@ static int leaks_show(struct seq_file *m, void *p)
     return 0;
 }
 
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
-    return seq_list_next(p, &slab_caches, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-{
-    mutex_unlock(&slab_mutex);
-}
-
 static const struct seq_operations slabstats_op = {
     .start = leaks_start,
-    .next = s_next,
-    .stop = s_stop,
+    .next = slab_next,
+    .stop = slab_stop,
     .show = leaks_show,
 };
 
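The drain_freelist() call sites above now pass slabs_tofree(cachep, n) rather
than n->free_objects: drain_freelist() expects a number of slabs, so passing
the free object count asked it to drain far more slabs than could ever be
free. A standalone sketch of the arithmetic (the values are made up; this is
not kernel code):

    #include <stdio.h>

    /*
     * Same calculation as the slabs_tofree() helper added in mm/slab.c above:
     * at most ceil(free_objects / objects-per-slab) slabs can be completely
     * free, so that is the most drain_freelist() should be asked to drop.
     */
    static unsigned long slabs_tofree(unsigned long free_objects,
                                      unsigned long objs_per_slab)
    {
        return (free_objects + objs_per_slab - 1) / objs_per_slab;
    }

    int main(void)
    {
        /* e.g. 250 free objects in a cache that packs 32 objects per slab:
         * request at most 8 slabs rather than 250. */
        printf("%lu\n", slabs_tofree(250, 32));
        return 0;
    }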
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -271,3 +271,6 @@ struct kmem_cache_node {
 #endif
 
 };
+
+void *slab_next(struct seq_file *m, void *p, loff_t *pos);
+void slab_stop(struct seq_file *m, void *p);
diff --git a/mm/slab_common.c b/mm/slab_common.c
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -497,6 +497,13 @@ void __init create_kmalloc_caches(unsigned long flags)
 
 
 #ifdef CONFIG_SLABINFO
+
+#ifdef CONFIG_SLAB
+#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
+#else
+#define SLABINFO_RIGHTS S_IRUSR
+#endif
+
 void print_slabinfo_header(struct seq_file *m)
 {
     /*
@@ -531,12 +538,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
     return seq_list_start(&slab_caches, *pos);
 }
 
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+void *slab_next(struct seq_file *m, void *p, loff_t *pos)
 {
     return seq_list_next(p, &slab_caches, pos);
 }
 
-static void s_stop(struct seq_file *m, void *p)
+void slab_stop(struct seq_file *m, void *p)
 {
     mutex_unlock(&slab_mutex);
 }
@@ -613,8 +620,8 @@ static int s_show(struct seq_file *m, void *p)
  */
 static const struct seq_operations slabinfo_op = {
     .start = s_start,
-    .next = s_next,
-    .stop = s_stop,
+    .next = slab_next,
+    .stop = slab_stop,
     .show = s_show,
 };
 
@@ -633,7 +640,8 @@ static const struct file_operations proc_slabinfo_operations = {
 
 static int __init slab_proc_init(void)
 {
-    proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
+    proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
+                        &proc_slabinfo_operations);
     return 0;
 }
 module_init(slab_proc_init);
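The first mm/slob.c hunk below swaps the open-coded round-up in SLOB_UNITS()
for DIV_ROUND_UP(). The two spellings are arithmetically identical; a quick
standalone check, with a made-up SLOB_UNIT standing in for sizeof(slob_t):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))
    #define SLOB_UNIT            8   /* stand-in for sizeof(slob_t) */

    #define SLOB_UNITS_OLD(size) (((size) + SLOB_UNIT - 1) / SLOB_UNIT)
    #define SLOB_UNITS_NEW(size) DIV_ROUND_UP(size, SLOB_UNIT)

    int main(void)
    {
        unsigned int size, mismatches = 0;

        for (size = 1; size <= 4096; size++)
            if (SLOB_UNITS_OLD(size) != SLOB_UNITS_NEW(size))
                mismatches++;
        printf("%u mismatches\n", mismatches);   /* prints: 0 mismatches */
        return 0;
    }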
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -122,7 +122,7 @@ static inline void clear_slob_page_free(struct page *sp)
 }
 
 #define SLOB_UNIT sizeof(slob_t)
-#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
+#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
 
 /*
  * struct slob_rcu is inserted at the tail of allocated slob blocks, which
@@ -554,7 +554,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
                         flags, node);
     }
 
-    if (c->ctor)
+    if (b && c->ctor)
         c->ctor(b);
 
     kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -123,6 +123,15 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
+static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+    return !kmem_cache_debug(s);
+#else
+    return false;
+#endif
+}
+
 /*
  * Issues still to be resolved:
  *
@@ -1573,7 +1582,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
             put_cpu_partial(s, page, 0);
             stat(s, CPU_PARTIAL_NODE);
         }
-        if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+        if (!kmem_cache_has_cpu_partial(s)
+            || available > s->cpu_partial / 2)
             break;
 
     }
@@ -1884,6 +1894,7 @@ redo:
 static void unfreeze_partials(struct kmem_cache *s,
         struct kmem_cache_cpu *c)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
     struct kmem_cache_node *n = NULL, *n2 = NULL;
     struct page *page, *discard_page = NULL;
 
@@ -1938,6 +1949,7 @@ static void unfreeze_partials(struct kmem_cache *s,
         discard_slab(s, page);
         stat(s, FREE_SLAB);
     }
+#endif
 }
 
 /*
@@ -1951,10 +1963,14 @@ static void unfreeze_partials(struct kmem_cache *s,
  */
 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
     struct page *oldpage;
     int pages;
     int pobjects;
 
+    if (!s->cpu_partial)
+        return;
+
     do {
         pages = 0;
         pobjects = 0;
@@ -1987,6 +2003,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
         page->next = oldpage;
 
     } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+#endif
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2358,7 +2375,7 @@ redo:
 
     object = c->freelist;
     page = c->page;
-    if (unlikely(!object || !node_match(page, node)))
+    if (unlikely(!object || !page || !node_match(page, node)))
         object = __slab_alloc(s, gfpflags, node, addr, c);
 
     else {
@@ -2495,7 +2512,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
         new.inuse--;
         if ((!new.inuse || !prior) && !was_frozen) {
 
-            if (!kmem_cache_debug(s) && !prior)
+            if (kmem_cache_has_cpu_partial(s) && !prior)
 
                 /*
                  * Slab was on no list before and will be partially empty
@@ -2550,8 +2567,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
      * Objects left in the slab. If it was not on the partial list before
      * then add it.
      */
-    if (kmem_cache_debug(s) && unlikely(!prior)) {
-        remove_full(s, page);
+    if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
+        if (kmem_cache_debug(s))
+            remove_full(s, page);
         add_partial(n, page, DEACTIVATE_TO_TAIL);
         stat(s, FREE_ADD_PARTIAL);
     }
@@ -3059,7 +3077,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
      * per node list when we run out of per cpu objects. We only fetch 50%
      * to keep some capacity around for frees.
      */
-    if (kmem_cache_debug(s))
+    if (!kmem_cache_has_cpu_partial(s))
         s->cpu_partial = 0;
     else if (s->size >= PAGE_SIZE)
         s->cpu_partial = 2;
@@ -4456,7 +4474,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
     err = strict_strtoul(buf, 10, &objects);
     if (err)
         return err;
-    if (objects && kmem_cache_debug(s))
+    if (objects && !kmem_cache_has_cpu_partial(s))
         return -EINVAL;
 
     s->cpu_partial = objects;
@@ -5269,7 +5287,6 @@ __initcall(slab_sysfs_init);
 #ifdef CONFIG_SLABINFO
 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
 {
-    unsigned long nr_partials = 0;
     unsigned long nr_slabs = 0;
     unsigned long nr_objs = 0;
     unsigned long nr_free = 0;
@@ -5281,9 +5298,8 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
         if (!n)
             continue;
 
-        nr_partials += n->nr_partial;
-        nr_slabs += atomic_long_read(&n->nr_slabs);
-        nr_objs += atomic_long_read(&n->total_objects);
+        nr_slabs += node_nr_slabs(n);
+        nr_objs += node_nr_objs(n);
         nr_free += count_partial(n, count_free);
     }
 