slub: Use page variable instead of c->page.

Store the value of c->page to avoid additional fetches
from per cpu data.

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Author:    Christoph Lameter <cl@linux.com>
Date:      2012-05-09 10:09:58 -05:00
Committer: Pekka Enberg
Parent:    c17dda40a6
Commit:    f6e7def7f7

---

@@ -2208,6 +2208,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
unsigned long addr, struct kmem_cache_cpu *c)
{
void *freelist;
struct page *page;
unsigned long flags;
local_irq_save(flags);
@@ -2220,13 +2221,14 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
c = this_cpu_ptr(s->cpu_slab);
#endif
if (!c->page)
page = c->page;
if (!page)
goto new_slab;
redo:
if (unlikely(!node_match(c, node))) {
stat(s, ALLOC_NODE_MISMATCH);
deactivate_slab(s, c->page, c->freelist);
deactivate_slab(s, page, c->freelist);
c->page = NULL;
c->freelist = NULL;
goto new_slab;
@@ -2239,7 +2241,7 @@ redo:
stat(s, ALLOC_SLOWPATH);
freelist = get_freelist(s, c->page);
freelist = get_freelist(s, page);
if (!freelist) {
c->page = NULL;
@@ -2264,8 +2266,8 @@ load_freelist:
new_slab:
if (c->partial) {
c->page = c->partial;
c->partial = c->page->next;
page = c->page = c->partial;
c->partial = page->next;
stat(s, CPU_PARTIAL_ALLOC);
c->freelist = NULL;
goto redo;
@@ -2281,14 +2283,15 @@ new_slab:
return NULL;
}
page = c->page;
if (likely(!kmem_cache_debug(s)))
goto load_freelist;
/* Only entered in the debug case */
if (!alloc_debug_processing(s, c->page, freelist, addr))
if (!alloc_debug_processing(s, page, freelist, addr))
goto new_slab; /* Slab failed checks. Next slab needed */
deactivate_slab(s, c->page, get_freepointer(s, freelist));
deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
local_irq_restore(flags);