slub: Commonize slab_cache field in struct page
Right now, slab and slub have fields in struct page to derive which cache a page belongs to, but they do it slightly differently: slab uses a field called slab_cache, which lives in the third double word; slub uses a field called "slab", which lives outside of the double word area.

Ideally, we would use the same field for this. Since slub makes heavy use of the double word region, there isn't really much room to move slub's slab_cache field around. Slab does not have such strict placement restrictions, so we can move its field outside the double word area instead.

The naming used by slab, "slab_cache", is less confusing, and it is preferred over slub's generic "slab".

Signed-off-by: Glauber Costa <glommer@parallels.com>
Acked-by: Christoph Lameter <cl@linux.com>
CC: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
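For orientation, here is a condensed sketch of the relevant struct page members after this patch, assembled from the two hunks below. Most other members are elided, and the members not shown in the hunks (lru, private) are recalled from contemporaneous mm_types.h, so treat them as approximate:

	struct page {
		/* ... flags, mapping, first double word block elided ... */
		union {				/* second double word block */
			struct list_head lru;
			struct list_head list;		/* slobs list of pages */
			struct slab *slab_page;		/* slab fields */
		};
		/* Remainder is not double word aligned */
		union {
			unsigned long private;
	#if USE_SPLIT_PTLOCKS
			spinlock_t ptl;
	#endif
			struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
			struct page *first_page;	/* Compound tail pages */
		};
		/* ... */
	};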
commit 1b4f59e356
parent b4f591c45f
include/linux/mm_types.h

@@ -128,10 +128,7 @@ struct page {
 		};
 
 		struct list_head list;	/* slobs list of pages */
-		struct {		/* slab fields */
-			struct kmem_cache *slab_cache;
-			struct slab *slab_page;
-		};
+		struct slab *slab_page;	/* slab fields */
 	};
 
 	/* Remainder is not double word aligned */
@@ -146,7 +143,7 @@ struct page {
 #if USE_SPLIT_PTLOCKS
 		spinlock_t ptl;
 #endif
-		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
 		struct page *first_page;	/* Compound tail pages */
 	};
 
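The "double word" constraint the commit message refers to: SLUB updates two adjacent machine words of struct page (the freelist pointer and the packed counters word) with a single double-width compare-and-swap, which requires the pair to stay adjacent and naturally aligned, leaving no spare room in that region. The following standalone, user-space sketch illustrates the double-word CAS technique using the GCC/Clang __atomic builtins; the names here (struct pair, cas_pair) are invented for illustration and are not kernel API:

	/*
	 * Illustration only, not kernel code: two adjacent words updated
	 * atomically as one 16-byte unit.
	 * Build: gcc -O2 -mcx16 dwcas.c -latomic
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct pair {
		void *word0;		/* plays the role of a freelist pointer */
		uintptr_t word1;	/* plays the role of packed counters */
	} __attribute__((aligned(16)));	/* double-word CAS needs natural alignment */

	static bool cas_pair(struct pair *p, struct pair *expected, struct pair desired)
	{
		/* Compiles to LOCK CMPXCHG16B on x86-64 (or a libatomic call). */
		return __atomic_compare_exchange(p, expected, &desired, false,
						 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	}

	int main(void)
	{
		struct pair slot = { NULL, 0 };
		struct pair old = slot;
		struct pair new = { &slot, 1 };

		printf("double-word swap %s\n",
		       cas_pair(&slot, &old, new) ? "succeeded" : "failed");
		return 0;
	}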
mm/slub.c (24 lines changed)

@@ -1092,11 +1092,11 @@ static noinline struct kmem_cache_node *free_debug_processing(
 	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
 		goto out;
 
-	if (unlikely(s != page->slab)) {
+	if (unlikely(s != page->slab_cache)) {
 		if (!PageSlab(page)) {
 			slab_err(s, page, "Attempt to free object(0x%p) "
 				"outside of slab", object);
-		} else if (!page->slab) {
+		} else if (!page->slab_cache) {
 			printk(KERN_ERR
 				"SLUB <none>: no slab for object 0x%p.\n",
 						object);
@@ -1357,7 +1357,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		goto out;
 
 	inc_slabs_node(s, page_to_nid(page), page->objects);
-	page->slab = s;
+	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page->pfmemalloc)
 		SetPageSlabPfmemalloc(page);
@@ -1424,7 +1424,7 @@ static void rcu_free_slab(struct rcu_head *h)
 	else
 		page = container_of((struct list_head *)h, struct page, lru);
 
-	__free_slab(page->slab, page);
+	__free_slab(page->slab_cache, page);
 }
 
 static void free_slab(struct kmem_cache *s, struct page *page)
@@ -2617,9 +2617,9 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_head_page(x);
 
-	if (kmem_cache_debug(s) && page->slab != s) {
+	if (kmem_cache_debug(s) && page->slab_cache != s) {
 		pr_err("kmem_cache_free: Wrong slab cache. %s but object"
-			" is from %s\n", page->slab->name, s->name);
+			" is from %s\n", page->slab_cache->name, s->name);
 		WARN_ON_ONCE(1);
 		return;
 	}
@@ -3418,7 +3418,7 @@ size_t ksize(const void *object)
 		return PAGE_SIZE << compound_order(page);
 	}
 
-	return slab_ksize(page->slab);
+	return slab_ksize(page->slab_cache);
 }
 EXPORT_SYMBOL(ksize);
 
@@ -3443,8 +3443,8 @@ bool verify_mem_not_deleted(const void *x)
 	}
 
 	slab_lock(page);
-	if (on_freelist(page->slab, page, object)) {
-		object_err(page->slab, page, object, "Object is on free-list");
+	if (on_freelist(page->slab_cache, page, object)) {
+		object_err(page->slab_cache, page, object, "Object is on free-list");
 		rv = false;
 	} else {
 		rv = true;
@@ -3475,7 +3475,7 @@ void kfree(const void *x)
 		__free_pages(page, compound_order(page));
 		return;
 	}
-	slab_free(page->slab, page, object, _RET_IP_);
+	slab_free(page->slab_cache, page, object, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3686,11 +3686,11 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
 
 		if (n) {
 			list_for_each_entry(p, &n->partial, lru)
-				p->slab = s;
+				p->slab_cache = s;
 
 #ifdef CONFIG_SLUB_DEBUG
 			list_for_each_entry(p, &n->full, lru)
-				p->slab = s;
+				p->slab_cache = s;
 #endif
 		}
 	}