diff --git a/mm/slab.c b/mm/slab.c
index 29300fc1289a..74ece29e3a7e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -220,7 +220,6 @@ static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
 static inline void fixup_slab_list(struct kmem_cache *cachep,
 				struct kmem_cache_node *n, struct slab *slab,
 				void **list);
-static int slab_early_init = 1;
 
 #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
 
@@ -1249,8 +1248,6 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL_NODE;
 	setup_kmalloc_cache_index_table();
 
-	slab_early_init = 0;
-
 	/* 5) Replace the bootstrap kmem_cache_node */
 	{
 		int nid;
@@ -1389,7 +1386,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
 
 	BUG_ON(!folio_test_slab(folio));
 	__slab_clear_pfmemalloc(slab);
-	page_mapcount_reset(folio_page(folio, 0));
+	page_mapcount_reset(&folio->page);
 	folio->mapping = NULL;
 	/* Make the mapping reset visible before clearing the flag */
 	smp_wmb();
@@ -1398,7 +1395,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
 	unaccount_slab(slab, order, cachep);
-	__free_pages(folio_page(folio, 0), order);
+	__free_pages(&folio->page, order);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
@@ -1413,13 +1410,10 @@ static void kmem_rcu_free(struct rcu_head *head)
 }
 
 #if DEBUG
-static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
+static inline bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 {
-	if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
-		(cachep->size % PAGE_SIZE) == 0)
-		return true;
-
-	return false;
+	return debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
+			((cachep->size % PAGE_SIZE) == 0);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
@@ -3479,14 +3473,15 @@ cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			  void **p)
 {
-	size_t i;
 	struct obj_cgroup *objcg = NULL;
+	unsigned long irqflags;
+	size_t i;
 
 	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
 	if (!s)
 		return 0;
 
-	local_irq_disable();
+	local_irq_save(irqflags);
 	for (i = 0; i < size; i++) {
 		void *objp = kfence_alloc(s, s->object_size, flags) ?:
 			     __do_cache_alloc(s, flags, NUMA_NO_NODE);
@@ -3495,7 +3490,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			goto error;
 		p[i] = objp;
 	}
-	local_irq_enable();
+	local_irq_restore(irqflags);
 
 	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
 
@@ -3508,7 +3503,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	/* FIXME: Trace call missing. Christoph would like a bulk variant */
 	return size;
 error:
-	local_irq_enable();
+	local_irq_restore(irqflags);
 	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
 	slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
 	kmem_cache_free_bulk(s, i, p);
@@ -3610,8 +3605,9 @@ EXPORT_SYMBOL(kmem_cache_free);
 
 void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 {
+	unsigned long flags;
-	local_irq_disable();
+	local_irq_save(flags);
 	for (int i = 0; i < size; i++) {
 		void *objp = p[i];
 		struct kmem_cache *s;
 
@@ -3621,9 +3617,9 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 
 		/* called via kfree_bulk */
 		if (!folio_test_slab(folio)) {
-			local_irq_enable();
+			local_irq_restore(flags);
 			free_large_kmalloc(folio, objp);
-			local_irq_disable();
+			local_irq_save(flags);
 			continue;
 		}
 		s = folio_slab(folio)->slab_cache;
@@ -3640,7 +3636,7 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 
 		__cache_free(s, objp, _RET_IP_);
 	}
-	local_irq_enable();
+	local_irq_restore(flags);
 
 	/* FIXME: add tracing */
 }
diff --git a/mm/slub.c b/mm/slub.c
index 13459c69095a..1013834fb7bb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2066,7 +2066,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	unaccount_slab(slab, order, s);
-	__free_pages(folio_page(folio, 0), order);
+	__free_pages(&folio->page, order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
@@ -3913,6 +3913,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 			size_t size, void **p, struct obj_cgroup *objcg)
 {
 	struct kmem_cache_cpu *c;
+	unsigned long irqflags;
 	int i;
 
 	/*
@@ -3921,7 +3922,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 	 * handlers invoking normal fastpath.
 	 */
 	c = slub_get_cpu_ptr(s->cpu_slab);
-	local_lock_irq(&s->cpu_slab->lock);
+	local_lock_irqsave(&s->cpu_slab->lock, irqflags);
 
 	for (i = 0; i < size; i++) {
 		void *object = kfence_alloc(s, s->object_size, flags);
@@ -3942,7 +3943,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 			 */
 			c->tid = next_tid(c->tid);
 
-			local_unlock_irq(&s->cpu_slab->lock);
+			local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
 
 			/*
 			 * Invoking slow path likely have side-effect
@@ -3956,7 +3957,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 			c = this_cpu_ptr(s->cpu_slab);
 			maybe_wipe_obj_freeptr(s, p[i]);
 
-			local_lock_irq(&s->cpu_slab->lock);
+			local_lock_irqsave(&s->cpu_slab->lock, irqflags);
 
 			continue; /* goto for-loop */
 		}
@@ -3965,7 +3966,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 		maybe_wipe_obj_freeptr(s, p[i]);
 	}
 	c->tid = next_tid(c->tid);
-	local_unlock_irq(&s->cpu_slab->lock);
+	local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
 	slub_put_cpu_ptr(s->cpu_slab);
 
 	return i;
@@ -6449,7 +6450,7 @@ static void debugfs_slab_add(struct kmem_cache *s)
 
 void debugfs_slab_release(struct kmem_cache *s)
 {
-	debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root));
+	debugfs_lookup_and_remove(s->name, slab_debugfs_root);
}
 
 static int __init slab_debugfs_init(void)
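
The irq save/restore conversions above are the functional core of this patch: with the old
unconditional local_irq_enable() / local_unlock_irq(), calling the bulk APIs from a context
that already has interrupts disabled would silently re-enable them, whereas saving and
restoring the flags lets the disable/enable pairs nest correctly. Below is a minimal sketch
of the kind of caller this appears intended to permit. It is hypothetical module code, not
part of this patch; my_cache, my_lock and refill_pool() are made-up names.

#include <linux/slab.h>
#include <linux/spinlock.h>

static struct kmem_cache *my_cache;	/* hypothetical object cache */
static DEFINE_SPINLOCK(my_lock);	/* hypothetical irq-safe lock */

static int refill_pool(void **objs, size_t n)
{
	unsigned long flags;
	int got;

	spin_lock_irqsave(&my_lock, flags);	/* interrupts are now off */
	/*
	 * With this patch, kmem_cache_alloc_bulk() saves and restores the
	 * IRQ state instead of force-enabling interrupts, so interrupts
	 * stay off across the call. GFP_ATOMIC because we must not sleep.
	 */
	got = kmem_cache_alloc_bulk(my_cache, GFP_ATOMIC, n, objs);
	spin_unlock_irqrestore(&my_lock, flags);

	return got;	/* number of objects allocated, 0 on failure */
}

Note that the mm/slub.c side uses local_lock_irqsave() on s->cpu_slab->lock rather than a
bare local_irq_save(): on PREEMPT_RT a local_lock is backed by a per-CPU spinlock and does
not actually disable interrupts, so the lock-based form preserves the RT semantics while
behaving like an irqsave on non-RT configurations.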