mm: add vfree_atomic()
We are going to use a sleeping lock for freeing vmap. However, some vfree()
users want to free memory from atomic (but not from interrupt) context. For
this we add vfree_atomic() - a deferred variant of vfree() which can be used
in any atomic context (except NMIs).

[akpm@linux-foundation.org: tweak comment grammar]
[aryabinin@virtuozzo.com: use raw_cpu_ptr() instead of this_cpu_ptr()]
Link: http://lkml.kernel.org/r/1481553981-3856-1-git-send-email-aryabinin@virtuozzo.com
Link: http://lkml.kernel.org/r/1479474236-4139-5-git-send-email-hch@lst.de
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Jisheng Zhang <jszhang@marvell.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: John Dias <joaodias@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent 0574ecd141
commit bf22e37a64
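
For context on how the new interface is meant to be used: a caller that needs to drop a vmalloc()'ed buffer while holding a spinlock (atomic, but not interrupt or NMI, context) calls vfree_atomic() instead of vfree(). The sketch below is hypothetical; struct foo_cache, its fields, and foo_drop_buf() are made-up names for illustration and are not part of this patch.

#include <linux/spinlock.h>
#include <linux/vmalloc.h>

/* Hypothetical structure holding a vmalloc()'ed buffer. */
struct foo_cache {
	spinlock_t lock;
	void *buf;		/* allocated with vmalloc() */
};

static void foo_drop_buf(struct foo_cache *c)
{
	spin_lock(&c->lock);
	/*
	 * Spinlock held, so we are in atomic context: plain vfree() is not
	 * safe once freeing takes a sleeping lock. vfree_atomic() queues the
	 * address on a per-cpu llist and lets a workqueue do the real unmap
	 * later, so it may be called here.
	 */
	vfree_atomic(c->buf);
	c->buf = NULL;
	spin_unlock(&c->lock);
}
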
include/linux/vmalloc.h
@@ -82,6 +82,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			const void *caller);
 
 extern void vfree(const void *addr);
+extern void vfree_atomic(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
mm/vmalloc.c (42 changed lines)
@@ -1486,7 +1486,39 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	kfree(area);
 	return;
 }
 
+static inline void __vfree_deferred(const void *addr)
+{
+	/*
+	 * Use raw_cpu_ptr() because this can be called from preemptible
+	 * context. Preemption is absolutely fine here, because the llist_add()
+	 * implementation is lockless, so it works even if we are adding to
+	 * another cpu's list. schedule_work() should be fine with this too.
+	 */
+	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+
+	if (llist_add((struct llist_node *)addr, &p->list))
+		schedule_work(&p->wq);
+}
+
+/**
+ * vfree_atomic - release memory allocated by vmalloc()
+ * @addr: memory base address
+ *
+ * This one is just like vfree() but can be called in any atomic context
+ * except NMIs.
+ */
+void vfree_atomic(const void *addr)
+{
+	BUG_ON(in_nmi());
+
+	kmemleak_free(addr);
+
+	if (!addr)
+		return;
+	__vfree_deferred(addr);
+}
+
 /**
  * vfree - release memory allocated by vmalloc()
  * @addr: memory base address
@@ -1509,11 +1541,9 @@ void vfree(const void *addr)
 
 	if (!addr)
 		return;
-	if (unlikely(in_interrupt())) {
-		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
-		if (llist_add((struct llist_node *)addr, &p->list))
-			schedule_work(&p->wq);
-	} else
+	if (unlikely(in_interrupt()))
+		__vfree_deferred(addr);
+	else
 		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
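
The two hunks above hang off per-cpu deferred-free machinery that already exists in mm/vmalloc.c (added earlier for the in_interrupt() path in vfree()) and is therefore not visible in this diff. Roughly, that machinery looks like the sketch below; treat it as context, not as part of this change.

/*
 * Sketch of the pre-existing deferred-free infrastructure in mm/vmalloc.c
 * that __vfree_deferred() feeds; shown for context, not part of this patch.
 */
struct vfree_deferred {
	struct llist_head list;	/* lock-free list of addresses to free */
	struct work_struct wq;	/* worker that drains the list */
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);

	while (llnode) {
		void *addr = llnode;

		llnode = llist_next(llnode);
		__vunmap(addr, 1);	/* real unmap, runs in process context */
	}
}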