kmemleak: Handle percpu memory allocation
This patch adds kmemleak callbacks from the percpu allocator, reducing the
number of false positives caused by kmemleak not scanning such memory
blocks. The percpu chunks are never reported as leaks because of current
kmemleak limitations with the __percpu pointer not pointing directly to the
actual chunks.

Reported-by: Huajun Li <huajun.li.lee@gmail.com>
Acked-by: Christoph Lameter <cl@gentwo.org>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 74341703ed
commit f528f0b8e5
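For context, a minimal, hypothetical test module (not part of this commit;
all names invented) that reproduces the false positive this patch removes:
the only reference to a kmalloc'ed object lives inside percpu memory, which
kmemleak previously never scanned, so the object was wrongly reported as a
leak.

#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/slab.h>

/* One pointer slot per CPU; the __percpu cookie itself lives in .data. */
static void * __percpu *slot;

static int __init pcpu_leak_demo_init(void)
{
        void *obj = kmalloc(64, GFP_KERNEL);

        if (!obj)
                return -ENOMEM;

        slot = alloc_percpu(void *);    /* now also registered with kmemleak */
        if (!slot) {
                kfree(obj);
                return -ENOMEM;
        }

        /*
         * The sole reference to obj is stored in CPU 0's percpu area.
         * Without this patch, a kmemleak scan would flag obj as a leak.
         */
        *per_cpu_ptr(slot, 0) = obj;
        return 0;
}

static void __exit pcpu_leak_demo_exit(void)
{
        kfree(*per_cpu_ptr(slot, 0));
        free_percpu(slot);              /* percpu areas unregistered again */
}

module_init(pcpu_leak_demo_init);
module_exit(pcpu_leak_demo_exit);
MODULE_LICENSE("GPL");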
Documentation/kmemleak.txt
@@ -127,7 +127,10 @@ See the include/linux/kmemleak.h header for the functions prototype.
 kmemleak_init          - initialize kmemleak
 kmemleak_alloc         - notify of a memory block allocation
+kmemleak_alloc_percpu  - notify of a percpu memory block allocation
 kmemleak_free          - notify of a memory block freeing
 kmemleak_free_part     - notify of a partial memory block freeing
+kmemleak_free_percpu   - notify of a percpu memory block freeing
 kmemleak_not_leak      - mark an object as not a leak
 kmemleak_ignore        - do not scan or report an object as leak
 kmemleak_scan_area     - add scan areas inside a memory block
include/linux/kmemleak.h
@@ -26,8 +26,10 @@
 extern void kmemleak_init(void) __ref;
 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
                            gfp_t gfp) __ref;
+extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
 extern void kmemleak_free(const void *ptr) __ref;
 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
+extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
 extern void kmemleak_padding(const void *ptr, unsigned long offset,
                              size_t size) __ref;
 extern void kmemleak_not_leak(const void *ptr) __ref;
@@ -68,6 +70,9 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
                                             gfp_t gfp)
 {
 }
+static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+{
+}
 static inline void kmemleak_free(const void *ptr)
 {
 }
@@ -77,6 +82,9 @@ static inline void kmemleak_free_part(const void *ptr, size_t size)
 static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
 {
 }
+static inline void kmemleak_free_percpu(const void __percpu *ptr)
+{
+}
 static inline void kmemleak_not_leak(const void *ptr)
 {
 }
mm/kmemleak.c
@@ -230,8 +230,10 @@ static int kmemleak_skip_disable;
 /* kmemleak operation type for early logging */
 enum {
         KMEMLEAK_ALLOC,
+        KMEMLEAK_ALLOC_PERCPU,
         KMEMLEAK_FREE,
         KMEMLEAK_FREE_PART,
+        KMEMLEAK_FREE_PERCPU,
         KMEMLEAK_NOT_LEAK,
         KMEMLEAK_IGNORE,
         KMEMLEAK_SCAN_AREA,
@@ -852,6 +854,20 @@ out:
         rcu_read_unlock();
 }
 
+/*
+ * Log an early allocated block and populate the stack trace.
+ */
+static void early_alloc_percpu(struct early_log *log)
+{
+        unsigned int cpu;
+        const void __percpu *ptr = log->ptr;
+
+        for_each_possible_cpu(cpu) {
+                log->ptr = per_cpu_ptr(ptr, cpu);
+                early_alloc(log);
+        }
+}
+
 /**
  * kmemleak_alloc - register a newly allocated object
  * @ptr:   pointer to beginning of the object
@@ -878,6 +894,34 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
+/**
+ * kmemleak_alloc_percpu - register a newly allocated __percpu object
+ * @ptr:   __percpu pointer to beginning of the object
+ * @size:  size of the object
+ *
+ * This function is called from the kernel percpu allocator when a new object
+ * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
+ * allocation.
+ */
+void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+{
+        unsigned int cpu;
+
+        pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
+
+        /*
+         * Percpu allocations are only scanned and not reported as leaks
+         * (min_count is set to 0).
+         */
+        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+                for_each_possible_cpu(cpu)
+                        create_object((unsigned long)per_cpu_ptr(ptr, cpu),
+                                      size, 0, GFP_KERNEL);
+        else if (atomic_read(&kmemleak_early_log))
+                log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
+
 /**
  * kmemleak_free - unregister a previously registered object
  * @ptr:   pointer to beginning of the object
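A note on the min_count == 0 choice above: the cookie returned by
alloc_percpu() is not the address of any per-CPU copy (it must go through
per_cpu_ptr() first), so no scanned memory block ever holds the copies'
addresses, and reporting them would always be a false leak. A hypothetical
snippet, not from the commit, that makes the cookie/address distinction
visible:

#include <linux/module.h>
#include <linux/percpu.h>

static int __init cookie_demo_init(void)
{
        int __percpu *val = alloc_percpu(int);
        unsigned int cpu;

        if (!val)
                return -ENOMEM;

        /* The cast only drops the __percpu annotation for printing. */
        pr_info("cookie:     %p\n", (void *)val);
        for_each_possible_cpu(cpu)
                pr_info("cpu%u copy: %p\n", cpu, per_cpu_ptr(val, cpu));

        free_percpu(val);
        return 0;
}

static void __exit cookie_demo_exit(void)
{
}

module_init(cookie_demo_init);
module_exit(cookie_demo_exit);
MODULE_LICENSE("GPL");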
@@ -916,6 +960,28 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
+/**
+ * kmemleak_free_percpu - unregister a previously registered __percpu object
+ * @ptr:   __percpu pointer to beginning of the object
+ *
+ * This function is called from the kernel percpu allocator when an object
+ * (memory block) is freed (free_percpu).
+ */
+void __ref kmemleak_free_percpu(const void __percpu *ptr)
+{
+        unsigned int cpu;
+
+        pr_debug("%s(0x%p)\n", __func__, ptr);
+
+        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+                for_each_possible_cpu(cpu)
+                        delete_object_full((unsigned long)per_cpu_ptr(ptr,
+                                                                      cpu));
+        else if (atomic_read(&kmemleak_early_log))
+                log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
+
 /**
  * kmemleak_not_leak - mark an allocated object as false positive
  * @ptr:   pointer to beginning of the object
@@ -1727,12 +1793,18 @@ void __init kmemleak_init(void)
                 case KMEMLEAK_ALLOC:
                         early_alloc(log);
                         break;
+                case KMEMLEAK_ALLOC_PERCPU:
+                        early_alloc_percpu(log);
+                        break;
                 case KMEMLEAK_FREE:
                         kmemleak_free(log->ptr);
                         break;
                 case KMEMLEAK_FREE_PART:
                         kmemleak_free_part(log->ptr, log->size);
                         break;
+                case KMEMLEAK_FREE_PERCPU:
+                        kmemleak_free_percpu(log->ptr);
+                        break;
                 case KMEMLEAK_NOT_LEAK:
                         kmemleak_not_leak(log->ptr);
                         break;
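The KMEMLEAK_ALLOC_PERCPU and KMEMLEAK_FREE_PERCPU cases above complete the
early-logging path: percpu allocations issued before kmemleak_init() are
only recorded, then replayed here, with early_alloc_percpu() expanding each
record into one tracked object per possible CPU. A standalone userspace
sketch of that record-then-replay idea (all names and sizes invented):

#include <stdio.h>
#include <stddef.h>

#define NR_FAKE_CPUS 4          /* stand-in for the possible-CPU mask */
#define UNIT_SIZE    4096       /* stand-in for the per-CPU unit stride */

struct early_log {
        const void *ptr;        /* the __percpu cookie in the real code */
        size_t size;
};

static struct early_log log_buf[32];
static int log_count;

/* Called while the tracker is not ready yet: just remember the request. */
static void log_early(const void *ptr, size_t size)
{
        if (log_count < 32)
                log_buf[log_count++] = (struct early_log){ ptr, size };
}

/* Stand-in for per_cpu_ptr(): each CPU's copy sits at a fixed stride. */
static const void *fake_per_cpu_ptr(const void *ptr, int cpu)
{
        return (const char *)ptr + cpu * UNIT_SIZE;
}

/* Replay at init time: one tracked object per CPU, as early_alloc_percpu()
 * does via create_object(). */
static void replay(void)
{
        for (int i = 0; i < log_count; i++)
                for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
                        printf("track %p (%zu bytes) for cpu%d\n",
                               fake_per_cpu_ptr(log_buf[i].ptr, cpu),
                               log_buf[i].size, cpu);
}

int main(void)
{
        static char area[NR_FAKE_CPUS * UNIT_SIZE];

        log_early(area, 64);    /* "allocation" before kmemleak_init() */
        replay();               /* what kmemleak_init() does with the log */
        return 0;
}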
mm/percpu.c
@@ -67,6 +67,7 @@
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
+#include <linux/kmemleak.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -710,6 +711,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
         const char *err;
         int slot, off, new_alloc;
         unsigned long flags;
+        void __percpu *ptr;
 
         if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
                 WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -802,7 +804,9 @@ area_found:
         mutex_unlock(&pcpu_alloc_mutex);
 
         /* return address relative to base address */
-        return __addr_to_pcpu_ptr(chunk->base_addr + off);
+        ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
+        kmemleak_alloc_percpu(ptr, size);
+        return ptr;
 
 fail_unlock:
         spin_unlock_irqrestore(&pcpu_lock, flags);
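The pcpu_alloc() change above follows the usual pattern for instrumenting
an allocator: compute the user-visible pointer, notify the tracker, then
return it; free_percpu() mirrors the notification before any teardown, as
the next hunk shows. A toy userspace equivalent of the pattern
(hypothetical, not kernel code):

#include <stdio.h>
#include <stdlib.h>

static void track_alloc(void *ptr, size_t size)
{
        printf("track %p (%zu bytes)\n", ptr, size);
}

static void track_free(void *ptr)
{
        printf("untrack %p\n", ptr);
}

static void *traced_alloc(size_t size)
{
        void *ptr = malloc(size);

        if (ptr)
                track_alloc(ptr, size); /* mirrors kmemleak_alloc_percpu() */
        return ptr;
}

static void traced_free(void *ptr)
{
        if (!ptr)
                return;
        track_free(ptr);                /* mirrors kmemleak_free_percpu() */
        free(ptr);
}

int main(void)
{
        void *p = traced_alloc(64);

        traced_free(p);
        return 0;
}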
@@ -916,6 +920,8 @@ void free_percpu(void __percpu *ptr)
         if (!ptr)
                 return;
 
+        kmemleak_free_percpu(ptr);
+
         addr = __pcpu_ptr_to_addr(ptr);
 
         spin_lock_irqsave(&pcpu_lock, flags);
@@ -1637,6 +1643,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                         rc = -ENOMEM;
                         goto out_free_areas;
                 }
+                /* kmemleak tracks the percpu allocations separately */
+                kmemleak_free(ptr);
                 areas[group] = ptr;
 
                 base = min(ptr, base);
@@ -1751,6 +1759,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
                                        "for cpu%u\n", psize_str, cpu);
                                 goto enomem;
                         }
+                        /* kmemleak tracks the percpu allocations separately */
+                        kmemleak_free(ptr);
                         pages[j++] = virt_to_page(ptr);
                 }