mirror of
https://github.com/torvalds/linux.git
mm/sparse.c: fix memory leak of sparsemap_buf in aligned memory
sparse_buffer_alloc(size) returns a chunk of memory from sparsemap_buf
after aligning sparsemap_buf to the requested size.  However, the size is
at least PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION) and usually
larger than PAGE_SIZE.

Also, sparse_buffer_fini() only frees memory between sparsemap_buf and
sparsemap_buf_end.  Since sparsemap_buf may have been advanced by
PTR_ALIGN() first, the aligned space before sparsemap_buf is wasted and
no one will touch it.

In our ARM32 platform (without SPARSEMEM_VMEMMAP)
  sparse_buffer_init
    Reserve d359c000 - d3e9c000 (9M)
  sparse_buffer_alloc
    Alloc   d3a00000 - d3e80000 (4.5M)
  sparse_buffer_fini
    Free    d3e80000 - d3e9c000 (~=100k)
The reserved memory between d359c000 - d3a00000 (~=4.4M) is unfreed.

In ARM64 platform (with SPARSEMEM_VMEMMAP)
  sparse_buffer_init
    Reserve ffffffc07d623000 - ffffffc07f623000 (32M)
  sparse_buffer_alloc
    Alloc   ffffffc07d800000 - ffffffc07f600000 (30M)
  sparse_buffer_fini
    Free    ffffffc07f600000 - ffffffc07f623000 (140K)
The reserved memory between ffffffc07d623000 - ffffffc07d800000
(~=1.9M) is unfreed.

Let's explicitly free the redundant aligned memory.

[arnd@arndb.de: mark sparse_buffer_free() as __meminit]
  Link: http://lkml.kernel.org/r/20190709185528.3251709-1-arnd@arndb.de
Link: http://lkml.kernel.org/r/20190705114730.28534-1-lecopzer.chen@mediatek.com
Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
Signed-off-by: Mark-PK Tsai <Mark-PK.Tsai@mediatek.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: YJ Chiang <yj.chiang@mediatek.com>
Cc: Lecopzer Chen <lecopzer.chen@mediatek.com>
Cc: Pavel Tatashin <pasha.tatashin@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
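The unfreed gap is simply the bytes PTR_ALIGN() skips over before the
returned pointer.  The following user-space sketch is not kernel code:
ALIGN_UP is a local stand-in for the kernel's PTR_ALIGN() mask
arithmetic, and the addresses and the 0x480000 (4.5M) request size are
taken from the ARM32 log above.  It reproduces the ~4.4M gap and the
~100k tail that sparse_buffer_fini() frees; the ARM64 figure arises the
same way, just with the vmemmap-sized allocation requests.

/*
 * User-space sketch (not kernel code) of the alignment gap described
 * above.  ALIGN_UP stands in for the kernel's PTR_ALIGN() mask
 * arithmetic; the addresses are the ARM32 numbers from the log.
 */
#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t buf     = 0xd359c000;	/* sparsemap_buf after init       */
	uintptr_t buf_end = 0xd3e9c000;	/* sparsemap_buf_end (9M reserve) */
	uintptr_t size    = 0x480000;	/* 4.5M section memmap request    */

	uintptr_t ptr = ALIGN_UP(buf, size);	/* PTR_ALIGN(sparsemap_buf, size) */

	printf("gap before ptr: %#lx bytes\n",	/* ~4.4M, leaked before the fix */
	       (unsigned long)(ptr - buf));
	printf("alloc:          %#lx - %#lx\n",
	       (unsigned long)ptr, (unsigned long)(ptr + size));
	printf("fini frees:     %#lx - %#lx\n",	/* ~100k */
	       (unsigned long)(ptr + size), (unsigned long)buf_end);
	return 0;
}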
parent 29a90db929
commit ae83189405
mm/sparse.c | 14 changed lines
@@ -470,6 +470,12 @@ struct page __init *__populate_section_memmap(unsigned long pfn,
 static void *sparsemap_buf __meminitdata;
 static void *sparsemap_buf_end __meminitdata;
 
+static inline void __meminit sparse_buffer_free(unsigned long size)
+{
+	WARN_ON(!sparsemap_buf || size == 0);
+	memblock_free_early(__pa(sparsemap_buf), size);
+}
+
 static void __init sparse_buffer_init(unsigned long size, int nid)
 {
 	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
@@ -486,7 +492,7 @@ static void __init sparse_buffer_fini(void)
 	unsigned long size = sparsemap_buf_end - sparsemap_buf;
 
 	if (sparsemap_buf && size > 0)
-		memblock_free_early(__pa(sparsemap_buf), size);
+		sparse_buffer_free(size);
 	sparsemap_buf = NULL;
 }
 
@@ -498,8 +504,12 @@ void * __meminit sparse_buffer_alloc(unsigned long size)
 		ptr = PTR_ALIGN(sparsemap_buf, size);
 		if (ptr + size > sparsemap_buf_end)
 			ptr = NULL;
-		else
+		else {
+			/* Free redundant aligned space */
+			if ((unsigned long)(ptr - sparsemap_buf) > 0)
+				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
 			sparsemap_buf = ptr + size;
+		}
 	}
 	return ptr;
 }
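For comparison, here is a minimal user-space model of the accounting
after the patch.  It is not kernel code: the model_* names are
hypothetical stand-ins, with model_buffer_free() playing the role of
sparse_buffer_free()/memblock_free_early(), and the addresses again
taken from the ARM32 log.  The gap skipped by the alignment is now
handed back, so the whole reservation ends up either used for the
section memmap or freed.

/*
 * User-space model (not kernel code) of the patched accounting.
 */
#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

static uintptr_t buf = 0xd359c000, buf_end = 0xd3e9c000;	/* 9M reserved */
static unsigned long freed;

static void model_buffer_free(unsigned long size)
{
	freed += size;	/* would be memblock_free_early(__pa(sparsemap_buf), size) */
}

static uintptr_t model_buffer_alloc(unsigned long size)
{
	uintptr_t ptr = ALIGN_UP(buf, size);

	if (ptr + size > buf_end)
		return 0;
	if (ptr - buf > 0)		/* the fix: release the aligned-off gap */
		model_buffer_free(ptr - buf);
	buf = ptr + size;
	return ptr;
}

int main(void)
{
	unsigned long reserved = buf_end - buf;		/* 9M                  */
	unsigned long used = 0x480000;			/* 4.5M section memmap */

	model_buffer_alloc(used);
	model_buffer_free(buf_end - buf);		/* sparse_buffer_fini() */

	/* With the patch, used + freed covers the whole reservation: no leak. */
	printf("reserved %#lx, used %#lx, freed %#lx, leaked %#lx\n",
	       reserved, used, freed, reserved - used - freed);
	return 0;
}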