usercopy: Mark kmalloc caches as usercopy caches
Mark the kmalloc slab caches as entirely whitelisted. These caches
are frequently used to fulfill kernel allocations that contain data
to be copied to/from userspace. Internal-only uses are also common,
but are scattered in the kernel. For now, mark all the kmalloc caches
as whitelisted.

This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY
whitelisting code in the last public patch of grsecurity/PaX based on
my understanding of the code. Changes or omissions from the original
code are mine and don't reflect the original grsecurity/PaX code.

Signed-off-by: David Windsor <dave@nullcore.net>
[kees: merged in moved kmalloc hunks, adjust commit log]
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org
Cc: linux-xfs@vger.kernel.org
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Christoph Lameter <cl@linux.com>
parent 2d891fbc3b
commit 6c0c21adc7
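For context, the whitelist used by this series is a per-cache window (useroffset, usersize): only that byte range of each object may be copied to or from userspace under hardened usercopy. Whitelisting a kmalloc cache "entirely" means useroffset 0 and usersize equal to the object size. A cache that only needs part of its objects exposed would instead use kmem_cache_create_usercopy(), added earlier in this series. The sketch below is illustrative only; struct example_obj, example_cachep, and example_init() are made-up names, not code from this patch.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>

struct example_obj {			/* hypothetical object layout */
	spinlock_t lock;		/* internal-only state */
	char data[128];			/* only region copied to/from userspace */
};

static struct kmem_cache *example_cachep;

static int __init example_init(void)
{
	/* Partial whitelist: only example_obj.data may be copied to/from userspace. */
	example_cachep = kmem_cache_create_usercopy("example_obj",
			sizeof(struct example_obj), 0, 0,
			offsetof(struct example_obj, data),
			sizeof(((struct example_obj *)0)->data),
			NULL);

	/*
	 * The kmalloc caches, by contrast, are now created with useroffset 0
	 * and usersize equal to the full object size, which is exactly what
	 * the hunks below pass through create_kmalloc_cache() into
	 * create_boot_cache().
	 */
	return example_cachep ? 0 : -ENOMEM;
}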
mm/slab.c
@@ -1291,7 +1291,8 @@ void __init kmem_cache_init(void)
 	 */
 	kmalloc_caches[INDEX_NODE] = create_kmalloc_cache(
 				kmalloc_info[INDEX_NODE].name,
-				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
+				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS,
+				0, kmalloc_size(INDEX_NODE));
 	slab_state = PARTIAL_NODE;
 	setup_kmalloc_cache_index_table();
 
mm/slab.h
@@ -97,7 +97,8 @@ struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
 
 extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
-			slab_flags_t flags);
+			slab_flags_t flags, size_t useroffset,
+			size_t usersize);
 extern void create_boot_cache(struct kmem_cache *, const char *name,
 			size_t size, slab_flags_t flags, size_t useroffset,
 			size_t usersize);
mm/slab_common.c
@@ -937,14 +937,15 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
 }
 
 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
-				slab_flags_t flags)
+				slab_flags_t flags, size_t useroffset,
+				size_t usersize)
 {
 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 
 	if (!s)
 		panic("Out of memory when creating slab %s\n", name);
 
-	create_boot_cache(s, name, size, flags, 0, size);
+	create_boot_cache(s, name, size, flags, useroffset, usersize);
 	list_add(&s->list, &slab_caches);
 	memcg_link_cache(s);
 	s->refcount = 1;
@@ -1098,7 +1099,8 @@ void __init setup_kmalloc_cache_index_table(void)
 static void __init new_kmalloc_cache(int idx, slab_flags_t flags)
 {
 	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
-					kmalloc_info[idx].size, flags);
+					kmalloc_info[idx].size, flags, 0,
+					kmalloc_info[idx].size);
 }
 
 /*
@@ -1139,7 +1141,7 @@ void __init create_kmalloc_caches(slab_flags_t flags)
 
 			BUG_ON(!n);
 			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
-				size, SLAB_CACHE_DMA | flags);
+				size, SLAB_CACHE_DMA | flags, 0, 0);
 		}
 	}
 #endif
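The (useroffset, usersize) pair recorded by create_boot_cache() is what the hardened usercopy check consults when a copy overlaps a slab object. Conceptually, the test reduces to the bounds check sketched below; this is an illustration of the rule, not the kernel's exact __check_heap_object() code, and the helper name is invented. Note that the DMA kmalloc caches above are created with usersize 0, i.e. no whitelist at all.

#include <linux/slab.h>
#include <linux/types.h>

/*
 * Does the n-byte range starting 'offset' bytes into a slab object fall
 * entirely inside the cache's usercopy whitelist window?
 */
static bool usercopy_range_whitelisted(const struct kmem_cache *s,
				       unsigned long offset, unsigned long n)
{
	/* usersize == 0 means nothing in this cache is whitelisted. */
	if (!s->usersize)
		return false;

	/* [offset, offset + n) must lie within [useroffset, useroffset + usersize). */
	return offset >= s->useroffset &&
	       n <= s->usersize &&
	       offset - s->useroffset <= s->usersize - n;
}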