Merge branch 'slab/for-6.1/kmalloc_size_roundup' into slab/for-next
The first two patches from a series by Kees Cook [1] introduce kmalloc_size_roundup(). This will allow per-subsystem patches that use the new function to be merged, and ultimately stop (ab)using ksize() in a way that causes ongoing trouble for debugging functionality and static checkers.

[1] https://lore.kernel.org/all/20220923202822.2667581-1-keescook@chromium.org/

Resolved a conflict between the modification of the __ksize() comment in mm/slab.c and a commit that unifies the __ksize() implementation into mm/slab_common.c.
commit 445d41d7a7
include/linux/compiler_attributes.h
@@ -35,7 +35,8 @@
 /*
  * Note: do not use this directly. Instead, use __alloc_size() since it is conditionally
- * available and includes other attributes.
+ * available and includes other attributes. For GCC < 9.1, __alloc_size__ gets undefined
+ * in compiler-gcc.h, due to misbehaviors.
  *
  * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alloc_005fsize-function-attribute
  * clang: https://clang.llvm.org/docs/AttributeReference.html#alloc-size
 */
include/linux/compiler_types.h
@@ -271,14 +271,16 @@ struct ftrace_likely_data {

 /*
  * Any place that could be marked with the "alloc_size" attribute is also
- * a place to be marked with the "malloc" attribute. Do this as part of the
- * __alloc_size macro to avoid redundant attributes and to avoid missing a
- * __malloc marking.
+ * a place to be marked with the "malloc" attribute, except those that may
+ * be performing a _reallocation_, as that may alias the existing pointer.
+ * For these, use __realloc_size().
  */
 #ifdef __alloc_size__
 # define __alloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) __malloc
+# define __realloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__)
 #else
 # define __alloc_size(x, ...) __malloc
+# define __realloc_size(x, ...)
 #endif

 #ifndef asm_volatile_goto
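Why a reallocator must not carry __malloc: the attribute promises that the returned pointer aliases no other live pointer, which lets the compiler discard values cached through the old pointer. A minimal userspace sketch of the distinction, under the assumption that the hypothetical helpers grow() and fresh() stand in for krealloc()-style and kmalloc()-style functions (only the GCC/Clang attributes themselves are real):

#include <stdlib.h>

/* Resizing: the result may alias 'p', so only the size hint applies. */
void *grow(void *p, size_t n) __attribute__((alloc_size(2)));

/* Fresh allocation: guaranteed not to alias any live pointer. */
void *fresh(size_t n) __attribute__((malloc, alloc_size(1)));

void *grow(void *p, size_t n) { return realloc(p, n); }
void *fresh(size_t n) { return malloc(n); }

If grow() were also marked __attribute__((malloc)), the optimizer could assume its result is distinct from 'p' and keep stale loads alive even when realloc() resized the block in place.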
include/linux/slab.h
@@ -186,10 +186,25 @@ int kmem_cache_shrink(struct kmem_cache *s);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
+void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
 void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
 size_t __ksize(const void *objp);
+
+/**
+ * ksize - Report actual allocation size of associated object
+ *
+ * @objp: Pointer returned from a prior kmalloc()-family allocation.
+ *
+ * This should not be used for writing beyond the originally requested
+ * allocation size. Either use krealloc() or round up the allocation size
+ * with kmalloc_size_roundup() prior to allocation. If this is used to
+ * access beyond the originally requested allocation size, UBSAN_BOUNDS
+ * and/or FORTIFY_SOURCE may trip, since they only know about the
+ * originally allocated size via the __alloc_size attribute.
+ */
 size_t ksize(const void *objp);
+
 #ifdef CONFIG_PRINTK
 bool kmem_valid_obj(void *object);
 void kmem_dump_obj(void *object);
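The anti-pattern this kernel-doc warns against, as a hedged sketch (the helper function is hypothetical; kmalloc(), ksize() and memset() are the real APIs):

#include <linux/slab.h>
#include <linux/string.h>

static void *bad_use_of_slack(size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return NULL;
	/*
	 * ksize() may report a larger bucket (e.g. 128 for len == 100),
	 * but FORTIFY_SOURCE/UBSAN_BOUNDS still track 'len' via the
	 * __alloc_size hint on kmalloc(), so this memset can trip them.
	 */
	memset(buf, 0, ksize(buf));
	return buf;
}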
@@ -614,10 +629,10 @@ static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_
  * @new_size: new size of a single member of the array
  * @flags: the type of memory to allocate (see kmalloc)
  */
-static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
-						       size_t new_n,
-						       size_t new_size,
-						       gfp_t flags)
+static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
+						        size_t new_n,
+						        size_t new_size,
+						        gfp_t flags)
 {
 	size_t bytes;

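For reference, the call-site shape that krealloc_array() serves, and that __realloc_size(2, 3) now describes, looks roughly like this (struct item and grow_items() are hypothetical):

#include <linux/slab.h>

struct item { int key; int val; };

static struct item *grow_items(struct item *arr, size_t new_n)
{
	struct item *tmp;

	/* Overflow-checked new_n * sizeof(*arr); result may alias 'arr'. */
	tmp = krealloc_array(arr, new_n, sizeof(*arr), GFP_KERNEL);
	return tmp; /* caller keeps 'arr' if tmp == NULL */
}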
@@ -732,11 +747,28 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla
 }

 extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
-		__alloc_size(3);
+		__realloc_size(3);
 extern void kvfree(const void *addr);
 extern void kvfree_sensitive(const void *addr, size_t len);

 unsigned int kmem_cache_size(struct kmem_cache *s);

+/**
+ * kmalloc_size_roundup - Report allocation bucket size for the given size
+ *
+ * @size: Number of bytes to round up from.
+ *
+ * This returns the number of bytes that would be available in a kmalloc()
+ * allocation of @size bytes. For example, a 126 byte request would be
+ * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
+ * for the general-purpose kmalloc()-based allocations, and is not for the
+ * pre-sized kmem_cache_alloc()-based allocations.)
+ *
+ * Use this to kmalloc() the full bucket size ahead of time instead of using
+ * ksize() to query the size after an allocation.
+ */
+size_t kmalloc_size_roundup(size_t size);
+
 void __init kmem_cache_init_late(void);

 #if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
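A minimal sketch of the usage the kernel-doc recommends, assuming a hypothetical wrapper (only kmalloc_size_roundup() and kmalloc() come from this tree):

#include <linux/slab.h>

static void *alloc_whole_bucket(size_t want, size_t *usable)
{
	size_t full = kmalloc_size_roundup(want); /* e.g. 126 -> 128 */
	void *buf;

	buf = kmalloc(full, GFP_KERNEL);
	if (!buf)
		return NULL;
	/* All 'full' bytes are visible to UBSAN/FORTIFY as valid. */
	*usable = full;
	return buf;
}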
mm/slab_common.c
@@ -734,6 +734,26 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 	return kmalloc_caches[kmalloc_type(flags)][index];
 }

+size_t kmalloc_size_roundup(size_t size)
+{
+	struct kmem_cache *c;
+
+	/* Short-circuit the 0 size case. */
+	if (unlikely(size == 0))
+		return 0;
+	/* Short-circuit saturated "too-large" case. */
+	if (unlikely(size == SIZE_MAX))
+		return SIZE_MAX;
+	/* Above the smaller buckets, size is a multiple of page size. */
+	if (size > KMALLOC_MAX_CACHE_SIZE)
+		return PAGE_SIZE << get_order(size);
+
+	/* The flags don't matter since size_index is common to all. */
+	c = kmalloc_slab(size, GFP_KERNEL);
+	return c ? c->object_size : 0;
+}
+EXPORT_SYMBOL(kmalloc_size_roundup);
+
 #ifdef CONFIG_ZONE_DMA
 #define KMALLOC_DMA_NAME(sz) .name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
 #else
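To make the branches above concrete, a userspace model under assumed defaults (4 KiB pages, power-of-two buckets; the real values come from kmalloc_caches and the size_index table, so this is purely illustrative):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SZ   4096u          /* assumption: 4 KiB pages */
#define MAX_CACHE (2 * PAGE_SZ)  /* assumption: KMALLOC_MAX_CACHE_SIZE */

static size_t model_roundup(size_t size)
{
	size_t b = 8; /* assumption: smallest bucket */

	if (size == 0)
		return 0;
	if (size == SIZE_MAX)
		return SIZE_MAX;
	if (size > MAX_CACHE) {
		/* PAGE_SIZE << get_order(size): next power-of-two pages */
		size_t pages = PAGE_SZ;

		while (pages < size)
			pages <<= 1;
		return pages;
	}
	while (b < size) /* assumption: 2^n buckets; real slab adds 96/192 */
		b <<= 1;
	return b;
}
/* model_roundup(126) == 128; model_roundup(40960) == 65536 */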
@@ -987,7 +1007,18 @@ void kfree(const void *object)
 }
 EXPORT_SYMBOL(kfree);

-/* Uninstrumented ksize. Only called by KASAN. */
+/**
+ * __ksize -- Report full size of underlying allocation
+ * @objp: pointer to the object
+ *
+ * This should only be used internally to query the true size of allocations.
+ * It is not meant to be a way to discover the usable size of an allocation
+ * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
+ * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
+ * and/or FORTIFY_SOURCE.
+ *
+ * Return: size of the actual memory used by @objp in bytes
+ */
 size_t __ksize(const void *object)
 {
 	struct folio *folio;
@@ -1294,8 +1325,8 @@ module_init(slab_proc_init);

 #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */

-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
-					   gfp_t flags)
+static __always_inline __realloc_size(2) void *
+__do_krealloc(const void *p, size_t new_size, gfp_t flags)
 {
 	void *ret;
 	size_t ks;

mm/slob.c
@@ -564,6 +564,20 @@ void kfree(const void *block)
 }
 EXPORT_SYMBOL(kfree);

+size_t kmalloc_size_roundup(size_t size)
+{
+	/* Short-circuit the 0 size case. */
+	if (unlikely(size == 0))
+		return 0;
+	/* Short-circuit saturated "too-large" case. */
+	if (unlikely(size == SIZE_MAX))
+		return SIZE_MAX;
+
+	return ALIGN(size, ARCH_KMALLOC_MINALIGN);
+}
+
+EXPORT_SYMBOL(kmalloc_size_roundup);
+
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
 size_t __ksize(const void *block)
 {
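SLOB has no size buckets, so rounding up is just alignment. Assuming an ARCH_KMALLOC_MINALIGN of 8, the ALIGN() call above behaves like this hedged model of the kernel macro:

/* ALIGN(x, a) for power-of-two 'a', as in include/linux/align.h */
#define MODEL_ALIGN(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* MODEL_ALIGN(1, 8) == 8; MODEL_ALIGN(126, 8) == 128;
 * MODEL_ALIGN(128, 8) == 128 */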