mirror of https://github.com/torvalds/linux.git
kasan: inline kasan_reset_tag for tag-based modes
Using kasan_reset_tag() currently results in a function call. As it's
called quite often from the allocator code, this leads to a noticeable
slowdown. Move it to include/linux/kasan.h and turn it into a static
inline function. Also remove the now unneeded reset_tag() internal KASAN
macro and use kasan_reset_tag() instead.

Link: https://lkml.kernel.org/r/6940383a3a9dfb416134d338d8fac97a9ebb8686.1606162397.git.andreyknvl@google.com
Link: https://linux-review.googlesource.com/id/I4d2061acfe91d480a75df00b07c22d8494ef14b5
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 77f57c9830
commit c0054c565a
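For context, here is a rough user-space sketch of what the now-inlined helper does under the tag-based modes: the pointer tag lives in the top byte of the address, and kasan_reset_tag() strips it so the canonical address can be used for shadow and metadata arithmetic. This is an illustration under stated assumptions, not kernel code: the constants and the zeroing mask are simplifications (the real arm64 path goes through arch_kasan_reset_tag()/__tag_reset(), which sign-extends from bit 55 rather than zeroing the top byte), and it assumes a 64-bit target.

/* sketch.c - simplified model of KASAN pointer-tag stripping (not kernel code) */
#include <stdint.h>
#include <stdio.h>

#define KASAN_TAG_SHIFT	56				/* tag lives in the top byte (arm64 TBI layout) */
#define KASAN_TAG_MASK	(0xffULL << KASAN_TAG_SHIFT)

/* Header-level static inline: the compiler folds the mask into each caller,
 * so there is no function call on the allocator hot paths. */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)(uintptr_t)((uintptr_t)addr & ~(uintptr_t)KASAN_TAG_MASK);
}

static inline uint8_t get_tag(const void *addr)
{
	return (uint8_t)((uintptr_t)addr >> KASAN_TAG_SHIFT);
}

int main(void)
{
	int x = 0;
	/* Fake a tagged pointer by stuffing tag 0xab into the top byte. */
	void *tagged = (void *)(((uintptr_t)&x & ~(uintptr_t)KASAN_TAG_MASK) |
				((uintptr_t)0xab << KASAN_TAG_SHIFT));

	printf("tag      = 0x%02x\n", get_tag(tagged));
	printf("untagged = %p (original %p)\n", kasan_reset_tag(tagged), (void *)&x);
	return 0;
}

The point of the patch is that this operation is just a mask: once the definition is a static inline in include/linux/kasan.h, each call site compiles to a couple of instructions instead of a call into mm/kasan/sw_tags.c or mm/kasan/hw_tags.c.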
include/linux/kasan.h
@@ -194,7 +194,10 @@ static inline void kasan_record_aux_stack(void *ptr) {}
 
 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
 
-void *kasan_reset_tag(const void *addr);
+static inline void *kasan_reset_tag(const void *addr)
+{
+	return (void *)arch_kasan_reset_tag(addr);
+}
 
 bool kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
mm/kasan/common.c
@@ -179,14 +179,14 @@ size_t kasan_metadata_size(struct kmem_cache *cache)
 struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
 					      const void *object)
 {
-	return (void *)reset_tag(object) + cache->kasan_info.alloc_meta_offset;
+	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
 }
 
 struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 					    const void *object)
 {
 	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
-	return (void *)reset_tag(object) + cache->kasan_info.free_meta_offset;
+	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
 }
 
 void kasan_poison_slab(struct page *page)
@@ -283,7 +283,7 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 
 	tag = get_tag(object);
 	tagged_object = object;
-	object = reset_tag(object);
+	object = kasan_reset_tag(object);
 
 	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
 	    object)) {
mm/kasan/hw_tags.c
@@ -30,20 +30,15 @@ void __init kasan_init_hw_tags(void)
 	pr_info("KernelAddressSanitizer initialized\n");
 }
 
-void *kasan_reset_tag(const void *addr)
-{
-	return reset_tag(addr);
-}
-
 void poison_range(const void *address, size_t size, u8 value)
 {
-	hw_set_mem_tag_range(reset_tag(address),
+	hw_set_mem_tag_range(kasan_reset_tag(address),
 			round_up(size, KASAN_GRANULE_SIZE), value);
 }
 
 void unpoison_range(const void *address, size_t size)
 {
-	hw_set_mem_tag_range(reset_tag(address),
+	hw_set_mem_tag_range(kasan_reset_tag(address),
 			round_up(size, KASAN_GRANULE_SIZE), get_tag(address));
 }
mm/kasan/kasan.h
@@ -248,15 +248,11 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 	return addr;
 }
 #endif
-#ifndef arch_kasan_reset_tag
-#define arch_kasan_reset_tag(addr)	((void *)(addr))
-#endif
 #ifndef arch_kasan_get_tag
 #define arch_kasan_get_tag(addr)	0
 #endif
 
 #define set_tag(addr, tag)	((void *)arch_kasan_set_tag((addr), (tag)))
-#define reset_tag(addr)		((void *)arch_kasan_reset_tag(addr))
 #define get_tag(addr)		arch_kasan_get_tag(addr)
 
 #ifdef CONFIG_KASAN_HW_TAGS
mm/kasan/report.c
@@ -328,7 +328,7 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
 	unsigned long flags;
 	u8 tag = get_tag(object);
 
-	object = reset_tag(object);
+	object = kasan_reset_tag(object);
 
 #if IS_ENABLED(CONFIG_KUNIT)
 	if (current->kunit_test)
@@ -361,7 +361,7 @@ static void __kasan_report(unsigned long addr, size_t size, bool is_write,
 	disable_trace_on_warning();
 
 	tagged_addr = (void *)addr;
-	untagged_addr = reset_tag(tagged_addr);
+	untagged_addr = kasan_reset_tag(tagged_addr);
 
 	info.access_addr = tagged_addr;
 	if (addr_has_metadata(untagged_addr))
mm/kasan/report_hw_tags.c
@@ -22,7 +22,7 @@ const char *get_bug_type(struct kasan_access_info *info)
 
 void *find_first_bad_addr(void *addr, size_t size)
 {
-	return reset_tag(addr);
+	return kasan_reset_tag(addr);
 }
 
 void metadata_fetch_row(char *buffer, void *row)
mm/kasan/report_sw_tags.c
@@ -41,7 +41,7 @@ const char *get_bug_type(struct kasan_access_info *info)
 	int i;
 
 	tag = get_tag(info->access_addr);
-	addr = reset_tag(info->access_addr);
+	addr = kasan_reset_tag(info->access_addr);
 	page = kasan_addr_to_page(addr);
 	if (page && PageSlab(page)) {
 		cache = page->slab_cache;
@@ -72,7 +72,7 @@ const char *get_bug_type(struct kasan_access_info *info)
 void *find_first_bad_addr(void *addr, size_t size)
 {
 	u8 tag = get_tag(addr);
-	void *p = reset_tag(addr);
+	void *p = kasan_reset_tag(addr);
 	void *end = p + size;
 
 	while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p))
mm/kasan/shadow.c
@@ -81,7 +81,7 @@ void poison_range(const void *address, size_t size, u8 value)
 	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
 	 * addresses to this function.
	 */
-	address = reset_tag(address);
+	address = kasan_reset_tag(address);
 
 	shadow_start = kasan_mem_to_shadow(address);
 	shadow_end = kasan_mem_to_shadow(address + size);
@@ -98,7 +98,7 @@ void unpoison_range(const void *address, size_t size)
 	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
 	 * addresses to this function.
	 */
-	address = reset_tag(address);
+	address = kasan_reset_tag(address);
 
 	poison_range(address, size, tag);
 
mm/kasan/sw_tags.c
@@ -67,11 +67,6 @@ u8 random_tag(void)
 	return (u8)(state % (KASAN_TAG_MAX + 1));
 }
 
-void *kasan_reset_tag(const void *addr)
-{
-	return reset_tag(addr);
-}
-
 bool check_memory_region(unsigned long addr, size_t size, bool write,
 				unsigned long ret_ip)
 {
@@ -107,7 +102,7 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
 	if (tag == KASAN_TAG_KERNEL)
 		return true;
 
-	untagged_addr = reset_tag((const void *)addr);
+	untagged_addr = kasan_reset_tag((const void *)addr);
 	if (unlikely(untagged_addr <
 			kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
 		return !kasan_report(addr, size, write, ret_ip);
@@ -126,7 +121,7 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
 bool check_invalid_free(void *addr)
 {
 	u8 tag = get_tag(addr);
-	u8 shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(reset_tag(addr)));
+	u8 shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(kasan_reset_tag(addr)));
 
 	return (shadow_byte == KASAN_TAG_INVALID) ||
 		(tag != KASAN_TAG_KERNEL && tag != shadow_byte);