Merge branch 'akpm' (patches from Andrew)
Merge KASAN updates from Andrew Morton.

This adds a new hardware tag-based mode to KASAN. The new mode is similar to
the existing software tag-based KASAN, but relies on arm64 Memory Tagging
Extension (MTE) to perform memory and pointer tagging (instead of shadow
memory and compiler instrumentation).

By Andrey Konovalov and Vincenzo Frascino.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (60 commits)
  kasan: update documentation
  kasan, mm: allow cache merging with no metadata
  kasan: sanitize objects when metadata doesn't fit
  kasan: clarify comment in __kasan_kfree_large
  kasan: simplify assign_tag and set_tag calls
  kasan: don't round_up too much
  kasan, mm: rename kasan_poison_kfree
  kasan, mm: check kasan_enabled in annotations
  kasan: add and integrate kasan boot parameters
  kasan: inline (un)poison_range and check_invalid_free
  kasan: open-code kasan_unpoison_slab
  kasan: inline random_tag for HW_TAGS
  kasan: inline kasan_reset_tag for tag-based modes
  kasan: remove __kasan_unpoison_stack
  kasan: allow VMAP_STACK for HW_TAGS mode
  kasan, arm64: unpoison stack only with CONFIG_KASAN_STACK
  kasan: introduce set_alloc_info
  kasan: rename get_alloc/free_info
  kasan: simplify quarantine_put call site
  kselftest/arm64: check GCR_EL1 after context switch
  ...
commit 1375b9803e
@@ -4,13 +4,16 @@ The Kernel Address Sanitizer (KASAN)
 Overview
 --------
 
-KernelAddressSANitizer (KASAN) is a dynamic memory error detector designed to
-find out-of-bound and use-after-free bugs. KASAN has two modes: generic KASAN
-(similar to userspace ASan) and software tag-based KASAN (similar to userspace
-HWASan).
+KernelAddressSANitizer (KASAN) is a dynamic memory safety error detector
+designed to find out-of-bound and use-after-free bugs. KASAN has three modes:
 
-KASAN uses compile-time instrumentation to insert validity checks before every
-memory access, and therefore requires a compiler version that supports that.
+1. generic KASAN (similar to userspace ASan),
+2. software tag-based KASAN (similar to userspace HWASan),
+3. hardware tag-based KASAN (based on hardware memory tagging).
+
+Software KASAN modes (1 and 2) use compile-time instrumentation to insert
+validity checks before every memory access, and therefore require a compiler
+version that supports that.
 
 Generic KASAN is supported in both GCC and Clang. With GCC it requires version
 8.3.0 or later. Any supported Clang version is compatible, but detection of
@@ -19,7 +22,7 @@ out-of-bounds accesses for global variables is only supported since Clang 11.
 Tag-based KASAN is only supported in Clang.
 
 Currently generic KASAN is supported for the x86_64, arm, arm64, xtensa, s390
-and riscv architectures, and tag-based KASAN is supported only for arm64.
+and riscv architectures, and tag-based KASAN modes are supported only for arm64.
 
 Usage
 -----
@@ -28,30 +31,22 @@ To enable KASAN configure kernel with::
 
 	CONFIG_KASAN = y
 
-and choose between CONFIG_KASAN_GENERIC (to enable generic KASAN) and
-CONFIG_KASAN_SW_TAGS (to enable software tag-based KASAN).
+and choose between CONFIG_KASAN_GENERIC (to enable generic KASAN),
+CONFIG_KASAN_SW_TAGS (to enable software tag-based KASAN), and
+CONFIG_KASAN_HW_TAGS (to enable hardware tag-based KASAN).
 
-You also need to choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE.
-Outline and inline are compiler instrumentation types. The former produces
-smaller binary while the latter is 1.1 - 2 times faster.
+For software modes, you also need to choose between CONFIG_KASAN_OUTLINE and
+CONFIG_KASAN_INLINE. Outline and inline are compiler instrumentation types.
+The former produces smaller binary while the latter is 1.1 - 2 times faster.
 
-Both KASAN modes work with both SLUB and SLAB memory allocators.
-For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
+Both software KASAN modes work with both SLUB and SLAB memory allocators,
+while the hardware tag-based KASAN currently only supports SLUB.
+
+For better error reports that include stack traces, enable CONFIG_STACKTRACE.
 
 To augment reports with last allocation and freeing stack of the physical page,
 it is recommended to enable also CONFIG_PAGE_OWNER and boot with page_owner=on.
 
-To disable instrumentation for specific files or directories, add a line
-similar to the following to the respective kernel Makefile:
-
-- For a single file (e.g. main.o)::
-
-    KASAN_SANITIZE_main.o := n
-
-- For all files in one directory::
-
-    KASAN_SANITIZE := n
-
 Error reports
 ~~~~~~~~~~~~~
 
@@ -136,22 +131,75 @@ freed (in case of a use-after-free bug report). Next comes a description of
 the accessed slab object and information about the accessed memory page.
 
 In the last section the report shows memory state around the accessed address.
 Reading this part requires some understanding of how KASAN works.
+Internally KASAN tracks memory state separately for each memory granule, which
+is either 8 or 16 aligned bytes depending on KASAN mode. Each number in the
+memory state section of the report shows the state of one of the memory
+granules that surround the accessed address.
 
-The state of each 8 aligned bytes of memory is encoded in one shadow byte.
-Those 8 bytes can be accessible, partially accessible, freed or be a redzone.
-We use the following encoding for each shadow byte: 0 means that all 8 bytes
-of the corresponding memory region are accessible; number N (1 <= N <= 7) means
-that the first N bytes are accessible, and other (8 - N) bytes are not;
-any negative value indicates that the entire 8-byte word is inaccessible.
-We use different negative values to distinguish between different kinds of
-inaccessible memory like redzones or freed memory (see mm/kasan/kasan.h).
+For generic KASAN the size of each memory granule is 8. The state of each
+granule is encoded in one shadow byte. Those 8 bytes can be accessible,
+partially accessible, freed or be a part of a redzone. KASAN uses the following
+encoding for each shadow byte: 0 means that all 8 bytes of the corresponding
+memory region are accessible; number N (1 <= N <= 7) means that the first N
+bytes are accessible, and other (8 - N) bytes are not; any negative value
+indicates that the entire 8-byte word is inaccessible. KASAN uses different
+negative values to distinguish between different kinds of inaccessible memory
+like redzones or freed memory (see mm/kasan/kasan.h).
 
 In the report above the arrows point to the shadow byte 03, which means that
 the accessed address is partially accessible.
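
The encoding is mechanical enough to show in a few lines. The sketch below is
an editor's illustration, not part of the patch; the standalone-C framing and
helper names are assumptions. It maps an address to its shadow byte and decodes
the 0/N/negative states, so the shadow byte 03 above means that only the first
3 bytes of that granule are valid::

    /* Editor's sketch of the generic KASAN shadow encoding (hypothetical). */
    #include <stdbool.h>
    #include <stdint.h>

    #define KASAN_SHADOW_SCALE_SHIFT 3                  /* 8-byte granules */
    #define KASAN_SHADOW_SCALE_SIZE  (1UL << KASAN_SHADOW_SCALE_SHIFT)

    /* shadow = (addr >> scale_shift) + offset, as described above. */
    static int8_t *mem_to_shadow(uintptr_t addr, uintptr_t shadow_offset)
    {
        return (int8_t *)((addr >> KASAN_SHADOW_SCALE_SHIFT) + shadow_offset);
    }

    /* Decode one shadow byte for a 1-byte access at 'addr'. */
    static bool addr_is_accessible(uintptr_t addr, int8_t shadow)
    {
        if (shadow == 0)
            return true;        /* whole 8-byte granule accessible */
        if (shadow < 0)
            return false;       /* redzone, freed memory, etc. */
        /* 1..7: only the first 'shadow' bytes of the granule are valid */
        return (addr & (KASAN_SHADOW_SCALE_SIZE - 1)) < (uintptr_t)shadow;
    }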
 
 For tag-based KASAN this last report section shows the memory tags around the
-accessed address (see Implementation details section).
+accessed address (see `Implementation details`_ section).
+
+Boot parameters
+~~~~~~~~~~~~~~~
+
+Hardware tag-based KASAN mode (see the section about different modes below) is
+intended for use in production as a security mitigation. Therefore it supports
+boot parameters that allow to disable KASAN completely or otherwise control
+particular KASAN features.
+
+The things that can be controlled are:
+
+1. Whether KASAN is enabled at all.
+2. Whether KASAN collects and saves alloc/free stacks.
+3. Whether KASAN panics on a detected bug or not.
+
+The ``kasan.mode`` boot parameter allows to choose one of three main modes:
+
+- ``kasan.mode=off`` - KASAN is disabled, no tag checks are performed
+- ``kasan.mode=prod`` - only essential production features are enabled
+- ``kasan.mode=full`` - all KASAN features are enabled
+
+The chosen mode provides default control values for the features mentioned
+above. However it's also possible to override the default values by providing:
+
+- ``kasan.stacktrace=off`` or ``=on`` - enable alloc/free stack collection
+                                        (default: ``on`` for ``mode=full``,
+                                        otherwise ``off``)
+- ``kasan.fault=report`` or ``=panic`` - only print KASAN report or also panic
+                                        (default: ``report``)
+
+If ``kasan.mode`` parameter is not provided, it defaults to ``full`` when
+``CONFIG_DEBUG_KERNEL`` is enabled, and to ``prod`` otherwise.
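
As a concrete illustration (an editor's example, not from the patch), a
command line that forces the production mode but re-enables stack collection
and makes tag faults fatal would combine the parameters like this::

    kasan.mode=prod kasan.stacktrace=on kasan.fault=panic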
+
+For developers
+~~~~~~~~~~~~~~
+
+Software KASAN modes use compiler instrumentation to insert validity checks.
+Such instrumentation might be incompatible with some parts of the kernel, and
+therefore needs to be disabled. To disable instrumentation for specific files
+or directories, add a line similar to the following to the respective kernel
+Makefile:
+
+- For a single file (e.g. main.o)::
+
+    KASAN_SANITIZE_main.o := n
+
+- For all files in one directory::
+
+    KASAN_SANITIZE := n
 
 Implementation details
@@ -160,10 +208,10 @@ Implementation details
 Generic KASAN
 ~~~~~~~~~~~~~
 
-From a high level, our approach to memory error detection is similar to that
-of kmemcheck: use shadow memory to record whether each byte of memory is safe
-to access, and use compile-time instrumentation to insert checks of shadow
-memory on each memory access.
+From a high level perspective, KASAN's approach to memory error detection is
+similar to that of kmemcheck: use shadow memory to record whether each byte of
+memory is safe to access, and use compile-time instrumentation to insert checks
+of shadow memory on each memory access.
 
 Generic KASAN dedicates 1/8th of kernel memory to its shadow memory (e.g. 16TB
 to cover 128TB on x86_64) and uses direct mapping with a scale and offset to
@@ -194,20 +242,30 @@ Generic KASAN also reports the last 2 call stacks to creation of work that
 potentially has access to an object. Call stacks for the following are shown:
 call_rcu() and workqueue queuing.
 
+Generic KASAN is the only mode that delays the reuse of freed object via
+quarantine (see mm/kasan/quarantine.c for implementation).
+
 Software tag-based KASAN
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
-Tag-based KASAN uses the Top Byte Ignore (TBI) feature of modern arm64 CPUs to
-store a pointer tag in the top byte of kernel pointers. Like generic KASAN it
-uses shadow memory to store memory tags associated with each 16-byte memory
+Software tag-based KASAN requires software memory tagging support in the form
+of HWASan-like compiler instrumentation (see HWASan documentation for details).
+
+Software tag-based KASAN is currently only implemented for arm64 architecture.
+
+Software tag-based KASAN uses the Top Byte Ignore (TBI) feature of arm64 CPUs
+to store a pointer tag in the top byte of kernel pointers. Like generic KASAN
+it uses shadow memory to store memory tags associated with each 16-byte memory
 cell (therefore it dedicates 1/16th of the kernel memory for shadow memory).
 
-On each memory allocation tag-based KASAN generates a random tag, tags the
-allocated memory with this tag, and embeds this tag into the returned pointer.
+On each memory allocation software tag-based KASAN generates a random tag, tags
+the allocated memory with this tag, and embeds this tag into the returned
+pointer.
 
 Software tag-based KASAN uses compile-time instrumentation to insert checks
 before each memory access. These checks make sure that tag of the memory that
 is being accessed is equal to tag of the pointer that is used to access this
-memory. In case of a tag mismatch tag-based KASAN prints a bug report.
+memory. In case of a tag mismatch software tag-based KASAN prints a bug report.
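
Conceptually, the inserted check is tiny. The sketch below is an editor's
illustration, not from the patch; shadow_tag_of() and the other helper names
are hypothetical stand-ins for the real instrumentation callbacks::

    #include <stdint.h>

    #define KASAN_TAG_SHIFT 56   /* tag lives in the top byte, thanks to TBI */

    static inline uint8_t get_ptr_tag(const void *ptr)
    {
        return (uint8_t)((uint64_t)ptr >> KASAN_TAG_SHIFT);
    }

    /* Stand-in for the per-16-byte-granule shadow memory lookup. */
    extern uint8_t shadow_tag_of(const void *ptr);
    extern void report_tag_mismatch(const void *ptr);

    static inline void check_access(const void *ptr)
    {
        if (get_ptr_tag(ptr) != shadow_tag_of(ptr))
            report_tag_mismatch(ptr);
    }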
 
 Software tag-based KASAN also has two instrumentation modes (outline, that
 emits callbacks to check memory accesses; and inline, that performs the shadow
@@ -216,9 +274,36 @@ simply printed from the function that performs the access check. With inline
 instrumentation a brk instruction is emitted by the compiler, and a dedicated
 brk handler is used to print bug reports.
 
-A potential expansion of this mode is a hardware tag-based mode, which would
-use hardware memory tagging support instead of compiler instrumentation and
-manual shadow memory manipulation.
+Software tag-based KASAN uses 0xFF as a match-all pointer tag (accesses through
+pointers with 0xFF pointer tag aren't checked). The value 0xFE is currently
+reserved to tag freed memory regions.
+
+Software tag-based KASAN currently only supports tagging of
+kmem_cache_alloc/kmalloc and page_alloc memory.
+
+Hardware tag-based KASAN
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Hardware tag-based KASAN is similar to the software mode in concept, but uses
+hardware memory tagging support instead of compiler instrumentation and
+shadow memory.
+
+Hardware tag-based KASAN is currently only implemented for arm64 architecture
+and based on both arm64 Memory Tagging Extension (MTE) introduced in ARMv8.5
+Instruction Set Architecture, and Top Byte Ignore (TBI).
+
+Special arm64 instructions are used to assign memory tags for each allocation.
+Same tags are assigned to pointers to those allocations. On every memory
+access, hardware makes sure that tag of the memory that is being accessed is
+equal to tag of the pointer that is used to access this memory. In case of a
+tag mismatch a fault is generated and a report is printed.
+
+Hardware tag-based KASAN uses 0xFF as a match-all pointer tag (accesses through
+pointers with 0xFF pointer tag aren't checked). The value 0xFE is currently
+reserved to tag freed memory regions.
+
+Hardware tag-based KASAN currently only supports tagging of
+kmem_cache_alloc/kmalloc and page_alloc memory.
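
The two special tag values behave the same way in both tag-based modes. A
minimal sketch of the comparison (an editor's illustration; the constant names
follow the kernel's KASAN_TAG_* convention but the function is hypothetical)::

    #include <stdbool.h>
    #include <stdint.h>

    #define KASAN_TAG_KERNEL  0xFF  /* match-all: such accesses aren't checked */
    #define KASAN_TAG_INVALID 0xFE  /* reserved to mark freed memory */

    static bool access_is_valid(uint8_t ptr_tag, uint8_t mem_tag)
    {
        return ptr_tag == KASAN_TAG_KERNEL || ptr_tag == mem_tag;
    }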
 
 What memory accesses are sanitised by KASAN?
 --------------------------------------------
@@ -265,17 +350,17 @@ Most mappings in vmalloc space are small, requiring less than a full
 page of shadow space. Allocating a full shadow page per mapping would
 therefore be wasteful. Furthermore, to ensure that different mappings
 use different shadow pages, mappings would have to be aligned to
-``KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE``.
+``KASAN_GRANULE_SIZE * PAGE_SIZE``.
 
-Instead, we share backing space across multiple mappings. We allocate
+Instead, KASAN shares backing space across multiple mappings. It allocates
 a backing page when a mapping in vmalloc space uses a particular page
 of the shadow region. This page can be shared by other vmalloc
 mappings later on.
 
-We hook in to the vmap infrastructure to lazily clean up unused shadow
+KASAN hooks into the vmap infrastructure to lazily clean up unused shadow
 memory.
 
-To avoid the difficulties around swapping mappings around, we expect
+To avoid the difficulties around swapping mappings around, KASAN expects
 that the part of the shadow region that covers the vmalloc space will
 not be covered by the early shadow page, but will be left
 unmapped. This will require changes in arch-specific code.
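
The arithmetic behind this is worth spelling out (an editor's illustration
using generic KASAN's 8-byte granule; not part of the patch)::

    #define PAGE_SIZE          4096UL
    #define KASAN_GRANULE_SIZE 8UL

    /* Bytes of shadow needed to describe a 'size'-byte vmalloc mapping. */
    static unsigned long shadow_bytes_needed(unsigned long size)
    {
        return size / KASAN_GRANULE_SIZE;
    }

    /*
     * A one-page (4096-byte) mapping needs only 512 shadow bytes, and one
     * full shadow page describes KASAN_GRANULE_SIZE * PAGE_SIZE = 32KB of
     * mappings, hence the sharing and the alignment requirement above.
     */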
 
@@ -286,24 +371,31 @@ architectures that do not have a fixed module region.
 CONFIG_KASAN_KUNIT_TEST & CONFIG_TEST_KASAN_MODULE
 --------------------------------------------------
 
-``CONFIG_KASAN_KUNIT_TEST`` utilizes the KUnit Test Framework for testing.
-This means each test focuses on a small unit of functionality and
-there are a few ways these tests can be run.
+KASAN tests consist of two parts:
 
-Each test will print the KASAN report if an error is detected and then
-print the number of the test and the status of the test:
+1. Tests that are integrated with the KUnit Test Framework. Enabled with
+``CONFIG_KASAN_KUNIT_TEST``. These tests can be run and partially verified
+automatically in a few different ways, see the instructions below.
 
-pass::
+2. Tests that are currently incompatible with KUnit. Enabled with
+``CONFIG_TEST_KASAN_MODULE`` and can only be run as a module. These tests can
+only be verified manually, by loading the kernel module and inspecting the
+kernel log for KASAN reports.
+
+Each KUnit-compatible KASAN test prints a KASAN report if an error is detected.
+Then the test prints its number and status.
+
+When a test passes::
 
         ok 28 - kmalloc_double_kzfree
 
-or, if kmalloc failed::
+When a test fails due to a failed ``kmalloc``::
 
         # kmalloc_large_oob_right: ASSERTION FAILED at lib/test_kasan.c:163
         Expected ptr is not null, but is
         not ok 4 - kmalloc_large_oob_right
 
-or, if a KASAN report was expected, but not found::
+When a test fails due to a missing KASAN report::
 
         # kmalloc_double_kzfree: EXPECTATION FAILED at lib/test_kasan.c:629
         Expected kasan_data->report_expected == kasan_data->report_found, but
@@ -311,46 +403,38 @@ or, if a KASAN report was expected, but not found::
         kasan_data->report_found == 0
         not ok 28 - kmalloc_double_kzfree
 
-All test statuses are tracked as they run and an overall status will
-be printed at the end::
+At the end the cumulative status of all KASAN tests is printed. On success::
 
         ok 1 - kasan
 
-or::
+Or, if one of the tests failed::
 
         not ok 1 - kasan
 
-(1) Loadable Module
-~~~~~~~~~~~~~~~~~~~~
+There are a few ways to run KUnit-compatible KASAN tests.
+
+1. Loadable module
+~~~~~~~~~~~~~~~~~~
 
 With ``CONFIG_KUNIT`` enabled, ``CONFIG_KASAN_KUNIT_TEST`` can be built as
-a loadable module and run on any architecture that supports KASAN
-using something like insmod or modprobe. The module is called ``test_kasan``.
+a loadable module and run on any architecture that supports KASAN by loading
+the module with insmod or modprobe. The module is called ``test_kasan``.
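
For example (an editor's note, assuming the module was built as described
above), loading the tests and checking the kernel log looks like::

    modprobe test_kasan
    dmesg | tail

Each test prints its KASAN report (if any) followed by its KUnit status line.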
 
-(2) Built-In
-~~~~~~~~~~~~~
+2. Built-In
+~~~~~~~~~~~
 
 With ``CONFIG_KUNIT`` built-in, ``CONFIG_KASAN_KUNIT_TEST`` can be built-in
-on any architecture that supports KASAN. These and any other KUnit
-tests enabled will run and print the results at boot as a late-init
-call.
+on any architecture that supports KASAN. These and any other KUnit tests enabled
+will run and print the results at boot as a late-init call.
 
-(3) Using kunit_tool
-~~~~~~~~~~~~~~~~~~~~~
+3. Using kunit_tool
+~~~~~~~~~~~~~~~~~~~
 
-With ``CONFIG_KUNIT`` and ``CONFIG_KASAN_KUNIT_TEST`` built-in, we can also
-use kunit_tool to see the results of these along with other KUnit
-tests in a more readable way. This will not print the KASAN reports
-of tests that passed. Use `KUnit documentation <https://www.kernel.org/doc/html/latest/dev-tools/kunit/index.html>`_ for more up-to-date
-information on kunit_tool.
+With ``CONFIG_KUNIT`` and ``CONFIG_KASAN_KUNIT_TEST`` built-in, it's also
+possible to use ``kunit_tool`` to see the results of these and other KUnit tests
+in a more readable way. This will not print the KASAN reports of the tests that
+passed. Use `KUnit documentation <https://www.kernel.org/doc/html/latest/dev-tools/kunit/index.html>`_
+for more up-to-date information on ``kunit_tool``.
 
 .. _KUnit: https://www.kernel.org/doc/html/latest/dev-tools/kunit/index.html
-
-``CONFIG_TEST_KASAN_MODULE`` is a set of KASAN tests that could not be
-converted to KUnit. These tests can be run only as a module with
-``CONFIG_TEST_KASAN_MODULE`` built as a loadable module and
-``CONFIG_KASAN`` built-in. The type of error expected and the
-function being run is printed before the expression expected to give
-an error. Then the error is printed, if found, and that test
-should be interpreted to pass only if the error was the one expected
-by the test.
 
@@ -976,16 +976,16 @@ config VMAP_STACK
 	default y
 	bool "Use a virtually-mapped stack"
 	depends on HAVE_ARCH_VMAP_STACK
-	depends on !KASAN || KASAN_VMALLOC
+	depends on !KASAN || KASAN_HW_TAGS || KASAN_VMALLOC
 	help
 	  Enable this if you want the use virtually-mapped kernel stacks
 	  with guard pages. This causes kernel stack overflows to be
 	  caught immediately rather than causing difficult-to-diagnose
 	  corruption.
 
-	  To use this with KASAN, the architecture must support backing
-	  virtual mappings with real shadow memory, and KASAN_VMALLOC must
-	  be enabled.
+	  To use this with software KASAN modes, the architecture must support
+	  backing virtual mappings with real shadow memory, and KASAN_VMALLOC
+	  must be enabled.
 
 config ARCH_OPTIONAL_KERNEL_RWX
 	def_bool n
@@ -137,6 +137,7 @@ config ARM64
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
 	select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
+	select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
@@ -334,7 +335,7 @@ config BROKEN_GAS_INST
 
 config KASAN_SHADOW_OFFSET
 	hex
-	depends on KASAN
+	depends on KASAN_GENERIC || KASAN_SW_TAGS
 	default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
 	default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
 	default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
@@ -1571,6 +1572,9 @@ endmenu
 
 menu "ARMv8.5 architectural features"
 
+config AS_HAS_ARMV8_5
+	def_bool $(cc-option,-Wa$(comma)-march=armv8.5-a)
+
 config ARM64_BTI
 	bool "Branch Target Identification support"
 	default y
@@ -1645,6 +1649,9 @@ config ARM64_MTE
 	bool "Memory Tagging Extension support"
 	default y
 	depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
+	depends on AS_HAS_ARMV8_5
+	# Required for tag checking in the uaccess routines
+	depends on ARM64_PAN
 	select ARCH_USES_HIGH_VMA_FLAGS
 	help
 	  Memory Tagging (part of the ARMv8.5 Extensions) provides
@@ -96,6 +96,11 @@ ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
 asm-arch := armv8.4-a
 endif
 
+ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
+# make sure to pass the newest target architecture to -march.
+asm-arch := armv8.5-a
+endif
+
 ifdef asm-arch
 KBUILD_CFLAGS	+= -Wa,-march=$(asm-arch) \
 		   -DARM64_ASM_ARCH='"$(asm-arch)"'
@@ -132,7 +137,7 @@ head-y := arch/arm64/kernel/head.o
 
 ifeq ($(CONFIG_KASAN_SW_TAGS), y)
 KASAN_SHADOW_SCALE_SHIFT := 4
-else
+else ifeq ($(CONFIG_KASAN_GENERIC), y)
 KASAN_SHADOW_SCALE_SHIFT := 3
 endif
 
@@ -473,7 +473,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
 #define NOKPROBE(x)
 #endif
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 #define EXPORT_SYMBOL_NOKASAN(name)
 #else
 #define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
@@ -6,6 +6,7 @@
 #define __ASM_CACHE_H
 
 #include <asm/cputype.h>
+#include <asm/mte-kasan.h>
 
 #define CTR_L1IP_SHIFT		14
 #define CTR_L1IP_MASK		3
@@ -51,6 +52,8 @@
 
 #ifdef CONFIG_KASAN_SW_TAGS
 #define ARCH_SLAB_MINALIGN	(1ULL << KASAN_SHADOW_SCALE_SHIFT)
+#elif defined(CONFIG_KASAN_HW_TAGS)
+#define ARCH_SLAB_MINALIGN	MTE_GRANULE_SIZE
 #endif
 
 #ifndef __ASSEMBLY__
@@ -106,6 +106,7 @@
 #define ESR_ELx_FSC_TYPE	(0x3C)
 #define ESR_ELx_FSC_LEVEL	(0x03)
 #define ESR_ELx_FSC_EXTABT	(0x10)
+#define ESR_ELx_FSC_MTE		(0x11)
 #define ESR_ELx_FSC_SERROR	(0x11)
 #define ESR_ELx_FSC_ACCESS	(0x08)
 #define ESR_ELx_FSC_FAULT	(0x04)
@@ -12,7 +12,9 @@
 #define arch_kasan_reset_tag(addr)	__tag_reset(addr)
 #define arch_kasan_get_tag(addr)	__tag_get(addr)
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
+void kasan_init(void);
 
 /*
  * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
@@ -33,7 +35,6 @@
 #define _KASAN_SHADOW_START(va)	(KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
 #define KASAN_SHADOW_START	_KASAN_SHADOW_START(vabits_actual)
 
-void kasan_init(void);
 void kasan_copy_shadow(pgd_t *pgdir);
 asmlinkage void kasan_early_init(void);
 
@@ -72,7 +72,7 @@
  * address space for the shadow region respectively. They can bloat the stack
  * significantly, so double the (minimum) stack size when they are in use.
  */
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 #define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
 #define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
 					+ KASAN_SHADOW_OFFSET)
@@ -214,7 +214,7 @@ static inline unsigned long kaslr_offset(void)
 	(__force __typeof__(addr))__addr;				\
 })
 
-#ifdef CONFIG_KASAN_SW_TAGS
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
 #define __tag_shifted(tag)	((u64)(tag) << 56)
 #define __tag_reset(addr)	__untagged_addr(addr)
 #define __tag_get(addr)		(__u8)((u64)(addr) >> 56)
@@ -222,7 +222,7 @@ static inline unsigned long kaslr_offset(void)
 #define __tag_shifted(tag)	0UL
 #define __tag_reset(addr)	(addr)
 #define __tag_get(addr)		0
-#endif /* CONFIG_KASAN_SW_TAGS */
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
 
 static inline const void *__tag_set(const void *addr, u8 tag)
 {
@@ -230,6 +230,15 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 	return (const void *)(__addr | __tag_shifted(tag));
 }
 
+#ifdef CONFIG_KASAN_HW_TAGS
+#define arch_enable_tagging()			mte_enable_kernel()
+#define arch_init_tags(max_tag)			mte_init_tags(max_tag)
+#define arch_get_random_tag()			mte_get_random_tag()
+#define arch_get_mem_tag(addr)			mte_get_mem_tag(addr)
+#define arch_set_mem_tag_range(addr, size, tag)	\
+			mte_set_mem_tag_range((addr), (size), (tag))
+#endif /* CONFIG_KASAN_HW_TAGS */
+
 /*
  * Physical vs virtual RAM address space conversion. These are
  * private definitions which should NOT be used outside memory.h
 
arch/arm64/include/asm/mte-def.h (new file)
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_MTE_DEF_H
+#define __ASM_MTE_DEF_H
+
+#define MTE_GRANULE_SIZE	UL(16)
+#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
+#define MTE_TAG_SHIFT		56
+#define MTE_TAG_SIZE		4
+#define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
+
+#endif /* __ASM_MTE_DEF_H */
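
As an editor's aside (not part of the patch): the GENMASK() expression above
selects the four MTE tag bits stored in a pointer's top byte, i.e. bits 59:56.
Expanded by hand::

    #define MTE_TAG_SHIFT 56
    #define MTE_TAG_SIZE  4
    /* GENMASK(59, 56) == 0x0F00000000000000ULL */
    #define MTE_TAG_MASK  (((1ULL << MTE_TAG_SIZE) - 1) << MTE_TAG_SHIFT)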
arch/arm64/include/asm/mte-kasan.h (new file)
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_MTE_KASAN_H
+#define __ASM_MTE_KASAN_H
+
+#include <asm/mte-def.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/*
+ * The functions below are meant to be used only for the
+ * KASAN_HW_TAGS interface defined in asm/memory.h.
+ */
+#ifdef CONFIG_ARM64_MTE
+
+static inline u8 mte_get_ptr_tag(void *ptr)
+{
+	/* Note: The format of KASAN tags is 0xF<x> */
+	u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
+
+	return tag;
+}
+
+u8 mte_get_mem_tag(void *addr);
+u8 mte_get_random_tag(void);
+void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
+
+void mte_enable_kernel(void);
+void mte_init_tags(u64 max_tag);
+
+#else /* CONFIG_ARM64_MTE */
+
+static inline u8 mte_get_ptr_tag(void *ptr)
+{
+	return 0xFF;
+}
+
+static inline u8 mte_get_mem_tag(void *addr)
+{
+	return 0xFF;
+}
+static inline u8 mte_get_random_tag(void)
+{
+	return 0xFF;
+}
+static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	return addr;
+}
+
+static inline void mte_enable_kernel(void)
+{
+}
+
+static inline void mte_init_tags(u64 max_tag)
+{
+}
+
+#endif /* CONFIG_ARM64_MTE */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_MTE_KASAN_H */
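
An editor's note on the tag formats involved (not part of the patch): MTE
keeps a 4-bit tag in bits 59:56 of a pointer, while KASAN represents tags as
a full byte of the form 0xF<x>; mte_get_ptr_tag() above bridges the two by
shifting the top byte down and ORing in 0xF0. A standalone sketch::

    #include <stdint.h>

    static uint8_t kasan_tag_from_ptr(uint64_t ptr)
    {
        return 0xF0 | (uint8_t)(ptr >> 56);   /* e.g. MTE tag 0x3 -> 0xF3 */
    }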
 
@@ -5,17 +5,21 @@
 #ifndef __ASM_MTE_H
 #define __ASM_MTE_H
 
-#define MTE_GRANULE_SIZE	UL(16)
-#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
-#define MTE_TAG_SHIFT		56
-#define MTE_TAG_SIZE		4
+#include <asm/compiler.h>
+#include <asm/mte-def.h>
 
 #define __MTE_PREAMBLE		ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bitfield.h>
 #include <linux/page-flags.h>
+#include <linux/types.h>
 
 #include <asm/pgtable-types.h>
 
+extern u64 gcr_kernel_excl;
+
 void mte_clear_page_tags(void *addr);
 unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
 				      unsigned long n);
@@ -45,7 +49,9 @@ long get_mte_ctrl(struct task_struct *task);
 int mte_ptrace_copy_tags(struct task_struct *child, long request,
 			 unsigned long addr, unsigned long data);
 
-#else
+void mte_assign_mem_tag_range(void *addr, size_t size);
+
+#else /* CONFIG_ARM64_MTE */
 
 /* unused if !CONFIG_ARM64_MTE, silence the compiler */
 #define PG_mte_tagged	0
@@ -80,7 +86,11 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
 	return -EIO;
 }
 
-#endif
+static inline void mte_assign_mem_tag_range(void *addr, size_t size)
+{
+}
+
+#endif /* CONFIG_ARM64_MTE */
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_MTE_H */
@@ -152,7 +152,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_ARM64_MTE
 	u64			sctlr_tcf0;
-	u64			gcr_user_incl;
+	u64			gcr_user_excl;
 #endif
 };
 
@@ -5,7 +5,7 @@
 #ifndef __ASM_STRING_H
 #define __ASM_STRING_H
 
-#ifndef CONFIG_KASAN
+#if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
 #define __HAVE_ARCH_STRRCHR
 extern char *strrchr(const char *, int c);
 
@@ -48,7 +48,8 @@ extern void *__memset(void *, int, __kernel_size_t);
 void memcpy_flushcache(void *dst, const void *src, size_t cnt);
 #endif
 
-#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+	!defined(__SANITIZE_ADDRESS__)
 
 /*
  * For files that are not instrumented (e.g. mm/slub.c) we
@@ -159,8 +159,28 @@ static inline void __uaccess_enable_hw_pan(void)
 			CONFIG_ARM64_PAN));
 }
 
+/*
+ * The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
+ * affects EL0 and TCF affects EL1 irrespective of which TTBR is
+ * used.
+ * The kernel accesses TTBR0 usually with LDTR/STTR instructions
+ * when UAO is available, so these would act as EL0 accesses using
+ * TCF0.
+ * However futex.h code uses exclusives which would be executed as
+ * EL1, this can potentially cause a tag check fault even if the
+ * user disables TCF0.
+ *
+ * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
+ * and reset it in uaccess_disable().
+ *
+ * The Tag check override (TCO) bit disables temporarily the tag checking
+ * preventing the issue.
+ */
 static inline void uaccess_disable_privileged(void)
 {
+	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
+				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+
 	if (uaccess_ttbr0_disable())
 		return;
 
@@ -169,6 +189,9 @@ static inline void uaccess_disable_privileged(void)
 
 static inline void uaccess_enable_privileged(void)
 {
+	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
+				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+
 	if (uaccess_ttbr0_enable())
 		return;
 
@@ -46,6 +46,9 @@ int main(void)
 #ifdef CONFIG_ARM64_PTR_AUTH
   DEFINE(THREAD_KEYS_USER,	offsetof(struct task_struct, thread.keys_user));
   DEFINE(THREAD_KEYS_KERNEL,	offsetof(struct task_struct, thread.keys_kernel));
+#endif
+#ifdef CONFIG_ARM64_MTE
+  DEFINE(THREAD_GCR_EL1_USER,	offsetof(struct task_struct, thread.gcr_user_excl));
 #endif
   BLANK();
   DEFINE(S_X0,			offsetof(struct pt_regs, regs[0]));
@@ -70,6 +70,7 @@
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/cpu.h>
+#include <linux/kasan.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
@@ -1710,6 +1711,8 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
 		cleared_zero_page = true;
 		mte_clear_page_tags(lm_alias(empty_zero_page));
 	}
+
+	kasan_init_hw_tags_cpu();
 }
 #endif /* CONFIG_ARM64_MTE */
 
@@ -173,6 +173,43 @@ alternative_else_nop_endif
 #endif
 	.endm
 
+	.macro mte_set_gcr, tmp, tmp2
+#ifdef CONFIG_ARM64_MTE
+	/*
+	 * Calculate and set the exclude mask preserving
+	 * the RRND (bit[16]) setting.
+	 */
+	mrs_s	\tmp2, SYS_GCR_EL1
+	bfi	\tmp2, \tmp, #0, #16
+	msr_s	SYS_GCR_EL1, \tmp2
+	isb
+#endif
+	.endm
+
+	.macro mte_set_kernel_gcr, tmp, tmp2
+#ifdef CONFIG_KASAN_HW_TAGS
+alternative_if_not ARM64_MTE
+	b	1f
+alternative_else_nop_endif
+	ldr_l	\tmp, gcr_kernel_excl
+
+	mte_set_gcr \tmp, \tmp2
+1:
+#endif
+	.endm
+
+	.macro mte_set_user_gcr, tsk, tmp, tmp2
+#ifdef CONFIG_ARM64_MTE
+alternative_if_not ARM64_MTE
+	b	1f
+alternative_else_nop_endif
+	ldr	\tmp, [\tsk, #THREAD_GCR_EL1_USER]
+
+	mte_set_gcr \tmp, \tmp2
+1:
+#endif
+	.endm
+
 	.macro	kernel_entry, el, regsize = 64
 	.if	\regsize == 32
 	mov	w0, w0				// zero upper 32 bits of x0
@@ -212,6 +249,8 @@ alternative_else_nop_endif
 
 	ptrauth_keys_install_kernel tsk, x20, x22, x23
 
+	mte_set_kernel_gcr x22, x23
+
 	scs_load tsk, x20
 	.else
 	add	x21, sp, #S_FRAME_SIZE
@@ -315,6 +354,8 @@ alternative_else_nop_endif
 	/* No kernel C function calls after this as user keys are set. */
 	ptrauth_keys_install_user tsk, x0, x1, x2
 
+	mte_set_user_gcr tsk, x0, x1
+
 	apply_ssbd 0, x0, x1
 	.endif
 
@@ -433,7 +433,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	bl	__pi_memset
 	dsb	ishst				// Make zero page visible to PTW
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	bl	kasan_early_init
 #endif
 #ifdef CONFIG_RANDOMIZE_BASE
@@ -371,6 +371,11 @@ static void swsusp_mte_restore_tags(void)
 		unsigned long pfn = xa_state.xa_index;
 		struct page *page = pfn_to_online_page(pfn);
 
+		/*
+		 * It is not required to invoke page_kasan_tag_reset(page)
+		 * at this point since the tags stored in page->flags are
+		 * already restored.
+		 */
 		mte_restore_page_tags(page_address(page), tags);
 
 		mte_free_tag_storage(tags);
@@ -37,7 +37,7 @@ __efistub_strncmp = __pi_strncmp;
 __efistub_strrchr		= __pi_strrchr;
 __efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 __efistub___memcpy		= __pi_memcpy;
 __efistub___memmove		= __pi_memmove;
 __efistub___memset		= __pi_memset;
@@ -161,7 +161,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	/* use the top 16 bits to randomize the linear region */
 	memstart_offset_seed = seed >> 48;
 
-	if (IS_ENABLED(CONFIG_KASAN))
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 		/*
 		 * KASAN does not expect the module region to intersect the
 		 * vmalloc region, since shadow memory is allocated for each
@@ -30,7 +30,8 @@ void *module_alloc(unsigned long size)
 	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
 		gfp_mask |= __GFP_NOWARN;
 
-	if (IS_ENABLED(CONFIG_KASAN))
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 		/* don't exceed the static module region - see below */
 		module_alloc_end = MODULES_END;
 
@@ -39,7 +40,8 @@ void *module_alloc(unsigned long size)
 				NUMA_NO_NODE, __builtin_return_address(0));
 
 	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
-	    !IS_ENABLED(CONFIG_KASAN))
+	    !IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+	    !IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 		/*
 		 * KASAN can only deal with module allocations being served
 		 * from the reserved module region, since the remainder of
@@ -13,13 +13,18 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/thread_info.h>
+#include <linux/types.h>
 #include <linux/uio.h>
 
+#include <asm/barrier.h>
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
+#include <asm/mte-kasan.h>
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 
+u64 gcr_kernel_excl __ro_after_init;
+
 static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
 {
 	pte_t old_pte = READ_ONCE(*ptep);
@@ -31,6 +36,15 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
 		return;
 	}
 
+	page_kasan_tag_reset(page);
+	/*
+	 * We need smp_wmb() in between setting the flags and clearing the
+	 * tags because if another thread reads page->flags and builds a
+	 * tagged address out of it, there is an actual dependency to the
+	 * memory access, but on the current thread we do not guarantee that
+	 * the new page->flags are visible before the tags were updated.
+	 */
+	smp_wmb();
 	mte_clear_page_tags(page_address(page));
 }
 
@@ -72,6 +86,78 @@ int memcmp_pages(struct page *page1, struct page *page2)
 	return ret;
 }
 
+u8 mte_get_mem_tag(void *addr)
+{
+	if (!system_supports_mte())
+		return 0xFF;
+
+	asm(__MTE_PREAMBLE "ldg %0, [%0]"
+	    : "+r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+u8 mte_get_random_tag(void)
+{
+	void *addr;
+
+	if (!system_supports_mte())
+		return 0xFF;
+
+	asm(__MTE_PREAMBLE "irg %0, %0"
+	    : "+r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	void *ptr = addr;
+
+	if ((!system_supports_mte()) || (size == 0))
+		return addr;
+
+	/* Make sure that size is MTE granule aligned. */
+	WARN_ON(size & (MTE_GRANULE_SIZE - 1));
+
+	/* Make sure that the address is MTE granule aligned. */
+	WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
+
+	tag = 0xF0 | tag;
+	ptr = (void *)__tag_set(ptr, tag);
+
+	mte_assign_mem_tag_range(ptr, size);
+
+	return ptr;
+}
+
+void mte_init_tags(u64 max_tag)
+{
+	static bool gcr_kernel_excl_initialized;
+
+	if (!gcr_kernel_excl_initialized) {
+		/*
+		 * The format of the tags in KASAN is 0xFF and in MTE is 0xF.
+		 * This conversion extracts an MTE tag from a KASAN tag.
+		 */
+		u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
+					     max_tag), 0);
+
+		gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
+		gcr_kernel_excl_initialized = true;
+	}
+
+	/* Enable the kernel exclude mask for random tags generation. */
+	write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
+}
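/*
 * Editor's worked example for the include/exclude conversion above (not part
 * of the patch), for the typical max_tag of 0xFE (KASAN tag 0xF<E>, MTE tag
 * 0xE):
 *
 *   FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT, 0xFE) == 0xE
 *   incl = GENMASK(0xE, 0)            == 0x7FFF  (tags 0x0..0xE includable)
 *   gcr_kernel_excl = ~incl & 0xFFFF  == 0x8000  (only tag 0xF excluded)
 *
 * Excluding tag 0xF keeps IRG from ever generating the 0xFF match-all tag.
 */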
+
+void mte_enable_kernel(void)
+{
+	/* Enable MTE Sync Mode for EL1. */
+	sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
+	isb();
+}
+
 static void update_sctlr_el1_tcf0(u64 tcf0)
 {
 	/* ISB required for the kernel uaccess routines */
@@ -92,23 +178,26 @@ static void set_sctlr_el1_tcf0(u64 tcf0)
 	preempt_enable();
 }
 
-static void update_gcr_el1_excl(u64 incl)
+static void update_gcr_el1_excl(u64 excl)
 {
-	u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
 
 	/*
-	 * Note that 'incl' is an include mask (controlled by the user via
-	 * prctl()) while GCR_EL1 accepts an exclude mask.
+	 * Note that the mask controlled by the user via prctl() is an
+	 * include while GCR_EL1 accepts an exclude mask.
	 * No need for ISB since this only affects EL0 currently, implicit
	 * with ERET.
	 */
 	sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
 }
 
-static void set_gcr_el1_excl(u64 incl)
+static void set_gcr_el1_excl(u64 excl)
 {
-	current->thread.gcr_user_incl = incl;
-	update_gcr_el1_excl(incl);
+	current->thread.gcr_user_excl = excl;
+
+	/*
+	 * SYS_GCR_EL1 will be set to current->thread.gcr_user_excl value
+	 * by mte_set_user_gcr() in kernel_exit,
+	 */
 }
 
 void flush_mte_state(void)
@@ -123,7 +212,7 @@ void flush_mte_state(void)
 	/* disable tag checking */
 	set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
 	/* reset tag generation mask */
-	set_gcr_el1_excl(0);
+	set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
 }
 
 void mte_thread_switch(struct task_struct *next)
@@ -134,7 +223,6 @@ void mte_thread_switch(struct task_struct *next)
 	/* avoid expensive SCTLR_EL1 accesses if no change */
 	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
 		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
-	update_gcr_el1_excl(next->thread.gcr_user_incl);
 }
 
 void mte_suspend_exit(void)
@@ -142,13 +230,14 @@ void mte_suspend_exit(void)
 	if (!system_supports_mte())
 		return;
 
-	update_gcr_el1_excl(current->thread.gcr_user_incl);
+	update_gcr_el1_excl(gcr_kernel_excl);
 }
 
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
 	u64 tcf0;
-	u64 gcr_incl = (arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT;
+	u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
+		       SYS_GCR_EL1_EXCL_MASK;
 
 	if (!system_supports_mte())
 		return 0;
@@ -169,10 +258,10 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 
 	if (task != current) {
 		task->thread.sctlr_tcf0 = tcf0;
-		task->thread.gcr_user_incl = gcr_incl;
+		task->thread.gcr_user_excl = gcr_excl;
 	} else {
 		set_sctlr_el1_tcf0(tcf0);
-		set_gcr_el1_excl(gcr_incl);
+		set_gcr_el1_excl(gcr_excl);
 	}
 
 	return 0;
@@ -181,11 +270,12 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 long get_mte_ctrl(struct task_struct *task)
 {
 	unsigned long ret;
+	u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;
 
 	if (!system_supports_mte())
 		return 0;
 
-	ret = task->thread.gcr_user_incl << PR_MTE_TAG_SHIFT;
+	ret = incl << PR_MTE_TAG_SHIFT;
 
 	switch (task->thread.sctlr_tcf0) {
 	case SCTLR_EL1_TCF0_NONE:
@@ -358,7 +358,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 	smp_build_mpidr_hash();
 
 	/* Init percpu seeds for random tags after cpus are set up. */
-	kasan_init_tags();
+	kasan_init_sw_tags();
 
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	/*
@@ -133,7 +133,7 @@ SYM_FUNC_START(_cpu_resume)
 	 */
 	bl	cpu_do_resume
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
 	mov	x0, sp
 	bl	kasan_unpoison_task_stack_below
 #endif
@@ -462,6 +462,8 @@ void __init smp_prepare_boot_cpu(void)
 	/* Conditionally switch to GIC PMR for interrupt masking */
 	if (system_uses_irq_prio_masking())
 		init_gic_priority_masking();
+
+	kasan_init_hw_tags();
 }
 
 static u64 __init of_get_cpu_mpidr(struct device_node *dn)
@@ -149,3 +149,19 @@ SYM_FUNC_START(mte_restore_page_tags)
 
 	ret
 SYM_FUNC_END(mte_restore_page_tags)
+
+/*
+ * Assign allocation tags for a region of memory based on the pointer tag
+ *   x0 - source pointer
+ *   x1 - size
+ *
+ * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
+ * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ */
+SYM_FUNC_START(mte_assign_mem_tag_range)
+1:	stg	x0, [x0]
+	add	x0, x0, #MTE_GRANULE_SIZE
+	subs	x1, x1, #MTE_GRANULE_SIZE
+	b.gt	1b
+	ret
+SYM_FUNC_END(mte_assign_mem_tag_range)
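
The tight STG loop above is easier to read in C. An editor's sketch (not part
of the patch; set_granule_tag() is a hypothetical stand-in for the stg
instruction)::

    extern void set_granule_tag(void *addr);  /* models one stg x0, [x0] */

    void mte_assign_mem_tag_range_c(char *addr, long size)
    {
        do {
            set_granule_tag(addr);  /* tag granule with addr's pointer tag */
            addr += 16;             /* MTE_GRANULE_SIZE */
            size -= 16;
        } while (size > 0);         /* matches the b.gt (signed) exit test */
    }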
|
||||
|
@ -23,6 +23,15 @@ void copy_highpage(struct page *to, struct page *from)
|
||||
|
||||
if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
|
||||
set_bit(PG_mte_tagged, &to->flags);
|
||||
page_kasan_tag_reset(to);
|
||||
/*
|
||||
* We need smp_wmb() in between setting the flags and clearing the
|
||||
* tags because if another thread reads page->flags and builds a
|
||||
* tagged address out of it, there is an actual dependency to the
|
||||
* memory access, but on the current thread we do not guarantee that
|
||||
* the new page->flags are visible before the tags were updated.
|
||||
*/
|
||||
smp_wmb();
|
||||
mte_copy_page_tags(kto, kfrom);
|
||||
}
|
||||
}
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <linux/mm.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kasan.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/page-flags.h>
|
||||
@ -33,6 +34,7 @@
|
||||
#include <asm/debug-monitors.h>
|
||||
#include <asm/esr.h>
|
||||
#include <asm/kprobes.h>
|
||||
#include <asm/mte.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/sysreg.h>
|
||||
#include <asm/system_misc.h>
|
||||
@ -296,6 +298,57 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
|
||||
do_exit(SIGKILL);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KASAN_HW_TAGS
|
||||
static void report_tag_fault(unsigned long addr, unsigned int esr,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
bool is_write = ((esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT) != 0;
|
||||
|
||||
/*
|
||||
* SAS bits aren't set for all faults reported in EL1, so we can't
|
||||
* find out access size.
|
||||
*/
|
||||
kasan_report(addr, 0, is_write, regs->pc);
|
||||
}
|
||||
#else
|
||||
/* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */
|
||||
static inline void report_tag_fault(unsigned long addr, unsigned int esr,
|
||||
struct pt_regs *regs) { }
|
||||
#endif
|
||||
|
||||
static void do_tag_recovery(unsigned long addr, unsigned int esr,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
static bool reported;
|
||||
|
||||
if (!READ_ONCE(reported)) {
|
||||
report_tag_fault(addr, esr, regs);
|
||||
WRITE_ONCE(reported, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* Disable MTE Tag Checking on the local CPU for the current EL.
|
||||
* It will be done lazily on the other CPUs when they will hit a
|
||||
* tag fault.
|
||||
*/
|
||||
sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_NONE);
|
||||
isb();
|
||||
}
|
||||
|
||||
static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
|
||||
{
|
||||
unsigned int ec = ESR_ELx_EC(esr);
|
||||
unsigned int fsc = esr & ESR_ELx_FSC;
|
||||
|
||||
if (ec != ESR_ELx_EC_DABT_CUR)
|
||||
return false;
|
||||
|
||||
if (fsc == ESR_ELx_FSC_MTE)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void __do_kernel_fault(unsigned long addr, unsigned int esr,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
@ -312,6 +365,12 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
|
||||
"Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
|
||||
return;
|
||||
|
||||
if (is_el1_mte_sync_tag_check_fault(esr)) {
|
||||
do_tag_recovery(addr, esr, regs);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (is_el1_permission_fault(addr, esr, regs)) {
|
||||
if (esr & ESR_ELx_WNR)
|
||||
msg = "write to read-only memory";
|
||||
|
@ -21,6 +21,8 @@
|
||||
#include <asm/sections.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
|
||||
|
||||
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
|
||||
|
||||
/*
|
||||
@ -208,7 +210,7 @@ static void __init clear_pgds(unsigned long start,
|
||||
set_pgd(pgd_offset_k(start), __pgd(0));
|
||||
}
|
||||
|
||||
void __init kasan_init(void)
|
||||
static void __init kasan_init_shadow(void)
|
||||
{
|
||||
u64 kimg_shadow_start, kimg_shadow_end;
|
||||
u64 mod_shadow_start, mod_shadow_end;
|
||||
@ -269,8 +271,21 @@ void __init kasan_init(void)
|
||||
|
||||
memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
|
||||
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
|
||||
|
||||
/* At this point kasan is fully initialized. Enable error messages */
|
||||
init_task.kasan_depth = 0;
|
||||
pr_info("KernelAddressSanitizer initialized\n");
|
||||
}
|
||||
|
||||
static void __init kasan_init_depth(void)
|
||||
{
|
||||
init_task.kasan_depth = 0;
|
||||
}
|
||||
|
||||
void __init kasan_init(void)
|
||||
{
|
||||
kasan_init_shadow();
|
||||
kasan_init_depth();
|
||||
#if defined(CONFIG_KASAN_GENERIC)
|
||||
/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
|
||||
pr_info("KernelAddressSanitizer initialized\n");
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
|
||||
|
@ -53,6 +53,15 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
|
||||
if (!tags)
|
||||
return false;
|
||||
|
||||
page_kasan_tag_reset(page);
|
||||
/*
|
||||
* We need smp_wmb() in between setting the flags and clearing the
|
||||
* tags because if another thread reads page->flags and builds a
|
||||
* tagged address out of it, there is an actual dependency to the
|
||||
* memory access, but on the current thread we do not guarantee that
|
||||
* the new page->flags are visible before the tags were updated.
|
||||
*/
|
||||
smp_wmb();
|
||||
mte_restore_page_tags(page_address(page), tags);
|
||||
|
||||
return true;
|
||||
|
@ -40,9 +40,15 @@
|
||||
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
|
||||
|
||||
#ifdef CONFIG_KASAN_SW_TAGS
|
||||
#define TCR_KASAN_FLAGS TCR_TBI1 | TCR_TBID1
|
||||
#define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1
|
||||
#else
|
||||
#define TCR_KASAN_FLAGS 0
|
||||
#define TCR_KASAN_SW_FLAGS 0
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KASAN_HW_TAGS
|
||||
#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1
|
||||
#else
|
||||
#define TCR_KASAN_HW_FLAGS 0
|
||||
#endif
|
||||
|
||||
/*
|
||||
@ -427,6 +433,10 @@ SYM_FUNC_START(__cpu_setup)
|
||||
*/
|
||||
mov_q x5, MAIR_EL1_SET
|
||||
#ifdef CONFIG_ARM64_MTE
|
||||
mte_tcr .req x20
|
||||
|
||||
mov mte_tcr, #0
|
||||
|
||||
/*
|
||||
* Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
|
||||
* (ID_AA64PFR1_EL1[11:8] > 1).
|
||||
@ -447,6 +457,9 @@ SYM_FUNC_START(__cpu_setup)
|
||||
/* clear any pending tag check faults in TFSR*_EL1 */
|
||||
msr_s SYS_TFSR_EL1, xzr
|
||||
msr_s SYS_TFSRE0_EL1, xzr
|
||||
|
||||
/* set the TCR_EL1 bits */
|
||||
mov_q mte_tcr, TCR_KASAN_HW_FLAGS
|
||||
1:
|
||||
#endif
|
||||
msr mair_el1, x5
|
||||
@ -456,7 +469,11 @@ SYM_FUNC_START(__cpu_setup)
|
||||
*/
|
||||
mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
|
||||
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
|
||||
TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
|
||||
TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
|
||||
#ifdef CONFIG_ARM64_MTE
|
||||
orr x10, x10, mte_tcr
|
||||
.unreq mte_tcr
|
||||
#endif
|
||||
tcr_clear_errata_bits x10, x9, x5
|
||||
|
||||
#ifdef CONFIG_ARM64_VA_BITS_52
@@ -29,7 +29,7 @@
enum address_markers_idx {
PAGE_OFFSET_NR = 0,
PAGE_END_NR,
#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
KASAN_START_NR,
#endif
};
@@ -37,7 +37,7 @@ enum address_markers_idx {
static struct addr_marker address_markers[] = {
{ PAGE_OFFSET, "Linear Mapping start" },
{ 0 /* PAGE_END */, "Linear Mapping end" },
#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
{ 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
{ KASAN_SHADOW_END, "Kasan shadow end" },
#endif
@@ -383,7 +383,7 @@ void ptdump_check_wx(void)
static int ptdump_init(void)
{
address_markers[PAGE_END_NR].start_address = PAGE_END;
#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
#endif
ptdump_initialize();

@@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#undef CONFIG_KASAN
#undef CONFIG_KASAN_GENERIC
#include "../lib/string.c"

int strncmp(const char *cs, const char *ct, size_t count)

@@ -12,6 +12,7 @@
#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS
#undef CONFIG_KASAN
#undef CONFIG_KASAN_GENERIC

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

@@ -112,7 +112,7 @@ SYM_FUNC_START(do_suspend_lowlevel)
movq pt_regs_r14(%rax), %r14
movq pt_regs_r15(%rax), %r15

#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
/*
* The suspend path may have poisoned some areas deeper in the stack,
* which we now need to unpoison.

@@ -9,7 +9,7 @@
* even in compilation units that selectively disable KASAN, but must use KASAN
* to validate access to an address. Never use these in header files!
*/
#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
bool __kasan_check_read(const volatile void *p, unsigned int size);
bool __kasan_check_write(const volatile void *p, unsigned int size);
#else

@@ -2,6 +2,7 @@
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
@@ -11,7 +12,7 @@ struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/pgtable.h>
#include <linux/linkage.h>
#include <asm/kasan.h>

/* kasan_data struct is used in KUnit tests for KASAN expected failures */
@@ -20,6 +21,20 @@ struct kunit_kasan_expectation {
bool report_found;
};

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_SHADOW_INIT 0xFF
#else
#define KASAN_SHADOW_INIT 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
@@ -35,88 +50,219 @@ static inline void *kasan_mem_to_shadow(const void *addr)
+ KASAN_SHADOW_OFFSET;
}
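
kasan_mem_to_shadow() above is the whole shadow scheme in one line: shift the
address right by KASAN_SHADOW_SCALE_SHIFT (3, so one shadow byte covers an
8-byte granule) and add KASAN_SHADOW_OFFSET. A hedged userspace model of the
arithmetic (the offset constant is illustrative; the real value is chosen per
architecture):

#include <stdint.h>
#include <stdio.h>

#define SHADOW_SCALE_SHIFT 3			/* 8 bytes of memory per shadow byte */
#define SHADOW_OFFSET 0xdffffc0000000000ULL	/* illustrative, arch-specific */

static uint64_t mem_to_shadow(uint64_t addr)
{
	return (addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
	uint64_t a = 0xffff888000000000ULL;

	/* Addresses within one 8-byte granule share a shadow byte;
	 * the next granule advances the shadow address by one. */
	printf("%#llx -> %#llx\n", (unsigned long long)a,
	       (unsigned long long)mem_to_shadow(a));
	printf("%#llx -> %#llx\n", (unsigned long long)(a + 8),
	       (unsigned long long)mem_to_shadow(a + 8));
	return 0;
}
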

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

void kasan_unpoison_shadow(const void *address, size_t size);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void kasan_unpoison_task_stack(struct task_struct *task);
static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
unsigned long size)
{}

void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);
static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
slab_flags_t *flags);
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object);
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
const void *object);

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
gfp_t flags);
void kasan_kfree_large(void *ptr, unsigned long ip);
void kasan_poison_kfree(void *ptr, unsigned long ip);
void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size, gfp_t flags);
void * __must_check kasan_krealloc(const void *object, size_t new_size,
gfp_t flags);

void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags);
bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
#ifdef CONFIG_KASAN

struct kasan_cache {
int alloc_meta_offset;
int free_meta_offset;
};

/*
* These functions provide a special case to support backing module
* allocations with real shadow memory. With KASAN vmalloc, the special
* case is unnecessary, as the work is handled in the generic case.
*/
#ifndef CONFIG_KASAN_VMALLOC
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
#else
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
#endif
#ifdef CONFIG_KASAN_HW_TAGS

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

size_t __ksize(const void *);
static inline void kasan_unpoison_slab(const void *ptr)
static __always_inline bool kasan_enabled(void)
{
kasan_unpoison_shadow(ptr, __ksize(ptr));
return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
if (kasan_enabled())
return __kasan_never_merge();
return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
if (kasan_enabled())
__kasan_unpoison_range(addr, size);
}

void __kasan_alloc_pages(struct page *page, unsigned int order);
static __always_inline void kasan_alloc_pages(struct page *page,
unsigned int order)
{
if (kasan_enabled())
__kasan_alloc_pages(page, order);
}

void __kasan_free_pages(struct page *page, unsigned int order);
static __always_inline void kasan_free_pages(struct page *page,
unsigned int order)
{
if (kasan_enabled())
__kasan_free_pages(page, order);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
unsigned int *size, slab_flags_t *flags)
{
if (kasan_enabled())
__kasan_cache_create(cache, size, flags);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
if (kasan_enabled())
return __kasan_metadata_size(cache);
return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
if (kasan_enabled())
__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
struct kmem_cache *cache, const void *object)
{
if (kasan_enabled())
return __kasan_init_slab_obj(cache, object);
return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
unsigned long ip)
{
if (kasan_enabled())
return __kasan_slab_free(s, object, ip);
return false;
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
if (kasan_enabled())
__kasan_slab_free_mempool(ptr, ip);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
void *object, gfp_t flags);
static __always_inline void * __must_check kasan_slab_alloc(
struct kmem_cache *s, void *object, gfp_t flags)
{
if (kasan_enabled())
return __kasan_slab_alloc(s, object, flags);
return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
const void *object, size_t size, gfp_t flags)
{
if (kasan_enabled())
return __kasan_kmalloc(s, object, size, flags);
return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
size_t size, gfp_t flags)
{
if (kasan_enabled())
return __kasan_kmalloc_large(ptr, size, flags);
return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
size_t new_size, gfp_t flags)
{
if (kasan_enabled())
return __kasan_krealloc(object, new_size, flags);
return (void *)object;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
{
if (kasan_enabled())
__kasan_kfree_large(ptr, ip);
}
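
Every annotation above has the same shape: an out-of-line __kasan_<op>()
implementation plus a static __always_inline wrapper that first tests
kasan_enabled(), i.e. the kasan_flag_enabled static key, so that booting with
kasan.mode=off reduces each hook to a patched-out branch. A userspace sketch
of the pattern, with a plain bool standing in for the kernel's static key and
invented hook names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for static_branch_likely(&kasan_flag_enabled). */
static bool kasan_flag = true;

static inline bool sketch_kasan_enabled(void) { return kasan_flag; }

/* Out-of-line slow path, analogous to __kasan_kmalloc(). */
static void *__sketch_kmalloc_hook(void *object, size_t size)
{
	printf("poison redzone around %p (%zu bytes)\n", object, size);
	return object;
}

/* Inline wrapper, analogous to kasan_kmalloc(). */
static inline void *sketch_kmalloc_hook(void *object, size_t size)
{
	if (sketch_kasan_enabled())
		return __sketch_kmalloc_hook(object, size);
	return object;
}

int main(void)
{
	void *p = malloc(32);

	sketch_kmalloc_hook(p, 32);	/* slow path runs only when enabled */
	free(p);
	return 0;
}
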
size_t kasan_metadata_size(struct kmem_cache *cache);

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_shadow(const void *address, size_t size) {}

static inline void kasan_unpoison_task_stack(struct task_struct *task) {}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

static inline bool kasan_enabled(void)
{
return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}

static inline void kasan_cache_create(struct kmem_cache *cache,
unsigned int *size,
slab_flags_t *flags) {}

static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
@@ -127,54 +273,42 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
{
return (void *)object;
}

static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags)
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
unsigned long ip)
{
return ptr;
return false;
}
static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags)
{
return object;
}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size, gfp_t flags)
{
return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
gfp_t flags)
{
return (void *)object;
}

static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags)
{
return object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
unsigned long ip)
{
return false;
}

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
unsigned long size)
{}

static inline void kasan_unpoison_slab(const void *ptr) { }
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}

#endif /* CONFIG_KASAN */

#ifdef CONFIG_KASAN_GENERIC
#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#define KASAN_SHADOW_INIT 0
#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -188,36 +322,50 @@ static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#ifdef CONFIG_KASAN_SW_TAGS
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

#define KASAN_SHADOW_INIT 0xFF

void kasan_init_tags(void);

void *kasan_reset_tag(const void *addr);
static inline void *kasan_reset_tag(const void *addr)
{
return (void *)arch_kasan_reset_tag(addr);
}

bool kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS */

static inline void kasan_init_tags(void) { }
#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS */
#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end);
#else

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
unsigned long size)
{
@@ -232,7 +380,26 @@ static inline void kasan_release_vmalloc(unsigned long start,
unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end) {}
#endif

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
!defined(CONFIG_KASAN_VMALLOC)

/*
* These functions provide a special case to support backing module
* allocations with real shadow memory. With KASAN vmalloc, the special
* case is unnecessary, as the work is handled in the generic case.
*/
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);

@@ -31,6 +31,7 @@
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>

struct mempolicy;
struct anon_vma;
@@ -1421,23 +1422,31 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_KASAN_SW_TAGS
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline u8 page_kasan_tag(const struct page *page)
{
return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
if (kasan_enabled())
return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
return 0xff;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
if (kasan_enabled()) {
page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
}
}

static inline void page_kasan_tag_reset(struct page *page)
{
page_kasan_tag_set(page, 0xff);
if (kasan_enabled())
page_kasan_tag_set(page, 0xff);
}
#else

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline u8 page_kasan_tag(const struct page *page)
{
return 0xff;
@@ -1445,7 +1454,8 @@ static inline u8 page_kasan_tag(const struct page *page)

static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }
#endif

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline struct zone *page_zone(const struct page *page)
{
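
page_kasan_tag() and its setters store a page's 8-bit memory tag inline in
page->flags, with 0xff meaning "untagged", and now consult kasan_enabled()
first so the field stays untouched when hardware tag-based KASAN is built in
but switched off at boot. A self-contained model of the packing (the shift is
illustrative; the real KASAN_TAG_PGSHIFT is derived from the page flags
layout):

#include <stdint.h>
#include <stdio.h>

#define TAG_PGSHIFT 48		/* illustrative bit position */
#define TAG_MASK 0xFFUL

static unsigned long flags;	/* stand-in for page->flags */

static uint8_t page_tag(void)
{
	return (flags >> TAG_PGSHIFT) & TAG_MASK;
}

static void page_tag_set(uint8_t tag)
{
	flags &= ~(TAG_MASK << TAG_PGSHIFT);	/* clear the old tag */
	flags |= ((unsigned long)tag & TAG_MASK) << TAG_PGSHIFT;
}

int main(void)
{
	page_tag_set(0xAB);
	printf("tag = %#x\n", page_tag());	/* 0xab */
	page_tag_set(0xFF);			/* reset to the untagged value */
	printf("tag = %#x\n", page_tag());
	return 0;
}
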

@@ -96,7 +96,8 @@ void module_arch_cleanup(struct module *mod);
/* Any cleanup before freeing mod->module_init */
void module_arch_freeing_init(struct module *mod);

#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
!defined(CONFIG_KASAN_VMALLOC)
#include <linux/kasan.h>
#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else

@@ -77,7 +77,7 @@
#define LAST_CPUPID_SHIFT 0
#endif

#ifdef CONFIG_KASAN_SW_TAGS
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
#define KASAN_TAG_WIDTH 8
#else
#define KASAN_TAG_WIDTH 0

@@ -1234,7 +1234,7 @@ struct task_struct {
u64 timer_slack_ns;
u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
unsigned int kasan_depth;
#endif

@@ -267,7 +267,7 @@ void __write_overflow(void) __compiletime_error("detected write beyond size of o

#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)

#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);

@@ -176,7 +176,7 @@ struct task_struct init_task
.numa_group = NULL,
.numa_faults = NULL,
#endif
#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
.kasan_depth = 1,
#endif
#ifdef CONFIG_KCSAN

@@ -225,8 +225,8 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
if (!s)
continue;

/* Clear the KASAN shadow of the stack. */
kasan_unpoison_shadow(s->addr, THREAD_SIZE);
/* Mark stack accessible for KASAN. */
kasan_unpoison_range(s->addr, THREAD_SIZE);

/* Clear stale pointers from reused stack. */
memset(s->addr, 0, THREAD_SIZE);

@@ -6,7 +6,10 @@ config HAVE_ARCH_KASAN
config HAVE_ARCH_KASAN_SW_TAGS
bool

config HAVE_ARCH_KASAN_VMALLOC
config HAVE_ARCH_KASAN_HW_TAGS
bool

config HAVE_ARCH_KASAN_VMALLOC
bool

config CC_HAS_KASAN_GENERIC
@@ -15,15 +18,20 @@ config CC_HAS_KASAN_GENERIC
config CC_HAS_KASAN_SW_TAGS
def_bool $(cc-option, -fsanitize=kernel-hwaddress)

# This option is only required for software KASAN modes.
# Old GCC versions don't have proper support for no_sanitize_address.
# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89124 for details.
config CC_HAS_WORKING_NOSANITIZE_ADDRESS
def_bool !CC_IS_GCC || GCC_VERSION >= 80300

menuconfig KASAN
bool "KASAN: runtime memory debugger"
depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
depends on (((HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
HAVE_ARCH_KASAN_HW_TAGS
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
select STACKDEPOT
help
Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs.
@@ -35,21 +43,24 @@ choice
prompt "KASAN mode"
default KASAN_GENERIC
help
KASAN has two modes: generic KASAN (similar to userspace ASan,
x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC) and
software tag-based KASAN (a version based on software memory
tagging, arm64 only, similar to userspace HWASan, enabled with
CONFIG_KASAN_SW_TAGS).
KASAN has three modes:
1. generic KASAN (similar to userspace ASan,
x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC),
2. software tag-based KASAN (arm64 only, based on software
memory tagging (similar to userspace HWASan), enabled with
CONFIG_KASAN_SW_TAGS), and
3. hardware tag-based KASAN (arm64 only, based on hardware
memory tagging, enabled with CONFIG_KASAN_HW_TAGS).

Both generic and tag-based KASAN are strictly debugging features.
All KASAN modes are strictly debugging features.

For better error reports enable CONFIG_STACKTRACE.

config KASAN_GENERIC
bool "Generic mode"
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
select STACKDEPOT
help
Enables generic KASAN mode.

@@ -62,23 +73,22 @@ config KASAN_GENERIC
and introduces an overhead of ~x1.5 for the rest of the allocations.
The performance slowdown is ~x3.

For better error detection enable CONFIG_STACKTRACE.

Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).

config KASAN_SW_TAGS
bool "Software tag-based mode"
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
select STACKDEPOT
help
Enables software tag-based KASAN mode.

This mode requires Top Byte Ignore support by the CPU and therefore
is only supported for arm64. This mode requires Clang.
This mode requires software memory tagging support in the form of
HWASan-like compiler instrumentation.

Currently this mode is only implemented for arm64 CPUs and relies on
Top Byte Ignore. This mode requires Clang.

This mode consumes about 1/16th of available memory at kernel start
and introduces an overhead of ~20% for the rest of the allocations.
@@ -86,15 +96,27 @@ config KASAN_SW_TAGS
casting and comparison, as it embeds tags into the top byte of each
pointer.

For better error detection enable CONFIG_STACKTRACE.

Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).

config KASAN_HW_TAGS
bool "Hardware tag-based mode"
depends on HAVE_ARCH_KASAN_HW_TAGS
depends on SLUB
help
Enables hardware tag-based KASAN mode.

This mode requires hardware memory tagging support, and can be used
by any architecture that provides it.

Currently this mode is only implemented for arm64 CPUs starting from
ARMv8.5 and relies on Memory Tagging Extension and Top Byte Ignore.

endchoice

choice
prompt "Instrumentation type"
depends on KASAN_GENERIC || KASAN_SW_TAGS
default KASAN_OUTLINE

config KASAN_OUTLINE
@@ -118,6 +140,7 @@ endchoice

config KASAN_STACK_ENABLE
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
depends on KASAN_GENERIC || KASAN_SW_TAGS
help
The LLVM stack address sanitizer has a known problem that
causes excessive stack usage in a lot of functions, see
@@ -146,7 +169,7 @@ config KASAN_SW_TAGS_IDENTIFY

config KASAN_VMALLOC
bool "Back mappings in vmalloc space with real shadow memory"
depends on HAVE_ARCH_KASAN_VMALLOC
depends on KASAN_GENERIC && HAVE_ARCH_KASAN_VMALLOC
help
By default, the shadow region for vmalloc space is the read-only
zero page. This means that KASAN cannot detect errors involving

@@ -25,7 +25,7 @@

#include "../mm/kasan/kasan.h"

#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

/*
* We assign some test results to these globals to make sure the tests

@@ -15,7 +15,7 @@

#include "../mm/kasan/kasan.h"

#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

static noinline void __init copy_user_test(void)
{

@@ -6,12 +6,15 @@ KCOV_INSTRUMENT := n
# Disable ftrace to avoid recursion.
CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_generic.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_generic_report.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_quarantine.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_tags.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_tags_report.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_report_generic.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_report_hw_tags.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_report_sw_tags.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_shadow.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_hw_tags.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sw_tags.o = $(CC_FLAGS_FTRACE)

# Function splitter causes unnecessary splits in __asan_load1/__asan_store1
# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
@@ -22,13 +25,17 @@ CC_FLAGS_KASAN_RUNTIME += -DDISABLE_BRANCH_PROFILING

CFLAGS_common.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_generic.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_generic_report.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_init.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_quarantine.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_report.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_tags_report.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_report_generic.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_report_hw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_report_sw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_shadow.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_hw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_sw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)

obj-$(CONFIG_KASAN) := common.o init.o report.o
obj-$(CONFIG_KASAN_GENERIC) += generic.o generic_report.o quarantine.o
obj-$(CONFIG_KASAN_SW_TAGS) += tags.o tags_report.o
obj-$(CONFIG_KASAN) := common.o report.o
obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quarantine.o
obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o
obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o

File diff suppressed because it is too large
@@ -7,15 +7,8 @@

*
* Some code borrowed from https://github.com/xairy/kasan-prototype by
* Andrey Konovalov <andreyknvl@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -51,7 +44,7 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr)
s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

if (unlikely(shadow_value)) {
s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
return unlikely(last_accessible_byte >= shadow_value);
}

@@ -67,7 +60,7 @@ static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
* Access crosses 8(shadow size)-byte boundary. Such access maps
* into 2 shadow bytes, so we need to check them both.
*/
if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

return memory_is_poisoned_1(addr + size - 1);
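
These checks lean on the generic-mode shadow encoding: a shadow byte of 0
marks the whole 8-byte granule accessible, a value 1..7 means only that many
leading bytes are valid, and a negative value is a poison marker. A small
userspace model of the single-byte check (simplified; the kernel reads real
shadow memory via kasan_mem_to_shadow()):

#include <stdbool.h>
#include <stdio.h>

#define GRANULE_MASK 7	/* 8-byte granules, as with KASAN_GRANULE_MASK */

/* Model of memory_is_poisoned_1(): is the byte at addr accessible? */
static bool byte_is_poisoned(unsigned long addr, signed char shadow_value)
{
	if (shadow_value) {
		signed char offset_in_granule = addr & GRANULE_MASK;

		return offset_in_granule >= shadow_value;
	}
	return false;	/* shadow 0: the whole granule is accessible */
}

int main(void)
{
	/* Granule with only 5 valid leading bytes: its shadow byte holds 5. */
	printf("offset 4: %d\n", byte_is_poisoned(0x1004, 5));	/* 0: ok */
	printf("offset 5: %d\n", byte_is_poisoned(0x1005, 5));	/* 1: poisoned */
	printf("redzone:  %d\n", byte_is_poisoned(0x1000, -1));	/* 1: poisoned */
	return 0;
}
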
@@ -78,7 +71,7 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr)
u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

/* Unaligned 16-bytes access maps into 3 shadow bytes. */
if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
return *shadow_addr || memory_is_poisoned_1(addr + 15);

return *shadow_addr;
@@ -139,7 +132,7 @@ static __always_inline bool memory_is_poisoned_n(unsigned long addr,
s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

if (unlikely(ret != (unsigned long)last_shadow ||
((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
return true;
}
return false;
@@ -192,6 +185,13 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
return check_memory_region_inline(addr, size, write, ret_ip);
}

bool check_invalid_free(void *addr)
{
s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

return shadow_byte < 0 || shadow_byte >= KASAN_GRANULE_SIZE;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
quarantine_remove_cache(cache);
@@ -205,13 +205,13 @@ void kasan_cache_shutdown(struct kmem_cache *cache)

static void register_global(struct kasan_global *global)
{
size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

kasan_unpoison_shadow(global->beg, global->size);
unpoison_range(global->beg, global->size);

kasan_poison_shadow(global->beg + aligned_size,
global->size_with_redzone - aligned_size,
KASAN_GLOBAL_REDZONE);
poison_range(global->beg + aligned_size,
global->size_with_redzone - aligned_size,
KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
@@ -279,10 +279,10 @@ EXPORT_SYMBOL(__asan_handle_no_return);
/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
rounded_up_size;
size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);
size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

const void *left_redzone = (const void *)(addr -
KASAN_ALLOCA_REDZONE_SIZE);
@@ -290,13 +290,12 @@ void __asan_alloca_poison(unsigned long addr, size_t size)

WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
size - rounded_down_size);
kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
KASAN_ALLOCA_LEFT);
kasan_poison_shadow(right_redzone,
padding_size + KASAN_ALLOCA_REDZONE_SIZE,
KASAN_ALLOCA_RIGHT);
unpoison_range((const void *)(addr + rounded_down_size),
size - rounded_down_size);
poison_range(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
KASAN_ALLOCA_LEFT);
poison_range(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);

@@ -306,7 +305,7 @@ void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
if (unlikely(!stack_top || stack_top > stack_bottom))
return;

kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
unpoison_range(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

@@ -329,7 +328,7 @@ void kasan_record_aux_stack(void *addr)
{
struct page *page = kasan_addr_to_page(addr);
struct kmem_cache *cache;
struct kasan_alloc_meta *alloc_info;
struct kasan_alloc_meta *alloc_meta;
void *object;

if (!(page && PageSlab(page)))
@@ -337,10 +336,10 @@ void kasan_record_aux_stack(void *addr)

cache = page->slab_cache;
object = nearest_obj(cache, page, addr);
alloc_info = get_alloc_info(cache, object);
alloc_meta = kasan_get_alloc_meta(cache, object);

alloc_info->aux_stack[1] = alloc_info->aux_stack[0];
alloc_info->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
}
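
kasan_record_aux_stack() keeps a two-entry history of auxiliary stack traces
by shifting the previous trace down one slot before storing the new one. A
minimal model of that rotation (integer ids stand in for stack depot handles):

#include <stdio.h>

static int aux_stack[2];	/* [0] = newest, [1] = previous */

static void record_aux(int stack_id)
{
	aux_stack[1] = aux_stack[0];	/* older trace moves down */
	aux_stack[0] = stack_id;	/* newest trace takes slot 0 */
}

int main(void)
{
	record_aux(101);
	record_aux(202);
	printf("newest=%d previous=%d\n", aux_stack[0], aux_stack[1]);
	return 0;
}
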

void kasan_set_free_info(struct kmem_cache *cache,
@@ -348,12 +347,12 @@ void kasan_set_free_info(struct kmem_cache *cache,
{
struct kasan_free_meta *free_meta;

free_meta = get_free_info(cache, object);
kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
free_meta = kasan_get_free_meta(cache, object);
if (!free_meta)
return;

/*
* the object was freed and has free track set
*/
kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
/* The object was freed and has free track set. */
*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREETRACK;
}

@@ -362,5 +361,6 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
{
if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_KMALLOC_FREETRACK)
return NULL;
return &get_free_info(cache, object)->free_track;
/* Free meta must be present with KASAN_KMALLOC_FREETRACK. */
return &kasan_get_free_meta(cache, object)->free_track;
}

@@ -1,165 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains generic KASAN specific error reporting code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
*
* Some code borrowed from https://github.com/xairy/kasan-prototype by
* Andrey Konovalov <andreyknvl@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/

#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
#include <linux/module.h>

#include <asm/sections.h>

#include "kasan.h"
#include "../slab.h"

void *find_first_bad_addr(void *addr, size_t size)
{
void *p = addr;

while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p)))
p += KASAN_SHADOW_SCALE_SIZE;
return p;
}

static const char *get_shadow_bug_type(struct kasan_access_info *info)
{
const char *bug_type = "unknown-crash";
u8 *shadow_addr;

shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);

/*
* If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look
* at the next shadow byte to determine the type of the bad access.
*/
if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1)
shadow_addr++;

switch (*shadow_addr) {
case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
/*
* In theory it's still possible to see these shadow values
* due to a data race in the kernel code.
*/
bug_type = "out-of-bounds";
break;
case KASAN_PAGE_REDZONE:
case KASAN_KMALLOC_REDZONE:
bug_type = "slab-out-of-bounds";
break;
case KASAN_GLOBAL_REDZONE:
bug_type = "global-out-of-bounds";
break;
case KASAN_STACK_LEFT:
case KASAN_STACK_MID:
case KASAN_STACK_RIGHT:
case KASAN_STACK_PARTIAL:
bug_type = "stack-out-of-bounds";
break;
case KASAN_FREE_PAGE:
case KASAN_KMALLOC_FREE:
case KASAN_KMALLOC_FREETRACK:
bug_type = "use-after-free";
break;
case KASAN_ALLOCA_LEFT:
case KASAN_ALLOCA_RIGHT:
bug_type = "alloca-out-of-bounds";
break;
case KASAN_VMALLOC_INVALID:
bug_type = "vmalloc-out-of-bounds";
break;
}

return bug_type;
}

static const char *get_wild_bug_type(struct kasan_access_info *info)
{
const char *bug_type = "unknown-crash";

if ((unsigned long)info->access_addr < PAGE_SIZE)
bug_type = "null-ptr-deref";
else if ((unsigned long)info->access_addr < TASK_SIZE)
bug_type = "user-memory-access";
else
bug_type = "wild-memory-access";

return bug_type;
}

const char *get_bug_type(struct kasan_access_info *info)
{
/*
* If access_size is a negative number, then it has reason to be
* defined as out-of-bounds bug type.
*
* Casting negative numbers to size_t would indeed turn up as
* a large size_t and its value will be larger than ULONG_MAX/2,
* so that this can qualify as out-of-bounds.
*/
if (info->access_addr + info->access_size < info->access_addr)
return "out-of-bounds";

if (addr_has_shadow(info->access_addr))
return get_shadow_bug_type(info);
return get_wild_bug_type(info);
}

#define DEFINE_ASAN_REPORT_LOAD(size) \
void __asan_report_load##size##_noabort(unsigned long addr) \
{ \
kasan_report(addr, size, false, _RET_IP_); \
} \
EXPORT_SYMBOL(__asan_report_load##size##_noabort)

#define DEFINE_ASAN_REPORT_STORE(size) \
void __asan_report_store##size##_noabort(unsigned long addr) \
{ \
kasan_report(addr, size, true, _RET_IP_); \
} \
EXPORT_SYMBOL(__asan_report_store##size##_noabort)

DEFINE_ASAN_REPORT_LOAD(1);
DEFINE_ASAN_REPORT_LOAD(2);
DEFINE_ASAN_REPORT_LOAD(4);
DEFINE_ASAN_REPORT_LOAD(8);
DEFINE_ASAN_REPORT_LOAD(16);
DEFINE_ASAN_REPORT_STORE(1);
DEFINE_ASAN_REPORT_STORE(2);
DEFINE_ASAN_REPORT_STORE(4);
DEFINE_ASAN_REPORT_STORE(8);
DEFINE_ASAN_REPORT_STORE(16);

void __asan_report_load_n_noabort(unsigned long addr, size_t size)
{
kasan_report(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_load_n_noabort);

void __asan_report_store_n_noabort(unsigned long addr, size_t size)
{
kasan_report(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_store_n_noabort);

204 mm/kasan/hw_tags.c Normal file
@@ -0,0 +1,204 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains core hardware tag-based KASAN code.
*
* Copyright (c) 2020 Google, Inc.
* Author: Andrey Konovalov <andreyknvl@google.com>
*/

#define pr_fmt(fmt) "kasan: " fmt

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/static_key.h>
#include <linux/string.h>
#include <linux/types.h>

#include "kasan.h"

enum kasan_arg_mode {
KASAN_ARG_MODE_DEFAULT,
KASAN_ARG_MODE_OFF,
KASAN_ARG_MODE_PROD,
KASAN_ARG_MODE_FULL,
};

enum kasan_arg_stacktrace {
KASAN_ARG_STACKTRACE_DEFAULT,
KASAN_ARG_STACKTRACE_OFF,
KASAN_ARG_STACKTRACE_ON,
};

enum kasan_arg_fault {
KASAN_ARG_FAULT_DEFAULT,
KASAN_ARG_FAULT_REPORT,
KASAN_ARG_FAULT_PANIC,
};

static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
static enum kasan_arg_stacktrace kasan_arg_stacktrace __ro_after_init;
static enum kasan_arg_fault kasan_arg_fault __ro_after_init;

/* Whether KASAN is enabled at all. */
DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
EXPORT_SYMBOL(kasan_flag_enabled);

/* Whether to collect alloc/free stack traces. */
DEFINE_STATIC_KEY_FALSE(kasan_flag_stacktrace);

/* Whether to panic or disable tag checking on fault. */
bool kasan_flag_panic __ro_after_init;

/* kasan.mode=off/prod/full */
static int __init early_kasan_mode(char *arg)
{
if (!arg)
return -EINVAL;

if (!strcmp(arg, "off"))
kasan_arg_mode = KASAN_ARG_MODE_OFF;
else if (!strcmp(arg, "prod"))
kasan_arg_mode = KASAN_ARG_MODE_PROD;
else if (!strcmp(arg, "full"))
kasan_arg_mode = KASAN_ARG_MODE_FULL;
else
return -EINVAL;

return 0;
}
early_param("kasan.mode", early_kasan_mode);

/* kasan.stacktrace=off/on */
static int __init early_kasan_flag_stacktrace(char *arg)
{
if (!arg)
return -EINVAL;

if (!strcmp(arg, "off"))
kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_OFF;
else if (!strcmp(arg, "on"))
kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_ON;
else
return -EINVAL;

return 0;
}
early_param("kasan.stacktrace", early_kasan_flag_stacktrace);

/* kasan.fault=report/panic */
static int __init early_kasan_fault(char *arg)
{
if (!arg)
return -EINVAL;

if (!strcmp(arg, "report"))
kasan_arg_fault = KASAN_ARG_FAULT_REPORT;
else if (!strcmp(arg, "panic"))
kasan_arg_fault = KASAN_ARG_FAULT_PANIC;
else
return -EINVAL;

return 0;
}
early_param("kasan.fault", early_kasan_fault);

/* kasan_init_hw_tags_cpu() is called for each CPU. */
void kasan_init_hw_tags_cpu(void)
{
/*
* There's no need to check that the hardware is MTE-capable here,
* as this function is only called for MTE-capable hardware.
*/

/* If KASAN is disabled, do nothing. */
if (kasan_arg_mode == KASAN_ARG_MODE_OFF)
return;

hw_init_tags(KASAN_TAG_MAX);
hw_enable_tagging();
}

/* kasan_init_hw_tags() is called once on the boot CPU. */
void __init kasan_init_hw_tags(void)
{
/* If hardware doesn't support MTE, do nothing. */
if (!system_supports_mte())
return;

/* Choose KASAN mode if kasan boot parameter is not provided. */
if (kasan_arg_mode == KASAN_ARG_MODE_DEFAULT) {
if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
kasan_arg_mode = KASAN_ARG_MODE_FULL;
else
kasan_arg_mode = KASAN_ARG_MODE_PROD;
}

/* Preset parameter values based on the mode. */
switch (kasan_arg_mode) {
case KASAN_ARG_MODE_DEFAULT:
/* Shouldn't happen as per the check above. */
WARN_ON(1);
return;
case KASAN_ARG_MODE_OFF:
/* If KASAN is disabled, do nothing. */
return;
case KASAN_ARG_MODE_PROD:
static_branch_enable(&kasan_flag_enabled);
break;
case KASAN_ARG_MODE_FULL:
static_branch_enable(&kasan_flag_enabled);
static_branch_enable(&kasan_flag_stacktrace);
break;
}

/* Now, optionally override the presets. */

switch (kasan_arg_stacktrace) {
case KASAN_ARG_STACKTRACE_DEFAULT:
break;
case KASAN_ARG_STACKTRACE_OFF:
static_branch_disable(&kasan_flag_stacktrace);
break;
case KASAN_ARG_STACKTRACE_ON:
static_branch_enable(&kasan_flag_stacktrace);
break;
}

switch (kasan_arg_fault) {
case KASAN_ARG_FAULT_DEFAULT:
break;
case KASAN_ARG_FAULT_REPORT:
kasan_flag_panic = false;
break;
case KASAN_ARG_FAULT_PANIC:
kasan_flag_panic = true;
break;
}

pr_info("KernelAddressSanitizer initialized\n");
}

void kasan_set_free_info(struct kmem_cache *cache,
void *object, u8 tag)
{
struct kasan_alloc_meta *alloc_meta;

alloc_meta = kasan_get_alloc_meta(cache, object);
if (alloc_meta)
kasan_set_track(&alloc_meta->free_track[0], GFP_NOWAIT);
}

struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
void *object, u8 tag)
{
struct kasan_alloc_meta *alloc_meta;

alloc_meta = kasan_get_alloc_meta(cache, object);
if (!alloc_meta)
return NULL;

return &alloc_meta->free_track[0];
}

@@ -1,14 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains some kasan initialization code.
* This file contains KASAN shadow initialization code.
*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/

#include <linux/memblock.h>
@@ -446,9 +441,8 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
addr = (unsigned long)kasan_mem_to_shadow(start);
end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

if (WARN_ON((unsigned long)start %
(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
return;

for (; addr < end; addr = next) {
@@ -481,9 +475,8 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
shadow_start = kasan_mem_to_shadow(start);
shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

if (WARN_ON((unsigned long)start %
(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
return -EINVAL;

ret = kasan_populate_early_shadow(shadow_start, shadow_end);

175 mm/kasan/kasan.h
@@ -5,8 +5,32 @@
#include <linux/kasan.h>
#include <linux/stackdepot.h>

#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
#ifdef CONFIG_KASAN_HW_TAGS
#include <linux/static_key.h>
DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
static inline bool kasan_stack_collection_enabled(void)
{
return static_branch_unlikely(&kasan_flag_stacktrace);
}
#else
static inline bool kasan_stack_collection_enabled(void)
{
return true;
}
#endif

extern bool kasan_flag_panic __ro_after_init;

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
#else
#include <asm/mte-kasan.h>
#define KASAN_GRANULE_SIZE MTE_GRANULE_SIZE
#endif

#define KASAN_GRANULE_MASK (KASAN_GRANULE_SIZE - 1)

#define KASAN_MEMORY_PER_SHADOW_PAGE (KASAN_GRANULE_SIZE << PAGE_SHIFT)

#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
@@ -56,6 +80,13 @@
#define KASAN_ABI_VERSION 1
#endif

/* Metadata layout customization. */
#define META_BYTES_PER_BLOCK 1
#define META_BLOCKS_PER_ROW 16
#define META_BYTES_PER_ROW (META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
#define META_ROWS_AROUND_ADDR 2

struct kasan_access_info {
const void *access_addr;
const void *first_bad_addr;
@@ -124,20 +155,33 @@ struct kasan_alloc_meta {
struct qlist_node {
struct qlist_node *next;
};

/*
* Generic mode either stores free meta in the object itself or in the redzone
* after the object. In the former case free meta offset is 0, in the latter
* case it has some sane value smaller than INT_MAX. Use INT_MAX as free meta
* offset when free meta isn't present.
*/
#define KASAN_NO_FREE_META INT_MAX
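
The comment above defines a three-way convention for free_meta_offset: 0 when
the free meta lives inside the freed object, a small positive offset when it
sits in the redzone after the object, and KASAN_NO_FREE_META when it does not
fit at all. A simplified sketch of a lookup honoring that convention (the
struct layout here is invented for illustration):

#include <limits.h>
#include <stdio.h>

#define NO_FREE_META INT_MAX	/* mirrors KASAN_NO_FREE_META */

struct cache_info { int free_meta_offset; };
struct free_meta { unsigned long free_track; };

/* Return the free meta for an object, or NULL when the cache has none. */
static struct free_meta *get_free_meta(struct cache_info *c, char *object)
{
	if (c->free_meta_offset == NO_FREE_META)
		return NULL;
	return (struct free_meta *)(object + c->free_meta_offset);
}

int main(void)
{
	char object[64];
	struct cache_info in_object = { 0 };	/* meta overlaps the freed object */
	struct cache_info in_redzone = { 32 };	/* meta in the redzone */
	struct cache_info none = { NO_FREE_META };

	printf("in object:  %p\n", (void *)get_free_meta(&in_object, object));
	printf("in redzone: %p\n", (void *)get_free_meta(&in_redzone, object));
	printf("no meta:    %p\n", (void *)get_free_meta(&none, object));
	return 0;
}
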
|
||||
|
||||
struct kasan_free_meta {
|
||||
#ifdef CONFIG_KASAN_GENERIC
|
||||
/* This field is used while the object is in the quarantine.
|
||||
* Otherwise it might be used for the allocator freelist.
|
||||
*/
|
||||
struct qlist_node quarantine_link;
|
||||
#ifdef CONFIG_KASAN_GENERIC
|
||||
struct kasan_track free_track;
|
||||
#endif
|
||||
};
|
||||
|
||||
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
|
||||
const void *object);
|
||||
struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
|
||||
const void *object);
|
||||
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
|
||||
const void *object);
|
||||
#ifdef CONFIG_KASAN_GENERIC
|
||||
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
|
||||
const void *object);
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
|
||||
|
||||
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
|
||||
{
|
||||
@ -145,13 +189,11 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
|
||||
<< KASAN_SHADOW_SCALE_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool addr_has_shadow(const void *addr)
|
||||
static inline bool addr_has_metadata(const void *addr)
|
||||
{
|
||||
return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
|
||||
}
|
||||
|
||||
void kasan_poison_shadow(const void *address, size_t size, u8 value);
|
||||
|
||||
/**
|
||||
* check_memory_region - Check memory region, and report if invalid access.
|
||||
* @addr: the accessed address
|
||||
@ -163,8 +205,30 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value);
|
||||
bool check_memory_region(unsigned long addr, size_t size, bool write,
|
||||
unsigned long ret_ip);
|
||||
|
||||
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
|
||||
|
||||
static inline bool addr_has_metadata(const void *addr)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
|
||||
|
||||
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
|
||||
void print_tags(u8 addr_tag, const void *addr);
|
||||
#else
|
||||
static inline void print_tags(u8 addr_tag, const void *addr) { }
|
||||
#endif
|
||||
|
||||
void *find_first_bad_addr(void *addr, size_t size);
|
||||
const char *get_bug_type(struct kasan_access_info *info);
|
||||
void metadata_fetch_row(char *buffer, void *row);
|
||||
|
||||
#if defined(CONFIG_KASAN_GENERIC) && CONFIG_KASAN_STACK
|
||||
void print_address_stack_frame(const void *addr);
|
||||
#else
|
||||
static inline void print_address_stack_frame(const void *addr) { }
|
||||
#endif
|
||||
|
||||
bool kasan_report(unsigned long addr, size_t size,
|
||||
bool is_write, unsigned long ip);
|
||||
@ -180,50 +244,93 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
|
||||
|
||||
#if defined(CONFIG_KASAN_GENERIC) && \
|
||||
(defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
|
||||
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
|
||||
bool quarantine_put(struct kmem_cache *cache, void *object);
|
||||
void quarantine_reduce(void);
|
||||
void quarantine_remove_cache(struct kmem_cache *cache);
|
||||
#else
|
||||
static inline void quarantine_put(struct kasan_free_meta *info,
|
||||
struct kmem_cache *cache) { }
|
||||
static inline bool quarantine_put(struct kmem_cache *cache, void *object) { return false; }
|
||||
static inline void quarantine_reduce(void) { }
|
||||
static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KASAN_SW_TAGS
|
||||
|
||||
void print_tags(u8 addr_tag, const void *addr);
|
||||
|
||||
u8 random_tag(void);
|
||||
|
||||
#else
|
||||
|
||||
static inline void print_tags(u8 addr_tag, const void *addr) { }
|
||||
|
||||
static inline u8 random_tag(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#ifndef arch_kasan_set_tag
|
||||
static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
|
||||
{
|
||||
return addr;
|
||||
}
|
||||
#endif
|
||||
#ifndef arch_kasan_reset_tag
|
||||
#define arch_kasan_reset_tag(addr) ((void *)(addr))
|
||||
#endif
|
||||
#ifndef arch_kasan_get_tag
|
||||
#define arch_kasan_get_tag(addr) 0
|
||||
#endif
|
||||
|
||||
#define set_tag(addr, tag) ((void *)arch_kasan_set_tag((addr), (tag)))
|
||||
#define reset_tag(addr) ((void *)arch_kasan_reset_tag(addr))
|
||||
#define get_tag(addr) arch_kasan_get_tag(addr)
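
The set_tag()/get_tag() wrappers above compile down to plain top-byte arithmetic on arm64, where the hardware ignores bits 56-63 of a pointer (Top Byte Ignore). A minimal sketch of that arithmetic, assuming a 64-bit address with the tag in the top byte — an illustration of the idea, not the kernel's arch_kasan_* implementation:

#include <stdint.h>

#define TAG_SHIFT	56
#define TAG_KERNEL	0xFF	/* match-all tag carried by untagged pointers */

/* Place a tag in the (hardware-ignored) top byte of an address. */
static inline uint64_t set_tag(uint64_t addr, uint8_t tag)
{
	return (addr & ~(0xFFULL << TAG_SHIFT)) | ((uint64_t)tag << TAG_SHIFT);
}

/* Read the tag back out of the top byte. */
static inline uint8_t get_tag(uint64_t addr)
{
	return (uint8_t)(addr >> TAG_SHIFT);
}

/* Restore the canonical kernel tag, as kasan_reset_tag() does. */
static inline uint64_t reset_tag(uint64_t addr)
{
	return set_tag(addr, TAG_KERNEL);
}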

#ifdef CONFIG_KASAN_HW_TAGS

#ifndef arch_enable_tagging
#define arch_enable_tagging()
#endif
#ifndef arch_init_tags
#define arch_init_tags(max_tag)
#endif
#ifndef arch_get_random_tag
#define arch_get_random_tag()	(0xFF)
#endif
#ifndef arch_get_mem_tag
#define arch_get_mem_tag(addr)	(0xFF)
#endif
#ifndef arch_set_mem_tag_range
#define arch_set_mem_tag_range(addr, size, tag) ((void *)(addr))
#endif

#define hw_enable_tagging()			arch_enable_tagging()
#define hw_init_tags(max_tag)			arch_init_tags(max_tag)
#define hw_get_random_tag()			arch_get_random_tag()
#define hw_get_mem_tag(addr)			arch_get_mem_tag(addr)
#define hw_set_mem_tag_range(addr, size, tag)	arch_set_mem_tag_range((addr), (size), (tag))

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
u8 random_tag(void);
#elif defined(CONFIG_KASAN_HW_TAGS)
static inline u8 random_tag(void) { return hw_get_random_tag(); }
#else
static inline u8 random_tag(void) { return 0; }
#endif

#ifdef CONFIG_KASAN_HW_TAGS

static inline void poison_range(const void *address, size_t size, u8 value)
{
	hw_set_mem_tag_range(kasan_reset_tag(address),
			round_up(size, KASAN_GRANULE_SIZE), value);
}

static inline void unpoison_range(const void *address, size_t size)
{
	hw_set_mem_tag_range(kasan_reset_tag(address),
			round_up(size, KASAN_GRANULE_SIZE), get_tag(address));
}

static inline bool check_invalid_free(void *addr)
{
	u8 ptr_tag = get_tag(addr);
	u8 mem_tag = hw_get_mem_tag(addr);

	return (mem_tag == KASAN_TAG_INVALID) ||
		(ptr_tag != KASAN_TAG_KERNEL && ptr_tag != mem_tag);
}

#else /* CONFIG_KASAN_HW_TAGS */

void poison_range(const void *address, size_t size, u8 value);
void unpoison_range(const void *address, size_t size);
bool check_invalid_free(void *addr);

#endif /* CONFIG_KASAN_HW_TAGS */
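
The hardware tag-based check_invalid_free() above encodes two rules: freeing memory whose granule is already marked KASAN_TAG_INVALID (a double-free), and freeing through a pointer whose tag matches neither the memory's tag nor the match-all kernel tag (a wild or corrupted free). The same predicate in isolation, with the kernel's tag values spelled out — a sketch for clarity, not a replacement for the inline above:

#include <stdbool.h>
#include <stdint.h>

#define KASAN_TAG_KERNEL	0xFF	/* tag of untagged (match-all) pointers */
#define KASAN_TAG_INVALID	0xFE	/* tag of freed or redzone granules */

/* True if freeing through ptr_tag at memory tagged mem_tag is a bug. */
static bool is_invalid_free(uint8_t ptr_tag, uint8_t mem_tag)
{
	return (mem_tag == KASAN_TAG_INVALID) ||
	       (ptr_tag != KASAN_TAG_KERNEL && ptr_tag != mem_tag);
}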

/*
 * Exported functions for interfaces called from assembly or from generated
 * code. Declarations here to avoid warning about missing declarations.

mm/kasan/quarantine.c
@@ -6,16 +6,6 @@
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
 */

#include <linux/gfp.h>
@@ -147,7 +137,12 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

+	/*
+	 * As the object now gets freed from the quarantine, assume that its
+	 * free track is no longer valid.
+	 */
+	*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREE;

	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
@@ -173,11 +168,19 @@ static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
	qlist_init(q);
}

-void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
+bool quarantine_put(struct kmem_cache *cache, void *object)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;
+	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);

+	/*
+	 * If there's no metadata for this object, don't put it into
+	 * quarantine.
+	 */
+	if (!meta)
+		return false;

	/*
	 * Note: irq must be disabled until after we move the batch to the
@@ -192,9 +195,9 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
	q = this_cpu_ptr(&cpu_quarantine);
	if (q->offline) {
		local_irq_restore(flags);
-		return;
+		return false;
	}
-	qlist_put(q, &info->quarantine_link, cache->size);
+	qlist_put(q, &meta->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

@@ -215,6 +218,8 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
	}

	local_irq_restore(flags);

+	return true;
}

void quarantine_reduce(void)
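
With quarantine_put() returning bool, the slab free path can key directly off whether the object was taken by the quarantine. A hedged sketch of a caller under that contract (the function name and surrounding logic are illustrative, not the exact mm/kasan/common.c code):

/* Returns true if the object must stay alive in the quarantine. */
static bool kasan_quarantine_or_free(struct kmem_cache *cache, void *object)
{
	/* Mark the object's memory as freed before deciding its fate. */
	poison_range(object, cache->object_size, KASAN_KMALLOC_FREE);

	/*
	 * quarantine_put() only succeeds when the object carries free
	 * metadata; otherwise the caller may release it immediately.
	 */
	return quarantine_put(cache, object);
}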

mm/kasan/report.c
@@ -1,17 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * This file contains common generic and tag-based KASAN error reporting code.
+ * This file contains common KASAN error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
 */

#include <linux/bitops.h>
@@ -38,12 +33,6 @@
#include "kasan.h"
#include "../slab.h"

-/* Shadow layout customization. */
-#define SHADOW_BYTES_PER_BLOCK 1
-#define SHADOW_BLOCKS_PER_ROW 16
-#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK)
-#define SHADOW_ROWS_AROUND_ADDR 2

static unsigned long kasan_flags;

#define KASAN_BIT_REPORTED	0
@@ -73,9 +62,14 @@ static void print_error_description(struct kasan_access_info *info)
{
	pr_err("BUG: KASAN: %s in %pS\n",
		get_bug_type(info), (void *)info->ip);
-	pr_err("%s of size %zu at addr %px by task %s/%d\n",
-		info->is_write ? "Write" : "Read", info->access_size,
-		info->access_addr, current->comm, task_pid_nr(current));
+	if (info->access_size)
+		pr_err("%s of size %zu at addr %px by task %s/%d\n",
+			info->is_write ? "Write" : "Read", info->access_size,
+			info->access_addr, current->comm, task_pid_nr(current));
+	else
+		pr_err("%s at addr %px by task %s/%d\n",
+			info->is_write ? "Write" : "Read",
+			info->access_addr, current->comm, task_pid_nr(current));
}

static DEFINE_SPINLOCK(report_lock);
@@ -105,6 +99,10 @@ static void end_report(unsigned long *flags)
		panic_on_warn = 0;
		panic("panic_on_warn set ...\n");
	}
+#ifdef CONFIG_KASAN_HW_TAGS
+	if (kasan_flag_panic)
+		panic("kasan.fault=panic set ...\n");
+#endif
	kasan_enable_current();
}

@@ -167,36 +165,45 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
		(void *)(object_addr + cache->object_size));
}

+static void describe_object_stacks(struct kmem_cache *cache, void *object,
+				   const void *addr, u8 tag)
+{
+	struct kasan_alloc_meta *alloc_meta;
+	struct kasan_track *free_track;
+
+	alloc_meta = kasan_get_alloc_meta(cache, object);
+	if (alloc_meta) {
+		print_track(&alloc_meta->alloc_track, "Allocated");
+		pr_err("\n");
+	}
+
+	free_track = kasan_get_free_track(cache, object, tag);
+	if (free_track) {
+		print_track(free_track, "Freed");
+		pr_err("\n");
+	}
+
+#ifdef CONFIG_KASAN_GENERIC
+	if (!alloc_meta)
+		return;
+	if (alloc_meta->aux_stack[0]) {
+		pr_err("Last potentially related work creation:\n");
+		print_stack(alloc_meta->aux_stack[0]);
+		pr_err("\n");
+	}
+	if (alloc_meta->aux_stack[1]) {
+		pr_err("Second to last potentially related work creation:\n");
+		print_stack(alloc_meta->aux_stack[1]);
+		pr_err("\n");
+	}
+#endif
+}

static void describe_object(struct kmem_cache *cache, void *object,
				const void *addr, u8 tag)
{
-	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
-
-	if (cache->flags & SLAB_KASAN) {
-		struct kasan_track *free_track;
-
-		print_track(&alloc_info->alloc_track, "Allocated");
-		pr_err("\n");
-		free_track = kasan_get_free_track(cache, object, tag);
-		if (free_track) {
-			print_track(free_track, "Freed");
-			pr_err("\n");
-		}
-
-#ifdef CONFIG_KASAN_GENERIC
-		if (alloc_info->aux_stack[0]) {
-			pr_err("Last potentially related work creation:\n");
-			print_stack(alloc_info->aux_stack[0]);
-			pr_err("\n");
-		}
-		if (alloc_info->aux_stack[1]) {
-			pr_err("Second to last potentially related work creation:\n");
-			print_stack(alloc_info->aux_stack[1]);
-			pr_err("\n");
-		}
-#endif
-	}
+	if (kasan_stack_collection_enabled())
+		describe_object_stacks(cache, object, addr, tag);
	describe_object_addr(cache, object, addr);
}

@@ -216,168 +223,6 @@ static inline bool init_task_stack_addr(const void *addr)
			sizeof(init_thread_union.stack));
}

-static bool __must_check tokenize_frame_descr(const char **frame_descr,
-					      char *token, size_t max_tok_len,
-					      unsigned long *value)
-{
-	const char *sep = strchr(*frame_descr, ' ');
-
-	if (sep == NULL)
-		sep = *frame_descr + strlen(*frame_descr);
-
-	if (token != NULL) {
-		const size_t tok_len = sep - *frame_descr;
-
-		if (tok_len + 1 > max_tok_len) {
-			pr_err("KASAN internal error: frame description too long: %s\n",
-			       *frame_descr);
-			return false;
-		}
-
-		/* Copy token (+ 1 byte for '\0'). */
-		strlcpy(token, *frame_descr, tok_len + 1);
-	}
-
-	/* Advance frame_descr past separator. */
-	*frame_descr = sep + 1;
-
-	if (value != NULL && kstrtoul(token, 10, value)) {
-		pr_err("KASAN internal error: not a valid number: %s\n", token);
-		return false;
-	}
-
-	return true;
-}
-
-static void print_decoded_frame_descr(const char *frame_descr)
-{
-	/*
-	 * We need to parse the following string:
-	 *    "n alloc_1 alloc_2 ... alloc_n"
-	 * where alloc_i looks like
-	 *    "offset size len name"
-	 * or "offset size len name:line".
-	 */
-
-	char token[64];
-	unsigned long num_objects;
-
-	if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
-				  &num_objects))
-		return;
-
-	pr_err("\n");
-	pr_err("this frame has %lu %s:\n", num_objects,
-	       num_objects == 1 ? "object" : "objects");
-
-	while (num_objects--) {
-		unsigned long offset;
-		unsigned long size;
-
-		/* access offset */
-		if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
-					  &offset))
-			return;
-		/* access size */
-		if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
-					  &size))
-			return;
-		/* name length (unused) */
-		if (!tokenize_frame_descr(&frame_descr, NULL, 0, NULL))
-			return;
-		/* object name */
-		if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
-					  NULL))
-			return;
-
-		/* Strip line number; without filename it's not very helpful. */
-		strreplace(token, ':', '\0');
-
-		/* Finally, print object information. */
-		pr_err(" [%lu, %lu) '%s'", offset, offset + size, token);
-	}
-}
-
-static bool __must_check get_address_stack_frame_info(const void *addr,
-						      unsigned long *offset,
-						      const char **frame_descr,
-						      const void **frame_pc)
-{
-	unsigned long aligned_addr;
-	unsigned long mem_ptr;
-	const u8 *shadow_bottom;
-	const u8 *shadow_ptr;
-	const unsigned long *frame;
-
-	BUILD_BUG_ON(IS_ENABLED(CONFIG_STACK_GROWSUP));
-
-	/*
-	 * NOTE: We currently only support printing frame information for
-	 * accesses to the task's own stack.
-	 */
-	if (!object_is_on_stack(addr))
-		return false;
-
-	aligned_addr = round_down((unsigned long)addr, sizeof(long));
-	mem_ptr = round_down(aligned_addr, KASAN_SHADOW_SCALE_SIZE);
-	shadow_ptr = kasan_mem_to_shadow((void *)aligned_addr);
-	shadow_bottom = kasan_mem_to_shadow(end_of_stack(current));
-
-	while (shadow_ptr >= shadow_bottom && *shadow_ptr != KASAN_STACK_LEFT) {
-		shadow_ptr--;
-		mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
-	}
-
-	while (shadow_ptr >= shadow_bottom && *shadow_ptr == KASAN_STACK_LEFT) {
-		shadow_ptr--;
-		mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
-	}
-
-	if (shadow_ptr < shadow_bottom)
-		return false;
-
-	frame = (const unsigned long *)(mem_ptr + KASAN_SHADOW_SCALE_SIZE);
-	if (frame[0] != KASAN_CURRENT_STACK_FRAME_MAGIC) {
-		pr_err("KASAN internal error: frame info validation failed; invalid marker: %lu\n",
-		       frame[0]);
-		return false;
-	}
-
-	*offset = (unsigned long)addr - (unsigned long)frame;
-	*frame_descr = (const char *)frame[1];
-	*frame_pc = (void *)frame[2];
-
-	return true;
-}
-
-static void print_address_stack_frame(const void *addr)
-{
-	unsigned long offset;
-	const char *frame_descr;
-	const void *frame_pc;
-
-	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-		return;
-
-	if (!get_address_stack_frame_info(addr, &offset, &frame_descr,
-					  &frame_pc))
-		return;
-
-	/*
-	 * get_address_stack_frame_info only returns true if the given addr is
-	 * on the current task's stack.
-	 */
-	pr_err("\n");
-	pr_err("addr %px is located in stack of task %s/%d at offset %lu in frame:\n",
-	       addr, current->comm, task_pid_nr(current), offset);
-	pr_err(" %pS\n", frame_pc);
-
-	if (!frame_descr)
-		return;
-
-	print_decoded_frame_descr(frame_descr);
-}

static void print_address_description(void *addr, u8 tag)
{
	struct page *page = kasan_addr_to_page(addr);
@@ -405,62 +250,68 @@ static void print_address_description(void *addr, u8 tag)
	print_address_stack_frame(addr);
}

-static bool row_is_guilty(const void *row, const void *guilty)
+static bool meta_row_is_guilty(const void *row, const void *addr)
{
-	return (row <= guilty) && (guilty < row + SHADOW_BYTES_PER_ROW);
+	return (row <= addr) && (addr < row + META_MEM_BYTES_PER_ROW);
}

-static int shadow_pointer_offset(const void *row, const void *shadow)
+static int meta_pointer_offset(const void *row, const void *addr)
{
-	/* The length of ">ff00ff00ff00ff00: " is
-	 *    3 + (BITS_PER_LONG/8)*2 chars.
+	/*
+	 * Memory state around the buggy address:
+	 *  ff00ff00ff00ff00: 00 00 00 05 fe fe fe fe fe fe fe fe fe fe fe fe
+	 *  ...
+	 *
+	 * The length of ">ff00ff00ff00ff00: " is
+	 *    3 + (BITS_PER_LONG / 8) * 2 chars.
+	 * The length of each granule metadata is 2 bytes
+	 *    plus 1 byte for space.
	 */
-	return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
-		(shadow - row) / SHADOW_BYTES_PER_BLOCK + 1;
+	return 3 + (BITS_PER_LONG / 8) * 2 +
+		(addr - row) / KASAN_GRANULE_SIZE * 3 + 1;
}
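
To check the new meta_pointer_offset() by hand: the ">ff00ff00ff00ff00: " prefix is 3 + 16 characters on a 64-bit kernel, and every granule's metadata prints as two hex digits plus a space. A standalone version of the same arithmetic (assuming BITS_PER_LONG == 64 and the software modes' 8-byte granule):

#include <stdio.h>

#define BITS_PER_LONG		64
#define KASAN_GRANULE_SIZE	8	/* 16 under hardware tag-based mode */

static int meta_pointer_offset(unsigned long row, unsigned long addr)
{
	/* ">ff00ff00ff00ff00: " is 3 + 16 chars, then "xx " per granule. */
	return 3 + (BITS_PER_LONG / 8) * 2 +
	       (addr - row) / KASAN_GRANULE_SIZE * 3 + 1;
}

int main(void)
{
	/* addr in the 4th granule of its row: 19 + 3 * 3 + 1 = 29. */
	printf("%d\n", meta_pointer_offset(0x100, 0x118));
	return 0;
}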

-static void print_shadow_for_address(const void *addr)
+static void print_memory_metadata(const void *addr)
{
	int i;
-	const void *shadow = kasan_mem_to_shadow(addr);
-	const void *shadow_row;
+	void *row;

-	shadow_row = (void *)round_down((unsigned long)shadow,
-					SHADOW_BYTES_PER_ROW)
-		- SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;
+	row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
+			- META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

-	for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
-		const void *kaddr = kasan_shadow_to_mem(shadow_row);
-		char buffer[4 + (BITS_PER_LONG/8)*2];
-		char shadow_buf[SHADOW_BYTES_PER_ROW];
+	for (i = -META_ROWS_AROUND_ADDR; i <= META_ROWS_AROUND_ADDR; i++) {
+		char buffer[4 + (BITS_PER_LONG / 8) * 2];
+		char metadata[META_BYTES_PER_ROW];

		snprintf(buffer, sizeof(buffer),
-				(i == 0) ? ">%px: " : " %px: ", kaddr);
+				(i == 0) ? ">%px: " : " %px: ", row);

-		/*
-		 * We should not pass a shadow pointer to generic
-		 * function, because generic functions may try to
-		 * access kasan mapping for the passed address.
-		 */
-		memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
+		metadata_fetch_row(&metadata[0], row);

		print_hex_dump(KERN_ERR, buffer,
-			DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
-			shadow_buf, SHADOW_BYTES_PER_ROW, 0);
+			DUMP_PREFIX_NONE, META_BYTES_PER_ROW, 1,
+			metadata, META_BYTES_PER_ROW, 0);

-		if (row_is_guilty(shadow_row, shadow))
-			pr_err("%*c\n",
-				shadow_pointer_offset(shadow_row, shadow),
-				'^');
+		if (meta_row_is_guilty(row, addr))
+			pr_err("%*c\n", meta_pointer_offset(row, addr), '^');

-		shadow_row += SHADOW_BYTES_PER_ROW;
+		row += META_MEM_BYTES_PER_ROW;
	}
}

static bool report_enabled(void)
{
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	if (current->kasan_depth)
		return false;
+#endif
	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		return true;
	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
@@ -490,7 +341,7 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
	unsigned long flags;
	u8 tag = get_tag(object);

-	object = reset_tag(object);
+	object = kasan_reset_tag(object);

#if IS_ENABLED(CONFIG_KUNIT)
	if (current->kunit_test)
@@ -503,7 +354,7 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
	pr_err("\n");
	print_address_description(object, tag);
	pr_err("\n");
-	print_shadow_for_address(object);
+	print_memory_metadata(object);
	end_report(&flags);
}

@@ -523,10 +374,10 @@ static void __kasan_report(unsigned long addr, size_t size, bool is_write,
	disable_trace_on_warning();

	tagged_addr = (void *)addr;
-	untagged_addr = reset_tag(tagged_addr);
+	untagged_addr = kasan_reset_tag(tagged_addr);

	info.access_addr = tagged_addr;
-	if (addr_has_shadow(untagged_addr))
+	if (addr_has_metadata(untagged_addr))
		info.first_bad_addr = find_first_bad_addr(tagged_addr, size);
	else
		info.first_bad_addr = untagged_addr;
@@ -537,14 +388,14 @@ static void __kasan_report(unsigned long addr, size_t size, bool is_write,
	start_report(&flags);

	print_error_description(&info);
-	if (addr_has_shadow(untagged_addr))
+	if (addr_has_metadata(untagged_addr))
		print_tags(get_tag(tagged_addr), info.first_bad_addr);
	pr_err("\n");

-	if (addr_has_shadow(untagged_addr)) {
+	if (addr_has_metadata(untagged_addr)) {
		print_address_description(untagged_addr, get_tag(tagged_addr));
		pr_err("\n");
-		print_shadow_for_address(info.first_bad_addr);
+		print_memory_metadata(info.first_bad_addr);
	} else {
		dump_stack();
	}
@@ -604,6 +455,6 @@ void kasan_non_canonical_hook(unsigned long addr)
	else
		bug_type = "maybe wild-memory-access";
	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
-		orig_addr, orig_addr + KASAN_SHADOW_MASK);
+		orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
}
#endif

mm/kasan/report_generic.c (new file, 327 lines)
@@ -0,0 +1,327 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains generic KASAN specific error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
#include <linux/module.h>

#include <asm/sections.h>

#include "kasan.h"
#include "../slab.h"

void *find_first_bad_addr(void *addr, size_t size)
{
	void *p = addr;

	while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p)))
		p += KASAN_GRANULE_SIZE;
	return p;
}

static const char *get_shadow_bug_type(struct kasan_access_info *info)
{
	const char *bug_type = "unknown-crash";
	u8 *shadow_addr;

	shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);

	/*
	 * If shadow byte value is in [0, KASAN_GRANULE_SIZE) we can look
	 * at the next shadow byte to determine the type of the bad access.
	 */
	if (*shadow_addr > 0 && *shadow_addr <= KASAN_GRANULE_SIZE - 1)
		shadow_addr++;

	switch (*shadow_addr) {
	case 0 ... KASAN_GRANULE_SIZE - 1:
		/*
		 * In theory it's still possible to see these shadow values
		 * due to a data race in the kernel code.
		 */
		bug_type = "out-of-bounds";
		break;
	case KASAN_PAGE_REDZONE:
	case KASAN_KMALLOC_REDZONE:
		bug_type = "slab-out-of-bounds";
		break;
	case KASAN_GLOBAL_REDZONE:
		bug_type = "global-out-of-bounds";
		break;
	case KASAN_STACK_LEFT:
	case KASAN_STACK_MID:
	case KASAN_STACK_RIGHT:
	case KASAN_STACK_PARTIAL:
		bug_type = "stack-out-of-bounds";
		break;
	case KASAN_FREE_PAGE:
	case KASAN_KMALLOC_FREE:
	case KASAN_KMALLOC_FREETRACK:
		bug_type = "use-after-free";
		break;
	case KASAN_ALLOCA_LEFT:
	case KASAN_ALLOCA_RIGHT:
		bug_type = "alloca-out-of-bounds";
		break;
	case KASAN_VMALLOC_INVALID:
		bug_type = "vmalloc-out-of-bounds";
		break;
	}

	return bug_type;
}

static const char *get_wild_bug_type(struct kasan_access_info *info)
{
	const char *bug_type = "unknown-crash";

	if ((unsigned long)info->access_addr < PAGE_SIZE)
		bug_type = "null-ptr-deref";
	else if ((unsigned long)info->access_addr < TASK_SIZE)
		bug_type = "user-memory-access";
	else
		bug_type = "wild-memory-access";

	return bug_type;
}

const char *get_bug_type(struct kasan_access_info *info)
{
	/*
	 * If access_size is a negative number, then it has reason to be
	 * defined as out-of-bounds bug type.
	 *
	 * Casting negative numbers to size_t would indeed turn up as
	 * a large size_t and its value will be larger than ULONG_MAX/2,
	 * so that this can qualify as out-of-bounds.
	 */
	if (info->access_addr + info->access_size < info->access_addr)
		return "out-of-bounds";

	if (addr_has_metadata(info->access_addr))
		return get_shadow_bug_type(info);
	return get_wild_bug_type(info);
}

void metadata_fetch_row(char *buffer, void *row)
{
	memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
}
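
Everything in this file keys off kasan_mem_to_shadow(), which in generic mode maps each 8-byte granule of kernel memory to one shadow byte: 0 means the whole granule is addressable, 1-7 mean only the first N bytes are, and negative values are the redzone/free markers decoded by the switch above. A minimal model of that mapping (KASAN_SHADOW_OFFSET is an arch-specific constant; the value below is illustrative only):

#include <stdint.h>

#define KASAN_SHADOW_SCALE_SHIFT	3	/* 8-byte granules */
#define KASAN_SHADOW_OFFSET	0xdffffc0000000000UL	/* illustrative */

/* Model of kasan_mem_to_shadow(): one shadow byte per granule. */
static inline uint8_t *mem_to_shadow(uintptr_t addr)
{
	return (uint8_t *)((addr >> KASAN_SHADOW_SCALE_SHIFT) +
			   KASAN_SHADOW_OFFSET);
}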

#if CONFIG_KASAN_STACK
static bool __must_check tokenize_frame_descr(const char **frame_descr,
					      char *token, size_t max_tok_len,
					      unsigned long *value)
{
	const char *sep = strchr(*frame_descr, ' ');

	if (sep == NULL)
		sep = *frame_descr + strlen(*frame_descr);

	if (token != NULL) {
		const size_t tok_len = sep - *frame_descr;

		if (tok_len + 1 > max_tok_len) {
			pr_err("KASAN internal error: frame description too long: %s\n",
			       *frame_descr);
			return false;
		}

		/* Copy token (+ 1 byte for '\0'). */
		strlcpy(token, *frame_descr, tok_len + 1);
	}

	/* Advance frame_descr past separator. */
	*frame_descr = sep + 1;

	if (value != NULL && kstrtoul(token, 10, value)) {
		pr_err("KASAN internal error: not a valid number: %s\n", token);
		return false;
	}

	return true;
}

static void print_decoded_frame_descr(const char *frame_descr)
{
	/*
	 * We need to parse the following string:
	 *    "n alloc_1 alloc_2 ... alloc_n"
	 * where alloc_i looks like
	 *    "offset size len name"
	 * or "offset size len name:line".
	 */

	char token[64];
	unsigned long num_objects;

	if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
				  &num_objects))
		return;

	pr_err("\n");
	pr_err("this frame has %lu %s:\n", num_objects,
	       num_objects == 1 ? "object" : "objects");

	while (num_objects--) {
		unsigned long offset;
		unsigned long size;

		/* access offset */
		if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
					  &offset))
			return;
		/* access size */
		if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
					  &size))
			return;
		/* name length (unused) */
		if (!tokenize_frame_descr(&frame_descr, NULL, 0, NULL))
			return;
		/* object name */
		if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
					  NULL))
			return;

		/* Strip line number; without filename it's not very helpful. */
		strreplace(token, ':', '\0');

		/* Finally, print object information. */
		pr_err(" [%lu, %lu) '%s'", offset, offset + size, token);
	}
}

static bool __must_check get_address_stack_frame_info(const void *addr,
						      unsigned long *offset,
						      const char **frame_descr,
						      const void **frame_pc)
{
	unsigned long aligned_addr;
	unsigned long mem_ptr;
	const u8 *shadow_bottom;
	const u8 *shadow_ptr;
	const unsigned long *frame;

	BUILD_BUG_ON(IS_ENABLED(CONFIG_STACK_GROWSUP));

	/*
	 * NOTE: We currently only support printing frame information for
	 * accesses to the task's own stack.
	 */
	if (!object_is_on_stack(addr))
		return false;

	aligned_addr = round_down((unsigned long)addr, sizeof(long));
	mem_ptr = round_down(aligned_addr, KASAN_GRANULE_SIZE);
	shadow_ptr = kasan_mem_to_shadow((void *)aligned_addr);
	shadow_bottom = kasan_mem_to_shadow(end_of_stack(current));

	while (shadow_ptr >= shadow_bottom && *shadow_ptr != KASAN_STACK_LEFT) {
		shadow_ptr--;
		mem_ptr -= KASAN_GRANULE_SIZE;
	}

	while (shadow_ptr >= shadow_bottom && *shadow_ptr == KASAN_STACK_LEFT) {
		shadow_ptr--;
		mem_ptr -= KASAN_GRANULE_SIZE;
	}

	if (shadow_ptr < shadow_bottom)
		return false;

	frame = (const unsigned long *)(mem_ptr + KASAN_GRANULE_SIZE);
	if (frame[0] != KASAN_CURRENT_STACK_FRAME_MAGIC) {
		pr_err("KASAN internal error: frame info validation failed; invalid marker: %lu\n",
		       frame[0]);
		return false;
	}

	*offset = (unsigned long)addr - (unsigned long)frame;
	*frame_descr = (const char *)frame[1];
	*frame_pc = (void *)frame[2];

	return true;
}

void print_address_stack_frame(const void *addr)
{
	unsigned long offset;
	const char *frame_descr;
	const void *frame_pc;

	if (!get_address_stack_frame_info(addr, &offset, &frame_descr,
					  &frame_pc))
		return;

	/*
	 * get_address_stack_frame_info only returns true if the given addr is
	 * on the current task's stack.
	 */
	pr_err("\n");
	pr_err("addr %px is located in stack of task %s/%d at offset %lu in frame:\n",
	       addr, current->comm, task_pid_nr(current), offset);
	pr_err(" %pS\n", frame_pc);

	if (!frame_descr)
		return;

	print_decoded_frame_descr(frame_descr);
}
#endif /* CONFIG_KASAN_STACK */

#define DEFINE_ASAN_REPORT_LOAD(size)					\
void __asan_report_load##size##_noabort(unsigned long addr)		\
{									\
	kasan_report(addr, size, false, _RET_IP_);			\
}									\
EXPORT_SYMBOL(__asan_report_load##size##_noabort)

#define DEFINE_ASAN_REPORT_STORE(size)					\
void __asan_report_store##size##_noabort(unsigned long addr)		\
{									\
	kasan_report(addr, size, true, _RET_IP_);			\
}									\
EXPORT_SYMBOL(__asan_report_store##size##_noabort)

DEFINE_ASAN_REPORT_LOAD(1);
DEFINE_ASAN_REPORT_LOAD(2);
DEFINE_ASAN_REPORT_LOAD(4);
DEFINE_ASAN_REPORT_LOAD(8);
DEFINE_ASAN_REPORT_LOAD(16);
DEFINE_ASAN_REPORT_STORE(1);
DEFINE_ASAN_REPORT_STORE(2);
DEFINE_ASAN_REPORT_STORE(4);
DEFINE_ASAN_REPORT_STORE(8);
DEFINE_ASAN_REPORT_STORE(16);

void __asan_report_load_n_noabort(unsigned long addr, size_t size)
{
	kasan_report(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_load_n_noabort);

void __asan_report_store_n_noabort(unsigned long addr, size_t size)
{
	kasan_report(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_store_n_noabort);

mm/kasan/report_hw_tags.c (new file, 42 lines)
@@ -0,0 +1,42 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains hardware tag-based KASAN specific error reporting code.
 *
 * Copyright (c) 2020 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@google.com>
 */

#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>

#include "kasan.h"

const char *get_bug_type(struct kasan_access_info *info)
{
	return "invalid-access";
}

void *find_first_bad_addr(void *addr, size_t size)
{
	return kasan_reset_tag(addr);
}

void metadata_fetch_row(char *buffer, void *row)
{
	int i;

	for (i = 0; i < META_BYTES_PER_ROW; i++)
		buffer[i] = hw_get_mem_tag(row + i * KASAN_GRANULE_SIZE);
}

void print_tags(u8 addr_tag, const void *addr)
{
	u8 memory_tag = hw_get_mem_tag((void *)addr);

	pr_err("Pointer tag: [%02x], memory tag: [%02x]\n",
		addr_tag, memory_tag);
}

mm/kasan/report_sw_tags.c
@@ -1,17 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * This file contains tag-based KASAN specific error reporting code.
+ * This file contains software tag-based KASAN specific error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
 */

#include <linux/bitops.h>
@@ -46,16 +41,19 @@ const char *get_bug_type(struct kasan_access_info *info)
	int i;

	tag = get_tag(info->access_addr);
-	addr = reset_tag(info->access_addr);
+	addr = kasan_reset_tag(info->access_addr);
	page = kasan_addr_to_page(addr);
	if (page && PageSlab(page)) {
		cache = page->slab_cache;
		object = nearest_obj(cache, page, (void *)addr);
-		alloc_meta = get_alloc_info(cache, object);
+		alloc_meta = kasan_get_alloc_meta(cache, object);

-		for (i = 0; i < KASAN_NR_FREE_STACKS; i++)
-			if (alloc_meta->free_pointer_tag[i] == tag)
-				return "use-after-free";
+		if (alloc_meta) {
+			for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
+				if (alloc_meta->free_pointer_tag[i] == tag)
+					return "use-after-free";
+			}
+		}
		return "out-of-bounds";
	}

@@ -77,14 +75,19 @@ const char *get_bug_type(struct kasan_access_info *info)
void *find_first_bad_addr(void *addr, size_t size)
{
	u8 tag = get_tag(addr);
-	void *p = reset_tag(addr);
+	void *p = kasan_reset_tag(addr);
	void *end = p + size;

	while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p))
-		p += KASAN_SHADOW_SCALE_SIZE;
+		p += KASAN_GRANULE_SIZE;
	return p;
}

+void metadata_fetch_row(char *buffer, void *row)
+{
+	memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
+}

void print_tags(u8 addr_tag, const void *addr)
{
	u8 *shadow = (u8 *)kasan_mem_to_shadow(addr);

mm/kasan/shadow.c (new file, 504 lines)
@@ -0,0 +1,504 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!check_memory_region((unsigned long)addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
	    !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
	    !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_GRANULE_SIZE.
 */
void poison_range(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = kasan_reset_tag(address);
	size = round_up(size, KASAN_GRANULE_SIZE);

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}

void unpoison_range(const void *address, size_t size)
{
	u8 tag = get_tag(address);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = kasan_reset_tag(address);

	poison_range(address, size, tag);

	if (size & KASAN_GRANULE_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
			*shadow = tag;
		else /* CONFIG_KASAN_GENERIC */
			*shadow = size & KASAN_GRANULE_MASK;
	}
}
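
The tail of unpoison_range() above is where the generic mode's 1-7 shadow values come from: for a size that is not granule-aligned, the final shadow byte stores how many bytes of the last granule are valid. A worked example under the 8-byte granule (values only, not kernel code):

/*
 * unpoison_range(p, 13) with KASAN_GRANULE_SIZE == 8:
 *
 *   granule 0 (bytes 0-7)  -> shadow byte 0          (fully accessible)
 *   granule 1 (bytes 8-12) -> shadow byte 13 & 7 = 5
 *
 * A 1-byte access at p + 12 (offset 4 in its granule, 4 < 5) passes;
 * an access at p + 13 (offset 5, 5 >= 5) is reported as out-of-bounds.
 */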

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(), the first one is
	 * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
	 * pud_bad(), if pud is bad then it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
		WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If shadow is mapped already, then it must have been mapped
		 * during boot. This could happen if we are onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free shadow.
		 * Non-NULL result of the find_vm_area() will tell us if
		 * that was the second case.
		 *
		 * Currently it's not possible to free shadow mapped
		 * during boot by kasan_init(). It's because the code
		 * to do that hasn't been written yet. So we'll just
		 * leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(*ptep)))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(*ptep))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0				CPU#1
	 * WRITE_ONCE(p, vmalloc(100));		while (x = READ_ONCE(p)) ;
	 *					p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0				CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call unpoison_range:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val	x = LOAD p
	 * // rest of vmalloc process		<data dependency>
	 * STORE p, a				LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	poison_range(start, size, KASAN_VMALLOC_INVALID);
}

void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	unpoison_range(start, size);
}

static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	if (likely(!pte_none(*ptep))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labelled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- -------- -------- --------
 *      |        |       |         |       |
 *      |        |       |         /-------/
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |??AAAAAA|AAAAAAAA|AA??????|                < shadow
 *                 (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                                    free_region_end
 *  |                 start                     end      |
 *  v                 v                         v        v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- -------- -------- --------
 *      |        |       |         |       |
 *      |        |       |         /-------/
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |FFAAAAAA|AAAAAAAA|AAF?????|                < shadow
 *                 (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);
		flush_tlb_kernel_range((unsigned long)shadow_start,
				       (unsigned long)shadow_end);
	}
}
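
The rounding above is easier to follow with numbers. With PAGE_SIZE == 4096 and 8-byte granules, one shadow page covers KASAN_MEMORY_PER_SHADOW_PAGE == 0x8000 bytes of real memory; a worked instance of the alignment decisions (assumed example values):

/*
 * start = 0x5000, end = 0xe000, free region = [0x0000, 0x10000)
 *
 *   region_start = ALIGN(0x5000, 0x8000)      = 0x8000
 *   region_end   = ALIGN_DOWN(0xe000, 0x8000) = 0x8000
 *
 * Nothing is fully covered by [region_start, region_end) alone, but the
 * free region crosses a shadow-page boundary on both sides, so
 * region_start is pulled down to 0x0000 and region_end pushed up to
 * 0x10000, and both partially covered shadow pages are released too.
 */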

#else /* CONFIG_KASAN_VMALLOC */

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
				KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#endif

mm/kasan/sw_tags.c
@@ -1,17 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * This file contains core tag-based KASAN code.
+ * This file contains core software tag-based KASAN code.
 *
 * Copyright (c) 2018 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@google.com>
 *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
 */

-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt) "kasan: " fmt

#include <linux/export.h>
#include <linux/interrupt.h>
@@ -40,12 +35,14 @@

static DEFINE_PER_CPU(u32, prng_state);

-void kasan_init_tags(void)
+void __init kasan_init_sw_tags(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(prng_state, cpu) = (u32)get_cycles();
+
+	pr_info("KernelAddressSanitizer initialized\n");
}

/*
@@ -70,11 +67,6 @@ u8 random_tag(void)
	return (u8)(state % (KASAN_TAG_MAX + 1));
}

-void *kasan_reset_tag(const void *addr)
-{
-	return reset_tag(addr);
-}
-
bool check_memory_region(unsigned long addr, size_t size, bool write,
			 unsigned long ret_ip)
{
@@ -110,7 +102,7 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
	if (tag == KASAN_TAG_KERNEL)
		return true;

-	untagged_addr = reset_tag((const void *)addr);
+	untagged_addr = kasan_reset_tag((const void *)addr);
	if (unlikely(untagged_addr <
			kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		return !kasan_report(addr, size, write, ret_ip);
@@ -126,6 +118,15 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
	return true;
}

+bool check_invalid_free(void *addr)
+{
+	u8 tag = get_tag(addr);
+	u8 shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(kasan_reset_tag(addr)));
+
+	return (shadow_byte == KASAN_TAG_INVALID) ||
+		(tag != KASAN_TAG_KERNEL && tag != shadow_byte);
+}

#define DEFINE_HWASAN_LOAD_STORE(size)					\
	void __hwasan_load##size##_noabort(unsigned long addr)		\
	{								\
@@ -158,7 +159,7 @@ EXPORT_SYMBOL(__hwasan_storeN_noabort);

void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
{
-	kasan_poison_shadow((void *)addr, size, tag);
+	poison_range((void *)addr, size, tag);
}
EXPORT_SYMBOL(__hwasan_tag_memory);

@@ -168,7 +169,9 @@ void kasan_set_free_info(struct kmem_cache *cache,
	struct kasan_alloc_meta *alloc_meta;
	u8 idx = 0;

-	alloc_meta = get_alloc_info(cache, object);
+	alloc_meta = kasan_get_alloc_meta(cache, object);
+	if (!alloc_meta)
+		return;

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
	idx = alloc_meta->free_track_idx;
@@ -185,7 +188,9 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
	struct kasan_alloc_meta *alloc_meta;
	int i = 0;

-	alloc_meta = get_alloc_info(cache, object);
+	alloc_meta = kasan_get_alloc_meta(cache, object);
+	if (!alloc_meta)
+		return NULL;

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
	for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
|
mm/mempool.c
@ -104,7 +104,7 @@ static inline void poison_element(mempool_t *pool, void *element)
 static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_poison_kfree(element, _RET_IP_);
+		kasan_slab_free_mempool(element, _RET_IP_);
 	else if (pool->alloc == mempool_alloc_pages)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
@ -112,7 +112,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 static void kasan_unpoison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_unpoison_slab(element);
+		kasan_unpoison_range(element, __ksize(element));
 	else if (pool->alloc == mempool_alloc_pages)
 		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
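The mempool side of the same cleanup: an element parked in the pool is treated like a freed slab object (kasan_slab_free_mempool() poisons it), and on the way out it is unpoisoned for its full __ksize(). A toy model of that lifecycle, with hypothetical poison/unpoison helpers standing in for the KASAN hooks::

    /* toy_pool.c - sketch of mempool-style element poisoning; not kernel code */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static bool poisoned;   /* toy shadow state for a single element */

    static void poison_range(void *p, size_t n)   { (void)p; (void)n; poisoned = true; }
    static void unpoison_range(void *p, size_t n) { (void)p; (void)n; poisoned = false; }
    static bool access_valid(const void *p)       { (void)p; return !poisoned; }

    static char element[32];

    /* like kasan_poison_element(): cached elements stay poisoned */
    static void pool_free(void *elem)  { poison_range(elem, sizeof(element)); }

    /* like kasan_unpoison_element(): unpoison the whole usable size */
    static void *pool_alloc(void)
    {
        unpoison_range(element, sizeof(element));
        return element;
    }

    int main(void)
    {
        void *e = pool_alloc();
        printf("in use: %d\n", access_valid(e));   /* 1 */
        pool_free(e);
        printf("cached: %d\n", access_valid(e));   /* 0: touching it now reports */
        return 0;
    }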
mm/page_alloc.c
@ -1204,8 +1204,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)

 	/* s390's use of memset() could override KASAN redzones. */
 	kasan_disable_current();
-	for (i = 0; i < numpages; i++)
+	for (i = 0; i < numpages; i++) {
+		page_kasan_tag_reset(page + i);
 		clear_highpage(page + i);
+	}
 	kasan_enable_current();
 }

@ -7671,6 +7673,11 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
 	 * alias for the memset().
 	 */
 	direct_map_addr = page_address(page);
+	/*
+	 * Perform a kasan-unchecked memset() since this memory
+	 * has not been initialized.
+	 */
+	direct_map_addr = kasan_reset_tag(direct_map_addr);
 	if ((unsigned int)poison <= 0xFF)
 		memset(direct_map_addr, poison, PAGE_SIZE);
mm/page_poison.c
@ -25,7 +25,7 @@ static void poison_page(struct page *page)

 	/* KASAN still think the page is in-use, so skip it. */
 	kasan_disable_current();
-	memset(addr, PAGE_POISON, PAGE_SIZE);
+	memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
 	kasan_enable_current();
 	kunmap_atomic(addr);
 }
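The page_alloc.c and page_poison.c hunks above share one idiom: when the kernel deliberately overwrites memory whose current tag it does not own (freshly freed pages, poison patterns), it first strips the pointer tag with kasan_reset_tag() so the write goes through a match-all pointer instead of tripping a tag mismatch; page_kasan_tag_reset() is the page-struct analogue, resetting the tag recorded for the page itself before clear_highpage() writes to it. Continuing the toy sketch after the tag-based runtime hunks above (same set_tag()/access_ok()/TAG_KERNEL helpers, all illustrative)::

    #include <string.h>

    /*
     * Like memset(kasan_reset_tag(addr), ...): re-tag the pointer with the
     * match-all TAG_KERNEL before a write the caller knows is unchecked.
     */
    static void wipe(uintptr_t p, size_t n)
    {
        uintptr_t untagged = set_tag(p, TAG_KERNEL);

        if (!access_ok(untagged, n))    /* cannot fail for TAG_KERNEL */
            return;
        memset((void *)set_tag(untagged, 0), 0, n);  /* the real write */
    }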
13 mm/ptdump.c
@ -4,7 +4,7 @@
 #include <linux/ptdump.h>
 #include <linux/kasan.h>

-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 /*
  * This is an optimization for KASAN=y case. Since all kasan page tables
  * eventually point to the kasan_early_shadow_page we could call note_page()
@ -31,7 +31,8 @@ static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
 	struct ptdump_state *st = walk->private;
 	pgd_t val = READ_ONCE(*pgd);

-#if CONFIG_PGTABLE_LEVELS > 4 && defined(CONFIG_KASAN)
+#if CONFIG_PGTABLE_LEVELS > 4 && \
+		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
 	if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
 		return note_kasan_page_table(walk, addr);
 #endif
@ -51,7 +52,8 @@ static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
 	struct ptdump_state *st = walk->private;
 	p4d_t val = READ_ONCE(*p4d);

-#if CONFIG_PGTABLE_LEVELS > 3 && defined(CONFIG_KASAN)
+#if CONFIG_PGTABLE_LEVELS > 3 && \
+		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
 	if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
 		return note_kasan_page_table(walk, addr);
 #endif
@ -71,7 +73,8 @@ static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
 	struct ptdump_state *st = walk->private;
 	pud_t val = READ_ONCE(*pud);

-#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_KASAN)
+#if CONFIG_PGTABLE_LEVELS > 2 && \
+		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
 	if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
 		return note_kasan_page_table(walk, addr);
 #endif
@ -91,7 +94,7 @@ static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
 	struct ptdump_state *st = walk->private;
 	pmd_t val = READ_ONCE(*pmd);

-#if defined(CONFIG_KASAN)
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
 		return note_kasan_page_table(walk, addr);
 #endif
mm/slab_common.c
@ -18,6 +18,7 @@
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
 #include <linux/debugfs.h>
+#include <linux/kasan.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
@ -53,7 +54,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
  */
 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
 		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
-		SLAB_FAILSLAB | SLAB_KASAN)
+		SLAB_FAILSLAB | kasan_never_merge())

 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
 			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
@ -1176,7 +1177,7 @@ size_t ksize(const void *objp)
 	 * We assume that ksize callers could use whole allocated area,
 	 * so we need to unpoison this area.
 	 */
-	kasan_unpoison_shadow(objp, size);
+	kasan_unpoison_range(objp, size);
 	return size;
 }
 EXPORT_SYMBOL(ksize);
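Replacing the constant SLAB_KASAN in SLAB_NEVER_MERGE with a call to kasan_never_merge() turns "never merge KASAN caches" into a runtime decision: caches only have to stay unmerged while KASAN keeps per-object metadata in them, which the hardware tag-based mode can avoid entirely. The shape of such a hook, sketched with an illustrative predicate (the real check lives in mm/kasan/ and depends on the mode and boot parameters)::

    #include <stdbool.h>

    #define SLAB_KASAN 0x08000000UL /* flag value as in include/linux/slab.h */

    /* toy stand-in: does the active KASAN mode store per-object metadata? */
    static bool kasan_stores_metadata(void) { return true; }

    /* Merging stays allowed unless KASAN metadata makes layouts differ. */
    static unsigned long kasan_never_merge(void)
    {
        return kasan_stores_metadata() ? SLAB_KASAN : 0;
    }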
29 mm/slub.c
@ -249,7 +249,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
 {
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
 	/*
-	 * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
+	 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
 	 * Normally, this doesn't cause any issues, as both set_freepointer()
 	 * and get_freepointer() are called with a pointer with the same tag.
 	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
@ -275,6 +275,7 @@ static inline void *freelist_dereference(const struct kmem_cache *s,

 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
+	object = kasan_reset_tag(object);
 	return freelist_dereference(s, object + s->offset);
 }

@ -304,6 +305,7 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 	BUG_ON(object == fp); /* naive detection of double free or corruption */
 #endif

+	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
 	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
 }

@ -538,8 +540,8 @@ static void print_section(char *level, char *text, u8 *addr,
 			  unsigned int length)
 {
 	metadata_access_enable();
-	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
-			length, 1);
+	print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
+			16, 1, addr, length, 1);
 	metadata_access_disable();
 }

@ -570,7 +572,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,

 	p = object + get_info_end(s);

-	return p + alloc;
+	return kasan_reset_tag(p + alloc);
 }

 static void set_track(struct kmem_cache *s, void *object,
@ -583,7 +585,8 @@ static void set_track(struct kmem_cache *s, void *object,
 	unsigned int nr_entries;

 	metadata_access_enable();
-	nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
+	nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
+				      TRACK_ADDRS_COUNT, 3);
 	metadata_access_disable();

 	if (nr_entries < TRACK_ADDRS_COUNT)
@ -747,7 +750,7 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,

 static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
-	u8 *p = object;
+	u8 *p = kasan_reset_tag(object);

 	if (s->flags & SLAB_RED_ZONE)
 		memset(p - s->red_left_pad, val, s->red_left_pad);
@ -777,7 +780,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 	u8 *addr = page_address(page);

 	metadata_access_enable();
-	fault = memchr_inv(start, value, bytes);
+	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
 	metadata_access_disable();
 	if (!fault)
 		return 1;
@ -873,7 +876,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)

 	pad = end - remainder;
 	metadata_access_enable();
-	fault = memchr_inv(pad, POISON_INUSE, remainder);
+	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
 	metadata_access_disable();
 	if (!fault)
 		return 1;
@ -1118,7 +1121,7 @@ void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 		return;

 	metadata_access_enable();
-	memset(addr, POISON_INUSE, page_size(page));
+	memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page));
 	metadata_access_disable();
 }

@ -1566,10 +1569,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
 			 * Clear the object and the metadata, but don't touch
 			 * the redzone.
 			 */
-			memset(object, 0, s->object_size);
+			memset(kasan_reset_tag(object), 0, s->object_size);
 			rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
 							   : 0;
-			memset((char *)object + s->inuse, 0,
+			memset((char *)kasan_reset_tag(object) + s->inuse, 0,
 			       s->size - s->inuse - rsize);

 		}
@ -2881,10 +2884,10 @@ redo:
 		stat(s, ALLOC_FASTPATH);
 	}

-	maybe_wipe_obj_freeptr(s, object);
+	maybe_wipe_obj_freeptr(s, kasan_reset_tag(object));

 	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
-		memset(object, 0, s->object_size);
+		memset(kasan_reset_tag(object), 0, s->object_size);

 	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
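Every mm/slub.c hunk above is the same idiom again: SLUB's debug code reads and writes object memory and its adjacent metadata directly (memset(), memchr_inv(), print_hex_dump(), stack_trace_save() into the track area), and with a tag-based mode those raw accesses must go through kasan_reset_tag() so an intentionally mismatched object tag does not fire a report. When no tag-based mode is built in, the helper collapses to an identity function, so these hunks cost nothing elsewhere. A sketch of the two variants, loosely modelled on the arm64 behaviour (TAG_BASED_MODE is a hypothetical switch, not a kernel config symbol)::

    #ifdef TAG_BASED_MODE
    static inline void *kasan_reset_tag_sketch(const void *addr)
    {
        /* force the top byte back to the match-all kernel tag */
        return (void *)((unsigned long)addr | (0xffUL << 56));
    }
    #else
    static inline void *kasan_reset_tag_sketch(const void *addr)
    {
        return (void *)addr;    /* no tags in use: identity */
    }
    #endif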
|
@ -148,10 +148,12 @@ endif
|
||||
# we don't want to check (depends on variables KASAN_SANITIZE_obj.o, KASAN_SANITIZE)
|
||||
#
|
||||
ifeq ($(CONFIG_KASAN),y)
|
||||
ifneq ($(CONFIG_KASAN_HW_TAGS),y)
|
||||
_c_flags += $(if $(patsubst n%,, \
|
||||
$(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
|
||||
$(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE))
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(CONFIG_UBSAN),y)
|
||||
_c_flags += $(if $(patsubst n%,, \
|
||||
|
tools/testing/selftests/arm64/mte/Makefile
@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 # Copyright (C) 2020 ARM Limited

-CFLAGS += -std=gnu99 -I.
+CFLAGS += -std=gnu99 -I. -lpthread
 SRCS := $(filter-out mte_common_util.c,$(wildcard *.c))
 PROGS := $(patsubst %.c,%,$(SRCS))
154 tools/testing/selftests/arm64/mte/check_gcr_el1_cswitch.c (new file)
@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 ARM Limited
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <unistd.h>
+#include <sys/auxv.h>
+#include <sys/mman.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include "kselftest.h"
+#include "mte_common_util.h"
+
+#define PR_SET_TAGGED_ADDR_CTRL 55
+#define PR_GET_TAGGED_ADDR_CTRL 56
+# define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
+# define PR_MTE_TCF_SHIFT	1
+# define PR_MTE_TCF_NONE	(0UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_SYNC	(1UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_ASYNC	(2UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_MASK	(3UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TAG_SHIFT	3
+# define PR_MTE_TAG_MASK	(0xffffUL << PR_MTE_TAG_SHIFT)
+
+#include "mte_def.h"
+
+#define NUM_ITERATIONS		1024
+#define MAX_THREADS		5
+#define THREAD_ITERATIONS	1000
+
+void *execute_thread(void *x)
+{
+	pid_t pid = *((pid_t *)x);
+	pid_t tid = gettid();
+	uint64_t prctl_tag_mask;
+	uint64_t prctl_set;
+	uint64_t prctl_get;
+	uint64_t prctl_tcf;
+
+	srand(time(NULL) ^ (pid << 16) ^ (tid << 16));
+
+	prctl_tag_mask = rand() & 0xffff;
+
+	if (prctl_tag_mask % 2)
+		prctl_tcf = PR_MTE_TCF_SYNC;
+	else
+		prctl_tcf = PR_MTE_TCF_ASYNC;
+
+	prctl_set = PR_TAGGED_ADDR_ENABLE | prctl_tcf | (prctl_tag_mask << PR_MTE_TAG_SHIFT);
+
+	for (int j = 0; j < THREAD_ITERATIONS; j++) {
+		if (prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_set, 0, 0, 0)) {
+			perror("prctl() failed");
+			goto fail;
+		}
+
+		prctl_get = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
+
+		if (prctl_set != prctl_get) {
+			ksft_print_msg("Error: prctl_set: 0x%lx != prctl_get: 0x%lx\n",
+					prctl_set, prctl_get);
+			goto fail;
+		}
+	}
+
+	return (void *)KSFT_PASS;
+
+fail:
+	return (void *)KSFT_FAIL;
+}
+
+int execute_test(pid_t pid)
+{
+	pthread_t thread_id[MAX_THREADS];
+	int thread_data[MAX_THREADS];
+
+	for (int i = 0; i < MAX_THREADS; i++)
+		pthread_create(&thread_id[i], NULL,
+			       execute_thread, (void *)&pid);
+
+	for (int i = 0; i < MAX_THREADS; i++)
+		pthread_join(thread_id[i], (void *)&thread_data[i]);
+
+	for (int i = 0; i < MAX_THREADS; i++)
+		if (thread_data[i] == KSFT_FAIL)
+			return KSFT_FAIL;
+
+	return KSFT_PASS;
+}
+
+int mte_gcr_fork_test(void)
+{
+	pid_t pid;
+	int results[NUM_ITERATIONS];
+	pid_t cpid;
+	int res;
+
+	for (int i = 0; i < NUM_ITERATIONS; i++) {
+		pid = fork();
+
+		if (pid < 0)
+			return KSFT_FAIL;
+
+		if (pid == 0) {
+			cpid = getpid();
+
+			res = execute_test(cpid);
+
+			exit(res);
+		}
+	}
+
+	for (int i = 0; i < NUM_ITERATIONS; i++) {
+		wait(&res);
+
+		if (WIFEXITED(res))
+			results[i] = WEXITSTATUS(res);
+		else
+			--i;
+	}
+
+	for (int i = 0; i < NUM_ITERATIONS; i++)
+		if (results[i] == KSFT_FAIL)
+			return KSFT_FAIL;
+
+	return KSFT_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+	int err;
+
+	err = mte_default_setup();
+	if (err)
+		return err;
+
+	ksft_set_plan(1);
+
+	evaluate_test(mte_gcr_fork_test(),
+		"Verify that GCR_EL1 is set correctly on context switch\n");
+
+	mte_restore_setup();
+	ksft_print_cnts();
+
+	return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
+}
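The new selftest hammers PR_SET_TAGGED_ADDR_CTRL from several threads across many forked children because the tag-inclusion mask it sets ends up (inverted) in GCR_EL1, the per-thread register controlling which tags the IRG instruction may generate; the test fails if a context switch ever lets one thread observe another thread's setting. A compact standalone usage sketch of the same prctl (constants as defined at the top of the test; on a kernel without MTE support the call simply fails)::

    #include <stdio.h>
    #include <sys/prctl.h>

    #define PR_SET_TAGGED_ADDR_CTRL 55
    #define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
    #define PR_MTE_TCF_SHIFT        1
    #define PR_MTE_TCF_SYNC         (1UL << PR_MTE_TCF_SHIFT)
    #define PR_MTE_TAG_SHIFT        3

    int main(void)
    {
        /* synchronous tag checking; allow all 16 tags to be generated */
        unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                             (0xffffUL << PR_MTE_TAG_SHIFT);

        if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0)) {
            perror("PR_SET_TAGGED_ADDR_CTRL");
            return 1;
        }
        return 0;
    }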