// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

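/*
 * Per-page counters, kept in page_ext: how many times the page is currently
 * mapped as anonymous and as file-backed memory.  The checks below enforce
 * that the two mapping types never mix for a single page.
 */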
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

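/*
 * The checks are compiled in but disabled by default; init_page_table_check()
 * flips this static key once enablement has been requested.
 */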
DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

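/* Parse the "page_table_check" early boot parameter as a boolean. */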
static int __init early_page_table_check_param(char *buf)
{
	return kstrtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);

static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

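/* page_ext init callback: flip the static key if the checks were requested. */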
static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
	.need_shared_flags = false,
};

static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return page_ext_data(page_ext, &page_table_check_ops);
}

/*
 * An entry is removed from the page table. Decrement the counters for that
 * page, verifying that it is of the correct type and that the counters do
 * not become negative.
 */
static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	if (!page_ext)
		return;

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * A new entry is added to the page table. Increment the counters for that
 * page, verifying that it is of the correct type and is not already mapped
 * with a different type by a different process.
 */
static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	if (!page_ext)
		return;

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * The page is on the free list, or is being allocated: verify that the
 * counters are zero, and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext;
	unsigned long i;

	BUG_ON(PageSlab(page));

	page_ext = page_ext_get(page);
	if (!page_ext)
		return;

	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

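/*
 * A PTE mapping a user-accessible page was cleared: decrement the map
 * counters for the page it pointed at.  The pmd/pud variants below do the
 * same for huge entries.
 */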
void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

/* Whether the swap entry caches writable information */
static inline bool swap_cached_writable(swp_entry_t entry)
{
	return is_writable_device_exclusive_entry(entry) ||
	       is_writable_device_private_entry(entry) ||
	       is_writable_migration_entry(entry);
}

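/*
 * The userfaultfd write-protect invariant: an uffd-wp marked entry must
 * never carry a writable bit, whether it is a present pte or a swap entry.
 */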
static inline void page_table_check_pte_flags(pte_t pte)
{
	if (pte_present(pte) && pte_uffd_wp(pte))
		WARN_ON_ONCE(pte_write(pte));
	else if (is_swap_pte(pte) && pte_swp_uffd_wp(pte))
		WARN_ON_ONCE(swap_cached_writable(pte_to_swp_entry(pte)));
}

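/*
 * Hook for setting a range of PTEs: check the uffd-wp invariant on the new
 * value, account the clearing of whatever the nr slots previously held, then
 * account the new mapping.  The pmd/pud set hooks below follow the same
 * pattern for huge entries.
 */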
void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
		unsigned int nr)
{
	unsigned int i;

	if (&init_mm == mm)
		return;

	page_table_check_pte_flags(pte);

	for (i = 0; i < nr; i++)
		__page_table_check_pte_clear(mm, ptep_get(ptep + i));
	if (pte_user_accessible_page(pte))
		page_table_check_set(pte_pfn(pte), nr, pte_write(pte));
}
EXPORT_SYMBOL(__page_table_check_ptes_set);

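/* The same uffd-wp invariant as page_table_check_pte_flags(), for PMDs. */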
static inline void page_table_check_pmd_flags(pmd_t pmd)
{
	if (pmd_present(pmd) && pmd_uffd_wp(pmd))
		WARN_ON_ONCE(pmd_write(pmd));
	else if (is_swap_pmd(pmd) && pmd_swp_uffd_wp(pmd))
		WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
}

void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	page_table_check_pmd_flags(pmd);

	__page_table_check_pmd_clear(mm, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

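/*
 * A non-leaf PMD is being cleared: walk the page table it points to and
 * account the clearing of every PTE in it.
 */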
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		if (WARN_ON(!ptep))
			return;
		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, ptep_get(ptep));
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}