11 hotfixes. 7 are cc:stable and the other 4 address post-6.6 issues or
are not considered backporting material.
-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCZYys4AAKCRDdBJ7gKXxA
jtmaAQC+o04Ia7IfB8MIqp1p7dNZQo64x/EnGA8YjUnQ8N6IwQD+ImU7dHl9g9Oo
ROiiAbtMRBUfeJRsExX/Yzc1DV9E9QM=
=ZGcs
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2023-12-27-15-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "11 hotfixes. 7 are cc:stable and the other 4 address post-6.6 issues
  or are not considered backporting material"

* tag 'mm-hotfixes-stable-2023-12-27-15-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mailmap: add an old address for Naoya Horiguchi
  mm/memory-failure: cast index to loff_t before shifting it
  mm/memory-failure: check the mapcount of the precise page
  mm/memory-failure: pass the folio and the page to collect_procs()
  selftests: secretmem: floor the memory size to the multiple of page_size
  mm: migrate high-order folios in swap cache correctly
  maple_tree: do not preallocate nodes for slot stores
  mm/filemap: avoid buffered read/write race to read inconsistent data
  kunit: kasan_test: disable fortify string checker on kmalloc_oob_memset
  kexec: select CRYPTO from KEXEC_FILE instead of depending on it
  kexec: fix KEXEC_FILE dependencies
commit f5837722ff

.mailmap
@@ -436,6 +436,7 @@ Muna Sinada <quic_msinada@quicinc.com> <msinada@codeaurora.org>
 Murali Nalajala <quic_mnalajal@quicinc.com> <mnalajal@codeaurora.org>
 Mythri P K <mythripk@ti.com>
 Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
+Naoya Horiguchi <naoya.horiguchi@nec.com> <n-horiguchi@ah.jp.nec.com>
 Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
 Neeraj Upadhyay <quic_neeraju@quicinc.com> <neeraju@codeaurora.org>
 Neil Armstrong <neil.armstrong@linaro.org> <narmstrong@baylibre.com>
@@ -608,10 +608,10 @@ config ARCH_SUPPORTS_KEXEC
 	def_bool PPC_BOOK3S || PPC_E500 || (44x && !SMP)
 
 config ARCH_SUPPORTS_KEXEC_FILE
-	def_bool PPC64 && CRYPTO=y && CRYPTO_SHA256=y
+	def_bool PPC64
 
 config ARCH_SUPPORTS_KEXEC_PURGATORY
-	def_bool KEXEC_FILE
+	def_bool y
 
 config ARCH_SELECTS_KEXEC_FILE
 	def_bool y
@@ -702,9 +702,7 @@ config ARCH_SELECTS_KEXEC_FILE
 	select KEXEC_ELF
 
 config ARCH_SUPPORTS_KEXEC_PURGATORY
-	def_bool KEXEC_FILE
-	depends on CRYPTO=y
-	depends on CRYPTO_SHA256=y
+	def_bool ARCH_SUPPORTS_KEXEC_FILE
 
 config ARCH_SUPPORTS_CRASH_DUMP
 	def_bool y
@@ -254,13 +254,13 @@ config ARCH_SUPPORTS_KEXEC
 	def_bool y
 
 config ARCH_SUPPORTS_KEXEC_FILE
-	def_bool CRYPTO && CRYPTO_SHA256 && CRYPTO_SHA256_S390
+	def_bool y
 
 config ARCH_SUPPORTS_KEXEC_SIG
 	def_bool MODULE_SIG_FORMAT
 
 config ARCH_SUPPORTS_KEXEC_PURGATORY
-	def_bool KEXEC_FILE
+	def_bool y
 
 config ARCH_SUPPORTS_CRASH_DUMP
 	def_bool y
@@ -2072,7 +2072,7 @@ config ARCH_SUPPORTS_KEXEC
 	def_bool y
 
 config ARCH_SUPPORTS_KEXEC_FILE
-	def_bool X86_64 && CRYPTO && CRYPTO_SHA256
+	def_bool X86_64
 
 config ARCH_SELECTS_KEXEC_FILE
 	def_bool y
@@ -2080,7 +2080,7 @@ config ARCH_SELECTS_KEXEC_FILE
 	select HAVE_IMA_KEXEC if IMA
 
 config ARCH_SUPPORTS_KEXEC_PURGATORY
-	def_bool KEXEC_FILE
+	def_bool y
 
 config ARCH_SUPPORTS_KEXEC_SIG
 	def_bool y
@@ -36,6 +36,8 @@ config KEXEC
 config KEXEC_FILE
 	bool "Enable kexec file based system call"
 	depends on ARCH_SUPPORTS_KEXEC_FILE
+	select CRYPTO
+	select CRYPTO_SHA256
 	select KEXEC_CORE
 	help
 	  This is new version of kexec system call. This system call is
@@ -5501,6 +5501,17 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
 
 	mas_wr_end_piv(&wr_mas);
 	node_size = mas_wr_new_end(&wr_mas);
+
+	/* Slot store, does not require additional nodes */
+	if (node_size == wr_mas.node_end) {
+		/* reuse node */
+		if (!mt_in_rcu(mas->tree))
+			return 0;
+		/* shifting boundary */
+		if (wr_mas.offset_end - mas->offset == 1)
+			return 0;
+	}
+
 	if (node_size >= mt_slots[wr_mas.type]) {
 		/* Split, worst case for now. */
 		request = 1 + mas_mt_height(mas) * 2;
@@ -2607,6 +2607,15 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
 			goto put_folios;
 		end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
 
+		/*
+		 * Pairs with a barrier in
+		 * block_write_end()->mark_buffer_dirty() or other page
+		 * dirtying routines like iomap_write_end() to ensure
+		 * changes to page contents are visible before we see
+		 * increased inode size.
+		 */
+		smp_rmb();
+
 		/*
 		 * Once we start copying data, we don't want to be touching any
 		 * cachelines that might be contended:
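
The smp_rmb() added above pairs with the write-side barrier so that a reader which observes the enlarged inode size is also guaranteed to observe the newly written page contents. A minimal userspace sketch of the same ordering, using C11 acquire/release atomics in place of the kernel barriers (illustrative only, not kernel code; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static char page_data[64];	/* stands in for the page contents */
static atomic_long inode_size;	/* stands in for inode->i_size */

static void *writer(void *arg)
{
	(void)arg;
	page_data[0] = 'A';	/* dirty the "page" contents first... */
	/* ...then publish the larger size (release ~ the write-side barrier) */
	atomic_store_explicit(&inode_size, 1, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* read the size first (acquire plays the role of smp_rmb() here)... */
	if (atomic_load_explicit(&inode_size, memory_order_acquire) > 0)
		printf("saw '%c'\n", page_data[0]);	/* ...contents are visible now */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}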
@@ -493,14 +493,17 @@ static void kmalloc_oob_memset_2(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 128 - KASAN_GRANULE_SIZE;
+	size_t memset_size = 2;
 
 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	OPTIMIZER_HIDE_VAR(ptr);
 	OPTIMIZER_HIDE_VAR(size);
-	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
+	OPTIMIZER_HIDE_VAR(memset_size);
+	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size));
 	kfree(ptr);
 }
@@ -508,14 +511,17 @@ static void kmalloc_oob_memset_4(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 128 - KASAN_GRANULE_SIZE;
+	size_t memset_size = 4;
 
 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	OPTIMIZER_HIDE_VAR(ptr);
 	OPTIMIZER_HIDE_VAR(size);
-	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
+	OPTIMIZER_HIDE_VAR(memset_size);
+	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size));
 	kfree(ptr);
 }
@@ -523,14 +529,17 @@ static void kmalloc_oob_memset_8(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 128 - KASAN_GRANULE_SIZE;
+	size_t memset_size = 8;
 
 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	OPTIMIZER_HIDE_VAR(ptr);
 	OPTIMIZER_HIDE_VAR(size);
-	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
+	OPTIMIZER_HIDE_VAR(memset_size);
+	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size));
 	kfree(ptr);
 }
@@ -538,14 +547,17 @@ static void kmalloc_oob_memset_16(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 128 - KASAN_GRANULE_SIZE;
+	size_t memset_size = 16;
 
 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	OPTIMIZER_HIDE_VAR(ptr);
 	OPTIMIZER_HIDE_VAR(size);
-	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
+	OPTIMIZER_HIDE_VAR(memset_size);
+	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size));
 	kfree(ptr);
 }
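
The four hunks above route the memset() length through a variable that is hidden from the optimizer. With a compile-time constant length, the fortify string checker can reject the intentionally out-of-bounds memset() at build time, which would stop KASAN from exercising its runtime detection. A rough userspace sketch of the hiding trick: OPTIMIZER_HIDE_VAR() is the kernel macro, and the empty asm below is a commonly used equivalent; the out-of-bounds write is deliberate, exactly as in the test, so only run it under a runtime checker such as ASan:

#include <stdlib.h>
#include <string.h>

/* rough userspace stand-in for the kernel's OPTIMIZER_HIDE_VAR() */
#define HIDE(var) __asm__ volatile("" : "+r"(var))

int main(void)
{
	char *ptr = malloc(120);
	size_t memset_size = 2;

	if (!ptr)
		return 1;

	HIDE(ptr);
	HIDE(memset_size);
	/* writes bytes 119 and 120: one byte out of bounds on purpose, so a
	 * runtime checker (ASan here, KASAN in the kernel test) can flag it
	 * instead of the compiler refusing to build the call */
	memset(ptr + 119, 0, memset_size);
	free(ptr);
	return 0;
}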
@@ -595,10 +595,9 @@ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
 /*
  * Collect processes when the error hit an anonymous page.
  */
-static void collect_procs_anon(struct page *page, struct list_head *to_kill,
-		int force_early)
+static void collect_procs_anon(struct folio *folio, struct page *page,
+		struct list_head *to_kill, int force_early)
 {
-	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
 	struct anon_vma *av;
@@ -633,12 +632,12 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 /*
  * Collect processes when the error hit a file mapped page.
  */
-static void collect_procs_file(struct page *page, struct list_head *to_kill,
-		int force_early)
+static void collect_procs_file(struct folio *folio, struct page *page,
+		struct list_head *to_kill, int force_early)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
-	struct address_space *mapping = page->mapping;
+	struct address_space *mapping = folio->mapping;
 	pgoff_t pgoff;
 
 	i_mmap_lock_read(mapping);
@@ -704,17 +703,17 @@ static void collect_procs_fsdax(struct page *page,
 /*
  * Collect the processes who have the corrupted page mapped to kill.
  */
-static void collect_procs(struct page *page, struct list_head *tokill,
-		int force_early)
+static void collect_procs(struct folio *folio, struct page *page,
+		struct list_head *tokill, int force_early)
 {
-	if (!page->mapping)
+	if (!folio->mapping)
 		return;
 	if (unlikely(PageKsm(page)))
 		collect_procs_ksm(page, tokill, force_early);
 	else if (PageAnon(page))
-		collect_procs_anon(page, tokill, force_early);
+		collect_procs_anon(folio, page, tokill, force_early);
 	else
-		collect_procs_file(page, tokill, force_early);
+		collect_procs_file(folio, page, tokill, force_early);
 }
 
 struct hwpoison_walk {
@@ -1571,7 +1570,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * This check implies we don't kill processes if their pages
 	 * are in the swap cache early. Those are always late kills.
 	 */
-	if (!page_mapped(hpage))
+	if (!page_mapped(p))
 		return true;
 
 	if (PageSwapCache(p)) {
@@ -1602,7 +1601,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * mapped in dirty form. This has to be done before try_to_unmap,
 	 * because ttu takes the rmap data structures down.
 	 */
-	collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
+	collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
 
 	if (PageHuge(hpage) && !PageAnon(hpage)) {
 		/*
|
||||
try_to_unmap(folio, ttu);
|
||||
}
|
||||
|
||||
unmap_success = !page_mapped(hpage);
|
||||
unmap_success = !page_mapped(p);
|
||||
if (!unmap_success)
|
||||
pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
|
||||
pfn, page_mapcount(hpage));
|
||||
pfn, page_mapcount(p));
|
||||
|
||||
/*
|
||||
* try_to_unmap() might put mlocked page in lru cache, so call
|
||||
@@ -1705,7 +1704,7 @@ static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
 		 * mapping being torn down is communicated in siginfo, see
 		 * kill_proc()
 		 */
-		loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
+		loff_t start = ((loff_t)index << PAGE_SHIFT) & ~(size - 1);
 
 		unmap_mapping_range(mapping, start, size, 0);
 	}
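
The cast added above widens the page index before it is shifted. A small standalone C illustration (not kernel code) of why the order matters when the index is a 32-bit quantity, as a page index is on 32-bit kernels:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* page index corresponding to a file offset of 8 GiB; fits in 32 bits */
	uint32_t index = 0x00200000;

	/* shift performed in 32 bits first: the high bits are lost */
	int64_t truncated = index << PAGE_SHIFT;
	/* widen to 64 bits first, then shift: the offset survives intact */
	int64_t correct = (int64_t)index << PAGE_SHIFT;

	printf("truncated=%lld correct=%lld\n",
	       (long long)truncated, (long long)correct);
	return 0;
}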
@@ -1772,7 +1771,7 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
 	 * SIGBUS (i.e. MF_MUST_KILL)
 	 */
 	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
-	collect_procs(&folio->page, &to_kill, true);
+	collect_procs(folio, &folio->page, &to_kill, true);
 
 	unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
 unlock:
@@ -405,6 +405,7 @@ int folio_migrate_mapping(struct address_space *mapping,
 	int dirty;
 	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
 	long nr = folio_nr_pages(folio);
+	long entries, i;
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
@@ -442,8 +443,10 @@ int folio_migrate_mapping(struct address_space *mapping,
 			folio_set_swapcache(newfolio);
 			newfolio->private = folio_get_private(folio);
 		}
+		entries = nr;
 	} else {
 		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
+		entries = 1;
 	}
 
 	/* Move dirty while page refs frozen and newpage not yet exposed */
@@ -453,7 +456,11 @@ int folio_migrate_mapping(struct address_space *mapping,
 		folio_set_dirty(newfolio);
 	}
 
-	xas_store(&xas, newfolio);
+	/* Swap cache still stores N entries instead of a high-order entry */
+	for (i = 0; i < entries; i++) {
+		xas_store(&xas, newfolio);
+		xas_next(&xas);
+	}
 
 	/*
 	 * Drop cache reference from old page by unfreezing
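
The loop added above reflects the comment in the hunk: for a high-order folio the swap cache keeps one entry per page, so migration has to repoint every one of those slots at the new folio, not just the first. A deliberately simplified userspace analogy (a plain array standing in for the XArray; not kernel code):

#include <stdio.h>

#define NR_PAGES 4	/* pretend the folio spans 4 pages */

int main(void)
{
	int old_folio = 1, new_folio = 2;
	int *swap_cache[NR_PAGES];
	int i;

	/* the swap cache keeps one entry per page of the folio */
	for (i = 0; i < NR_PAGES; i++)
		swap_cache[i] = &old_folio;

	/* migration must repoint every entry, not only slot 0 */
	for (i = 0; i < NR_PAGES; i++)
		swap_cache[i] = &new_folio;

	for (i = 0; i < NR_PAGES; i++)
		printf("slot %d -> %s folio\n", i,
		       swap_cache[i] == &new_folio ? "new" : "old");
	return 0;
}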
@@ -35538,7 +35538,7 @@ static noinline void __init check_prealloc(struct maple_tree *mt)
 	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
 	allocated = mas_allocated(&mas);
 	height = mas_mt_height(&mas);
-	MT_BUG_ON(mt, allocated != 1);
+	MT_BUG_ON(mt, allocated != 0);
 	mas_store_prealloc(&mas, ptr);
 	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
 
@@ -62,6 +62,9 @@ static void test_mlock_limit(int fd)
 	char *mem;
 
 	len = mlock_limit_cur;
+	if (len % page_size != 0)
+		len = (len/page_size) * page_size;
+
 	mem = mmap(NULL, len, prot, mode, fd, 0);
 	if (mem == MAP_FAILED) {
 		fail("unable to mmap secret memory\n");
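
The check added above floors the mlock limit to a whole number of pages before it is used as an mmap() length. A standalone userspace sketch of the same rounding (the length value is illustrative only):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	size_t len = 1000000;	/* stand-in for an RLIMIT_MEMLOCK value */

	/* floor to a whole number of pages, as in the hunk above */
	if (len % page_size != 0)
		len = (len / page_size) * page_size;

	printf("page_size=%ld floored len=%zu\n", page_size, len);
	return 0;
}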