Mirror of https://github.com/torvalds/linux.git (synced 2024-11-10 14:11:52 +00:00)
12 hotfixes, mostly against mm/. Five of these fixes are cc:stable.
-----BEGIN PGP SIGNATURE-----
 iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCY+qxtQAKCRDdBJ7gKXxA
 jmvNAP4vwrZJ/eXlp/JC35r84fT6ykMQLbv+oT6rG7lx8aH2JgEA5QSYTBvcb4VF
 n6tf6OpZbCHtvTPy4/+aVj7hW0XUnAY=
 =C92n
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2023-02-13-13-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Twelve hotfixes, mostly against mm/. Five of these fixes are cc:stable"

* tag 'mm-hotfixes-stable-2023-02-13-13-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  of: reserved_mem: Have kmemleak ignore dynamically allocated reserved mem
  scripts/gdb: fix 'lx-current' for x86
  lib: parser: optimize match_NUMBER apis to use local array
  mm: shrinkers: fix deadlock in shrinker debugfs
  mm: hwpoison: support recovery from ksm_might_need_to_copy()
  kasan: fix Oops due to missing calls to kasan_arch_is_ready()
  revert "squashfs: harden sanity check in squashfs_read_xattr_id_table"
  fsdax: dax_unshare_iter() should return a valid length
  mm/gup: add folio to list when folio_isolate_lru() succeed
  aio: fix mremap after fork null-deref
  mailmap: add entry for Alexander Mikhalitsyn
  mm: extend max struct page size for kmsan
commit f6feea56f6

.mailmap | 2
@@ -25,6 +25,8 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Alexander Lobakin <alobakin@pm.me> <alobakin@dlink.ru>
 Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
 Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
+Alexander Mikhalitsyn <alexander@mihalicyn.com> <alexander.mikhalitsyn@virtuozzo.com>
+Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@canonical.com>
 Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
 Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>

drivers/of/of_reserved_mem.c

@@ -48,9 +48,10 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
 		err = memblock_mark_nomap(base, size);
 		if (err)
 			memblock_phys_free(base, size);
-		kmemleak_ignore_phys(base);
 	}
 
+	kmemleak_ignore_phys(base);
+
 	return err;
 }
 
fs/aio.c | 4

@@ -361,6 +361,9 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
 	spin_lock(&mm->ioctx_lock);
 	rcu_read_lock();
 	table = rcu_dereference(mm->ioctx_table);
+	if (!table)
+		goto out_unlock;
+
 	for (i = 0; i < table->nr; i++) {
 		struct kioctx *ctx;
 
@@ -374,6 +377,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
 		}
 	}
 
+out_unlock:
 	rcu_read_unlock();
 	spin_unlock(&mm->ioctx_lock);
 	return res;

fs/dax.c | 5

@@ -1271,8 +1271,9 @@ static s64 dax_unshare_iter(struct iomap_iter *iter)
 	if (ret < 0)
 		goto out_unlock;
 
-	ret = copy_mc_to_kernel(daddr, saddr, length);
-	if (ret)
+	if (copy_mc_to_kernel(daddr, saddr, length) == 0)
+		ret = length;
+	else
 		ret = -EIO;
 
 out_unlock:

fs/squashfs/xattr_id.c

@@ -76,7 +76,7 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
 	/* Sanity check values */
 
 	/* there is always at least one xattr id */
-	if (*xattr_ids <= 0)
+	if (*xattr_ids == 0)
 		return ERR_PTR(-EINVAL);
 
 	len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);

include/linux/mm.h

@@ -137,7 +137,7 @@ extern int mmap_rnd_compat_bits __read_mostly;
  * define their own version of this macro in <asm/pgtable.h>
  */
 #if BITS_PER_LONG == 64
-/* This function must be updated when the size of struct page grows above 80
+/* This function must be updated when the size of struct page grows above 96
  * or reduces below 56. The idea that compiler optimizes out switch()
  * statement, and only leaves move/store instructions. Also the compiler can
  * combine write statements if they are both assignments and can be reordered,
@@ -148,12 +148,18 @@ static inline void __mm_zero_struct_page(struct page *page)
 {
 	unsigned long *_pp = (void *)page;
 
-	/* Check that struct page is either 56, 64, 72, or 80 bytes */
+	/* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
 	BUILD_BUG_ON(sizeof(struct page) & 7);
 	BUILD_BUG_ON(sizeof(struct page) < 56);
-	BUILD_BUG_ON(sizeof(struct page) > 80);
+	BUILD_BUG_ON(sizeof(struct page) > 96);
 
 	switch (sizeof(struct page)) {
+	case 96:
+		_pp[11] = 0;
+		fallthrough;
+	case 88:
+		_pp[10] = 0;
+		fallthrough;
 	case 80:
 		_pp[9] = 0;
 		fallthrough;

include/linux/shrinker.h

@@ -107,7 +107,7 @@ extern void synchronize_shrinkers(void);
 
 #ifdef CONFIG_SHRINKER_DEBUG
 extern int shrinker_debugfs_add(struct shrinker *shrinker);
-extern void shrinker_debugfs_remove(struct shrinker *shrinker);
+extern struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker);
 extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
 						  const char *fmt, ...);
 #else /* CONFIG_SHRINKER_DEBUG */
@@ -115,8 +115,9 @@ static inline int shrinker_debugfs_add(struct shrinker *shrinker)
 {
 	return 0;
 }
-static inline void shrinker_debugfs_remove(struct shrinker *shrinker)
+static inline struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
 {
+	return NULL;
 }
 static inline __printf(2, 3)
 int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)

lib/parser.c | 39

@@ -11,6 +11,15 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 
+/*
+ * max size needed by different bases to express U64
+ * HEX: "0xFFFFFFFFFFFFFFFF" --> 18
+ * DEC: "18446744073709551615" --> 20
+ * OCT: "01777777777777777777777" --> 23
+ * pick the max one to define NUMBER_BUF_LEN
+ */
+#define NUMBER_BUF_LEN 24
+
 /**
  * match_one - Determines if a string matches a simple pattern
  * @s: the string to examine for presence of the pattern
@@ -129,14 +138,12 @@ EXPORT_SYMBOL(match_token);
 static int match_number(substring_t *s, int *result, int base)
 {
 	char *endp;
-	char *buf;
+	char buf[NUMBER_BUF_LEN];
 	int ret;
 	long val;
 
-	buf = match_strdup(s);
-	if (!buf)
-		return -ENOMEM;
-
+	if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+		return -ERANGE;
 	ret = 0;
 	val = simple_strtol(buf, &endp, base);
 	if (endp == buf)
@@ -145,7 +152,6 @@ static int match_number(substring_t *s, int *result, int base)
 		ret = -ERANGE;
 	else
 		*result = (int) val;
-	kfree(buf);
 	return ret;
 }
 
@@ -163,18 +169,15 @@ static int match_number(substring_t *s, int *result, int base)
  */
 static int match_u64int(substring_t *s, u64 *result, int base)
 {
-	char *buf;
+	char buf[NUMBER_BUF_LEN];
 	int ret;
 	u64 val;
 
-	buf = match_strdup(s);
-	if (!buf)
-		return -ENOMEM;
-
+	if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+		return -ERANGE;
 	ret = kstrtoull(buf, base, &val);
 	if (!ret)
 		*result = val;
-	kfree(buf);
 	return ret;
 }
 
@@ -206,14 +209,12 @@ EXPORT_SYMBOL(match_int);
  */
 int match_uint(substring_t *s, unsigned int *result)
 {
-	int err = -ENOMEM;
-	char *buf = match_strdup(s);
+	char buf[NUMBER_BUF_LEN];
 
-	if (buf) {
-		err = kstrtouint(buf, 10, result);
-		kfree(buf);
-	}
-	return err;
+	if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+		return -ERANGE;
+
+	return kstrtouint(buf, 10, result);
 }
 EXPORT_SYMBOL(match_uint);

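For context on the lib/parser.c change: match_number(), match_u64int() and match_uint() sit behind match_int()/match_u64()/match_uint(), which callers usually reach after tokenizing an option string with match_token(). A rough usage sketch follows; the Opt_* ids, the token table and parse_size_option() are invented for illustration. After this patch, a number longer than NUMBER_BUF_LEN - 1 characters fails with -ERANGE, and the -ENOMEM case disappears because no allocation is made:

#include <linux/kernel.h>
#include <linux/parser.h>

/* Invented token ids and table, only to show how the APIs are reached. */
enum { Opt_size, Opt_err };

static const match_table_t tokens = {
	{ Opt_size, "size=%u" },
	{ Opt_err,  NULL }
};

static int parse_size_option(char *p, unsigned int *size)
{
	substring_t args[MAX_OPT_ARGS];

	/* match_token() fills args[0] with the substring matched by "%u"... */
	if (match_token(p, tokens, args) != Opt_size)
		return -EINVAL;

	/*
	 * ...and match_uint() now copies it into an on-stack
	 * char buf[NUMBER_BUF_LEN] instead of a match_strdup() allocation,
	 * returning -ERANGE if the digits do not fit.
	 */
	return match_uint(&args[0], size);
}
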
mm/gup.c | 2

@@ -1914,7 +1914,7 @@ static unsigned long collect_longterm_unpinnable_pages(
 			drain_allow = false;
 		}
 
-		if (!folio_isolate_lru(folio))
+		if (folio_isolate_lru(folio))
 			continue;
 
 		list_add_tail(&folio->lru, movable_page_list);

mm/kasan/common.c

@@ -246,6 +246,9 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 
 static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
 {
+	if (!kasan_arch_is_ready())
+		return false;
+
 	if (ptr != page_address(virt_to_head_page(ptr))) {
 		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
 		return true;

mm/kasan/generic.c

@@ -191,7 +191,12 @@ bool kasan_check_range(unsigned long addr, size_t size, bool write,
 
 bool kasan_byte_accessible(const void *addr)
 {
-	s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
+	s8 shadow_byte;
+
+	if (!kasan_arch_is_ready())
+		return true;
+
+	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
 
 	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
 }

mm/kasan/shadow.c

@@ -291,6 +291,9 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 	unsigned long shadow_start, shadow_end;
 	int ret;
 
+	if (!kasan_arch_is_ready())
+		return 0;
+
 	if (!is_vmalloc_or_module_addr((void *)addr))
 		return 0;
 
@@ -459,6 +462,9 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 	unsigned long region_start, region_end;
 	unsigned long size;
 
+	if (!kasan_arch_is_ready())
+		return;
+
 	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
 	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
 
@@ -502,6 +508,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
 	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
 	 */
 
+	if (!kasan_arch_is_ready())
+		return (void *)start;
+
 	if (!is_vmalloc_or_module_addr(start))
 		return (void *)start;
 
@@ -524,6 +533,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
  */
 void __kasan_poison_vmalloc(const void *start, unsigned long size)
 {
+	if (!kasan_arch_is_ready())
+		return;
+
 	if (!is_vmalloc_or_module_addr(start))
 		return;
 

mm/ksm.c | 7

@@ -2629,8 +2629,11 @@ struct page *ksm_might_need_to_copy(struct page *page,
 		new_page = NULL;
 	}
 	if (new_page) {
-		copy_user_highpage(new_page, page, address, vma);
-
+		if (copy_mc_user_highpage(new_page, page, address, vma)) {
+			put_page(new_page);
+			memory_failure_queue(page_to_pfn(page), 0);
+			return ERR_PTR(-EHWPOISON);
+		}
 		SetPageDirty(new_page);
 		__SetPageUptodate(new_page);
 		__SetPageLocked(new_page);

mm/memory.c

@@ -3840,6 +3840,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		if (unlikely(!page)) {
 			ret = VM_FAULT_OOM;
 			goto out_page;
+		} else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+			ret = VM_FAULT_HWPOISON;
+			goto out_page;
 		}
 		folio = page_folio(page);
 

mm/shrinker_debug.c

@@ -246,18 +246,21 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
 }
 EXPORT_SYMBOL(shrinker_debugfs_rename);
 
-void shrinker_debugfs_remove(struct shrinker *shrinker)
+struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
 {
+	struct dentry *entry = shrinker->debugfs_entry;
+
 	lockdep_assert_held(&shrinker_rwsem);
 
 	kfree_const(shrinker->name);
 	shrinker->name = NULL;
 
-	if (!shrinker->debugfs_entry)
-		return;
+	if (entry) {
+		ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
+		shrinker->debugfs_entry = NULL;
+	}
 
-	debugfs_remove_recursive(shrinker->debugfs_entry);
-	ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
+	return entry;
 }
 
 static int __init shrinker_debugfs_init(void)

mm/swapfile.c

@@ -1764,12 +1764,15 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	struct page *swapcache;
 	spinlock_t *ptl;
 	pte_t *pte, new_pte;
+	bool hwposioned = false;
 	int ret = 1;
 
 	swapcache = page;
 	page = ksm_might_need_to_copy(page, vma, addr);
 	if (unlikely(!page))
 		return -ENOMEM;
+	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
+		hwposioned = true;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
@@ -1777,15 +1780,19 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		goto out;
 	}
 
-	if (unlikely(!PageUptodate(page))) {
-		pte_t pteval;
+	if (unlikely(hwposioned || !PageUptodate(page))) {
+		swp_entry_t swp_entry;
 
 		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
-		pteval = swp_entry_to_pte(make_swapin_error_entry());
-		set_pte_at(vma->vm_mm, addr, pte, pteval);
-		swap_free(entry);
+		if (hwposioned) {
+			swp_entry = make_hwpoison_entry(swapcache);
+			page = swapcache;
+		} else {
+			swp_entry = make_swapin_error_entry();
+		}
+		new_pte = swp_entry_to_pte(swp_entry);
 		ret = 0;
-		goto out;
+		goto setpte;
 	}
 
 	/* See do_swap_page() */
@@ -1817,6 +1824,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		new_pte = pte_mksoft_dirty(new_pte);
 	if (pte_swp_uffd_wp(*pte))
 		new_pte = pte_mkuffd_wp(new_pte);
+setpte:
 	set_pte_at(vma->vm_mm, addr, pte, new_pte);
 	swap_free(entry);
 out:

mm/vmscan.c

@@ -741,6 +741,8 @@ EXPORT_SYMBOL(register_shrinker);
  */
 void unregister_shrinker(struct shrinker *shrinker)
 {
+	struct dentry *debugfs_entry;
+
 	if (!(shrinker->flags & SHRINKER_REGISTERED))
 		return;
 
@@ -749,9 +751,11 @@ void unregister_shrinker(struct shrinker *shrinker)
 	shrinker->flags &= ~SHRINKER_REGISTERED;
 	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
 		unregister_memcg_shrinker(shrinker);
-	shrinker_debugfs_remove(shrinker);
+	debugfs_entry = shrinker_debugfs_remove(shrinker);
 	up_write(&shrinker_rwsem);
 
+	debugfs_remove_recursive(debugfs_entry);
+
 	kfree(shrinker->nr_deferred);
 	shrinker->nr_deferred = NULL;
 }

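The shrinker pieces above (include/linux/shrinker.h, mm/shrinker_debug.c, mm/vmscan.c) implement one idea: the shrinker debugfs read handlers take shrinker_rwsem themselves, so calling debugfs_remove_recursive() while holding the semaphore for write can deadlock against an in-flight reader. shrinker_debugfs_remove() therefore only detaches the entry under the lock and hands the dentry back, and unregister_shrinker() removes it after up_write(). A minimal illustrative sketch of that pattern, with invented names (my_object, my_rwsem, my_object_teardown), not actual kernel symbols:

#include <linux/debugfs.h>
#include <linux/rwsem.h>

/* Hypothetical object with a debugfs entry; for illustration only. */
struct my_object {
	struct dentry *debugfs_entry;
};

static DECLARE_RWSEM(my_rwsem);	/* lock also taken by the debugfs handlers */

static void my_object_teardown(struct my_object *obj)
{
	struct dentry *entry;

	/* Detach the entry while the object is protected by the lock... */
	down_write(&my_rwsem);
	entry = obj->debugfs_entry;
	obj->debugfs_entry = NULL;
	up_write(&my_rwsem);

	/*
	 * ...but only remove it after dropping the lock: removal can wait
	 * for readers that themselves need my_rwsem.
	 */
	debugfs_remove_recursive(entry);	/* NULL-safe */
}
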
scripts/gdb/linux/cpus.py

@@ -163,7 +163,7 @@ def get_current_task(cpu):
     task_ptr_type = task_type.get_type().pointer()
 
     if utils.is_target_arch("x86"):
-        var_ptr = gdb.parse_and_eval("&current_task")
+        var_ptr = gdb.parse_and_eval("&pcpu_hot.current_task")
         return per_cpu(var_ptr, cpu).dereference()
     elif utils.is_target_arch("aarch64"):
         current_task_addr = gdb.parse_and_eval("$SP_EL0")