From 0093de693fe7baf31641d1863793e6db93a43b6e Mon Sep 17 00:00:00 2001 From: Yixuan Cao Date: Wed, 10 Nov 2021 20:32:30 -0800 Subject: [PATCH 1/7] mm/page_owner.c: modify the type of argument "order" in some functions The type of "order" in struct page_owner is unsigned short. However, it is unsigned int in the following 3 functions: __reset_page_owner __set_page_owner __set_page_owner_handle The type of "order" in the argument lists is unsigned int, which is inconsistent. [akpm@linux-foundation.org: update include/linux/page_owner.h] Link: https://lkml.kernel.org/r/20211020125945.47792-1-caoyixuan2019@email.szu.edu.cn Signed-off-by: Yixuan Cao Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page_owner.h | 12 ++++++------ mm/page_owner.c | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index 43c638c51c1f..119a0c9d2a8b 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -8,9 +8,9 @@ extern struct static_key_false page_owner_inited; extern struct page_ext_operations page_owner_ops; -extern void __reset_page_owner(struct page *page, unsigned int order); +extern void __reset_page_owner(struct page *page, unsigned short order); extern void __set_page_owner(struct page *page, - unsigned int order, gfp_t gfp_mask); + unsigned short order, gfp_t gfp_mask); extern void __split_page_owner(struct page *page, unsigned int nr); extern void __folio_copy_owner(struct folio *newfolio, struct folio *old); extern void __set_page_owner_migrate_reason(struct page *page, int reason); @@ -18,14 +18,14 @@ extern void __dump_page_owner(const struct page *page); extern void pagetypeinfo_showmixedcount_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone); -static inline void reset_page_owner(struct page *page, unsigned int order) +static inline void reset_page_owner(struct page *page, unsigned short order) { if (static_branch_unlikely(&page_owner_inited)) __reset_page_owner(page, order); } static inline void set_page_owner(struct page *page, - unsigned int order, gfp_t gfp_mask) + unsigned short order, gfp_t gfp_mask) { if (static_branch_unlikely(&page_owner_inited)) __set_page_owner(page, order, gfp_mask); @@ -52,7 +52,7 @@ static inline void dump_page_owner(const struct page *page) __dump_page_owner(page); } #else -static inline void reset_page_owner(struct page *page, unsigned int order) +static inline void reset_page_owner(struct page *page, unsigned short order) { } static inline void set_page_owner(struct page *page, @@ -60,7 +60,7 @@ static inline void set_page_owner(struct page *page, { } static inline void split_page_owner(struct page *page, - unsigned int order) + unsigned short order) { } static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio) diff --git a/mm/page_owner.c b/mm/page_owner.c index 79936db59859..4f924957ce7a 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -125,7 +125,7 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags) return handle; } -void __reset_page_owner(struct page *page, unsigned int order) +void __reset_page_owner(struct page *page, unsigned short order) { int i; struct page_ext *page_ext; @@ -149,7 +149,7 @@ void __reset_page_owner(struct page *page, unsigned int order) static inline void __set_page_owner_handle(struct page_ext *page_ext, depot_stack_handle_t handle, - unsigned int order, gfp_t gfp_mask) + unsigned short order, gfp_t gfp_mask) { struct page_owner *page_owner; int i; @@ -169,7 
+169,7 @@ static inline void __set_page_owner_handle(struct page_ext *page_ext, } } -noinline void __set_page_owner(struct page *page, unsigned int order, +noinline void __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask) { struct page_ext *page_ext = lookup_page_ext(page); From 252220dab9d4e4aa61f87b729468e8ac68fcc8bb Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 10 Nov 2021 20:32:33 -0800 Subject: [PATCH 2/7] mm: allow only SLUB on PREEMPT_RT Memory allocators may disable interrupts or preemption as part of the allocation and freeing process. For PREEMPT_RT it is important that these sections remain deterministic and short and therefore don't depend on the size of the memory to allocate/free or the inner state of the algorithm. Until v3.12-RT the SLAB allocator was an option but involved several changes to meet all the requirements. The SLUB design fits better with the PREEMPT_RT model and so the SLAB patches were dropped in the 3.12-RT patchset. Comparing the two allocators, SLUB outperformed SLAB in both throughput (time needed to allocate and free memory) and the maximal latency of the system measured with cyclictest during hackbench. SLOB was never evaluated since it was unlikely that it would perform better than SLAB. During a quick test, the kernel crashed with SLOB enabled during boot. Disable SLAB and SLOB on PREEMPT_RT. [bigeasy@linutronix.de: commit description] Link: https://lkml.kernel.org/r/20211015210336.gen3tib33ig5q2md@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior Acked-by: Vlastimil Babka Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- init/Kconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/init/Kconfig b/init/Kconfig index 21b1f4870c80..45bcaa8e7481 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1896,6 +1896,7 @@ choice config SLAB bool "SLAB" + depends on !PREEMPT_RT select HAVE_HARDENED_USERCOPY_ALLOCATOR help The regular slab allocator that is established and known to work @@ -1916,6 +1917,7 @@ config SLUB config SLOB depends on EXPERT bool "SLOB (Simple Allocator)" + depends on !PREEMPT_RT help SLOB replaces the stock allocator with a drastically simpler allocator. SLOB is generally more space efficient but From 0ef024621417fa3fcdeb2c3320f90ee34e18a5d9 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 10 Nov 2021 20:32:37 -0800 Subject: [PATCH 3/7] mm: migrate: simplify the file-backed pages validation when migrating its mapping There is no need to validate the file-backed page's refcount before trying to freeze the page's expected refcount; instead we can rely on folio_ref_freeze() to validate if the page has the expected refcount before migrating its mapping. Moreover, we are always under the page lock when migrating the page mapping, which means nowhere else can remove it from the page cache, so we can remove the xas_load() validation under the i_pages lock.
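For context on why the extra check is redundant: folio_ref_freeze() is built on an atomic compare-and-exchange of the reference count against the expected value, so it can only succeed when the count is exactly what the caller expects, and a separate folio_ref_count() test beforehand can only race with it. A minimal user-space sketch of that semantics with C11 atomics (the names here are invented for illustration; this is not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for a folio's reference count. */
struct demo_folio {
	atomic_int refcount;
};

/*
 * Analogue of folio_ref_freeze(): atomically set the count to zero
 * if and only if it currently equals 'expected'.
 */
static bool demo_ref_freeze(struct demo_folio *folio, int expected)
{
	int old = expected;

	return atomic_compare_exchange_strong(&folio->refcount, &old, 0);
}

int main(void)
{
	struct demo_folio folio = { .refcount = 2 };

	/* Succeeds: the count matches, so the "page" is frozen. */
	printf("freeze(expected=2): %d\n", demo_ref_freeze(&folio, 2));

	/* Fails: an unexpected extra reference is detected atomically. */
	folio.refcount = 3;
	printf("freeze(expected=2): %d\n", demo_ref_freeze(&folio, 2));
	return 0;
}

Either the freeze observes the expected count and succeeds, or it fails and the caller backs off with -EAGAIN, which is exactly what the remaining check in the hunk below does.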
Link: https://lkml.kernel.org/r/cover.1629447552.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/df4c129fd8e86a95dbc55f4663d77441cc0d3bd1.1629447552.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Suggested-by: Matthew Wilcox Cc: Yang Shi Cc: Alistair Popple Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/migrate.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index a11e948593df..43dd88c7fcdc 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -404,12 +404,6 @@ int folio_migrate_mapping(struct address_space *mapping, newzone = folio_zone(newfolio); xas_lock_irq(&xas); - if (folio_ref_count(folio) != expected_count || - xas_load(&xas) != folio) { - xas_unlock_irq(&xas); - return -EAGAIN; - } - if (!folio_ref_freeze(folio, expected_count)) { xas_unlock_irq(&xas); return -EAGAIN; From ab09243aa95a72bac5c71e852773de34116f8d0f Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Wed, 10 Nov 2021 20:32:40 -0800 Subject: [PATCH 4/7] mm/migrate.c: remove MIGRATE_PFN_LOCKED MIGRATE_PFN_LOCKED is used to indicate to migrate_vma_prepare() that a source page was already locked during migrate_vma_collect(). If it wasn't, then a second attempt is made to lock the page. However, if the first attempt failed it's unlikely a second attempt will succeed, and the retry adds complexity. So clean this up by removing the retry and the MIGRATE_PFN_LOCKED flag. Destination pages are also meant to have the MIGRATE_PFN_LOCKED flag set, but nothing actually checks that. Link: https://lkml.kernel.org/r/20211025041608.289017-1-apopple@nvidia.com Signed-off-by: Alistair Popple Reviewed-by: Ralph Campbell Acked-by: Felix Kuehling Cc: Alex Deucher Cc: Jerome Glisse Cc: John Hubbard Cc: Zi Yan Cc: Christoph Hellwig Cc: Ben Skeggs Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/vm/hmm.rst | 2 +- arch/powerpc/kvm/book3s_hv_uvmem.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 2 - drivers/gpu/drm/nouveau/nouveau_dmem.c | 4 +- include/linux/migrate.h | 1 - lib/test_hmm.c | 5 +- mm/migrate.c | 149 +++++------------------ 7 files changed, 37 insertions(+), 130 deletions(-) diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst index a14c2938e7af..f2a59ed82ed3 100644 --- a/Documentation/vm/hmm.rst +++ b/Documentation/vm/hmm.rst @@ -360,7 +360,7 @@ between device driver specific code and shared common code: system memory page, locks the page with ``lock_page()``, and fills in the ``dst`` array entry with:: - dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; + dst[i] = migrate_pfn(page_to_pfn(dpage)); Now that the driver knows that this page is being migrated, it can invalidate device private MMU mappings and copy device private memory diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index a7061ee3b157..28c436df9935 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -560,7 +560,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma, gpa, 0, page_shift); if (ret == U_SUCCESS) - *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED; + *mig.dst = migrate_pfn(pfn); else { unlock_page(dpage); __free_page(dpage); @@ -774,7 +774,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma, } } - *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; + *mig.dst = migrate_pfn(page_to_pfn(dpage)); migrate_vma_pages(&mig); out_finalize: migrate_vma_finalize(&mig); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 6d8634e40b3b..d43bfd8b35ae 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -317,7 +317,6 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]); svm_migrate_get_vram_page(prange, migrate->dst[i]); migrate->dst[i] = migrate_pfn(migrate->dst[i]); - migrate->dst[i] |= MIGRATE_PFN_LOCKED; src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_TO_DEVICE); r = dma_mapping_error(dev, src[i]); @@ -610,7 +609,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, dst[i] >> PAGE_SHIFT, page_to_pfn(dpage)); migrate->dst[i] = migrate_pfn(page_to_pfn(dpage)); - migrate->dst[i] |= MIGRATE_PFN_LOCKED; j++; } diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 92987daa5e17..3828aafd3ac4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -166,7 +166,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm, goto error_dma_unmap; mutex_unlock(&svmm->mutex); - args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; + args->dst[0] = migrate_pfn(page_to_pfn(dpage)); return 0; error_dma_unmap: @@ -602,7 +602,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT); if (src & MIGRATE_PFN_WRITE) *pfn |= NVIF_VMM_PFNMAP_V0_W; - return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; + return migrate_pfn(page_to_pfn(dpage)); out_dma_unmap: dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); diff --git a/include/linux/migrate.h b/include/linux/migrate.h index eeb818c4fc78..4850cc5bf813 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -110,7 +110,6 @@ static inline int migrate_misplaced_page(struct page *page, */ #define MIGRATE_PFN_VALID (1UL << 0) #define MIGRATE_PFN_MIGRATE (1UL << 1) -#define MIGRATE_PFN_LOCKED (1UL << 2) #define MIGRATE_PFN_WRITE (1UL << 3) #define MIGRATE_PFN_SHIFT 6 diff --git a/lib/test_hmm.c b/lib/test_hmm.c index c259842f6d44..e2ce8f9b7605 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -613,8 +613,7 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args, */ rpage->zone_device_data = dmirror; - *dst = migrate_pfn(page_to_pfn(dpage)) | - MIGRATE_PFN_LOCKED; + *dst = migrate_pfn(page_to_pfn(dpage)); if ((*src & MIGRATE_PFN_WRITE) || (!spage && args->vma->vm_flags & VM_WRITE)) *dst |= MIGRATE_PFN_WRITE; @@ -1137,7 +1136,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args, lock_page(dpage); xa_erase(&dmirror->pt, addr >> PAGE_SHIFT); copy_highpage(dpage, spage); - *dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; + *dst = migrate_pfn(page_to_pfn(dpage)); if (*src & MIGRATE_PFN_WRITE) *dst |= MIGRATE_PFN_WRITE; } diff --git a/mm/migrate.c b/mm/migrate.c index 43dd88c7fcdc..cf25b00f03c8 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2362,7 +2362,6 @@ again: * can't be dropped from it). 
*/ get_page(page); - migrate->cpages++; /* * Optimize for the common case where page is only mapped once @@ -2372,7 +2371,7 @@ again: if (trylock_page(page)) { pte_t swp_pte; - mpfn |= MIGRATE_PFN_LOCKED; + migrate->cpages++; ptep_get_and_clear(mm, addr, ptep); /* Setup special migration page table entry */ @@ -2406,6 +2405,9 @@ again: if (pte_present(pte)) unmapped++; + } else { + put_page(page); + mpfn = 0; } next: @@ -2510,15 +2512,17 @@ static bool migrate_vma_check_page(struct page *page) } /* - * migrate_vma_prepare() - lock pages and isolate them from the lru + * migrate_vma_unmap() - replace page mapping with special migration pte entry * @migrate: migrate struct containing all migration information * - * This locks pages that have been collected by migrate_vma_collect(). Once each - * page is locked it is isolated from the lru (for non-device pages). Finally, - * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be - * migrated by concurrent kernel threads. + * Isolate pages from the LRU and replace mappings (CPU page table pte) with a + * special migration pte entry and check if it has been pinned. Pinned pages are + * restored because we cannot migrate them. + * + * This is the last step before we call the device driver callback to allocate + * destination memory and copy contents of original page over to new page. */ -static void migrate_vma_prepare(struct migrate_vma *migrate) +static void migrate_vma_unmap(struct migrate_vma *migrate) { const unsigned long npages = migrate->npages; const unsigned long start = migrate->start; @@ -2527,32 +2531,12 @@ static void migrate_vma_prepare(struct migrate_vma *migrate) lru_add_drain(); - for (i = 0; (i < npages) && migrate->cpages; i++) { + for (i = 0; i < npages; i++) { struct page *page = migrate_pfn_to_page(migrate->src[i]); - bool remap = true; if (!page) continue; - if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) { - /* - * Because we are migrating several pages there can be - * a deadlock between 2 concurrent migration where each - * are waiting on each other page lock. - * - * Make migrate_vma() a best effort thing and backoff - * for any page we can not lock right away. 
- */ - if (!trylock_page(page)) { - migrate->src[i] = 0; - migrate->cpages--; - put_page(page); - continue; - } - remap = false; - migrate->src[i] |= MIGRATE_PFN_LOCKED; - } - /* ZONE_DEVICE pages are not on LRU */ if (!is_zone_device_page(page)) { if (!PageLRU(page) && allow_drain) { @@ -2562,16 +2546,9 @@ static void migrate_vma_prepare(struct migrate_vma *migrate) } if (isolate_lru_page(page)) { - if (remap) { - migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; - migrate->cpages--; - restore++; - } else { - migrate->src[i] = 0; - unlock_page(page); - migrate->cpages--; - put_page(page); - } + migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; + migrate->cpages--; + restore++; continue; } @@ -2579,80 +2556,20 @@ static void migrate_vma_prepare(struct migrate_vma *migrate) put_page(page); } - if (!migrate_vma_check_page(page)) { - if (remap) { - migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; - migrate->cpages--; - restore++; - - if (!is_zone_device_page(page)) { - get_page(page); - putback_lru_page(page); - } - } else { - migrate->src[i] = 0; - unlock_page(page); - migrate->cpages--; - - if (!is_zone_device_page(page)) - putback_lru_page(page); - else - put_page(page); - } - } - } - - for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) { - struct page *page = migrate_pfn_to_page(migrate->src[i]); - - if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) - continue; - - remove_migration_pte(page, migrate->vma, addr, page); - - migrate->src[i] = 0; - unlock_page(page); - put_page(page); - restore--; - } -} - -/* - * migrate_vma_unmap() - replace page mapping with special migration pte entry - * @migrate: migrate struct containing all migration information - * - * Replace page mapping (CPU page table pte) with a special migration pte entry - * and check again if it has been pinned. Pinned pages are restored because we - * cannot migrate them. - * - * This is the last step before we call the device driver callback to allocate - * destination memory and copy contents of original page over to new page. - */ -static void migrate_vma_unmap(struct migrate_vma *migrate) -{ - const unsigned long npages = migrate->npages; - const unsigned long start = migrate->start; - unsigned long addr, i, restore = 0; - - for (i = 0; i < npages; i++) { - struct page *page = migrate_pfn_to_page(migrate->src[i]); - - if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) - continue; - - if (page_mapped(page)) { + if (page_mapped(page)) try_to_migrate(page, 0); - if (page_mapped(page)) - goto restore; - } - if (migrate_vma_check_page(page)) + if (page_mapped(page) || !migrate_vma_check_page(page)) { + if (!is_zone_device_page(page)) { + get_page(page); + putback_lru_page(page); + } + + migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; + migrate->cpages--; + restore++; continue; - -restore: - migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; - migrate->cpages--; - restore++; + } } for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) { @@ -2665,12 +2582,8 @@ restore: migrate->src[i] = 0; unlock_page(page); + put_page(page); restore--; - - if (is_zone_device_page(page)) - put_page(page); - else - putback_lru_page(page); } } @@ -2693,8 +2606,8 @@ restore: * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE * flag set). 
Once these are allocated and copied, the caller must update each * corresponding entry in the dst array with the pfn value of the destination - * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set - * (destination pages must have their struct pages locked, via lock_page()). + * page and with MIGRATE_PFN_VALID. Destination pages must be locked via + * lock_page(). * * Note that the caller does not have to migrate all the pages that are marked * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from @@ -2763,8 +2676,6 @@ int migrate_vma_setup(struct migrate_vma *args) migrate_vma_collect(args); - if (args->cpages) - migrate_vma_prepare(args); if (args->cpages) migrate_vma_unmap(args); From 913ffbdd99856184b4e8004f984d7432dcb990cd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 10 Nov 2021 20:32:43 -0800 Subject: [PATCH 5/7] mm: unexport folio_memcg_{,un}lock Patch series "unexport memcg locking helpers". Neither the old page-based nor the new folio-based memcg locking helpers are used in modular code at all, so drop the exports. This patch (of 2): folio_memcg_{,un}lock are only used in built-in core mm code. Link: https://lkml.kernel.org/r/20210820095815.445392-1-hch@lst.de Link: https://lkml.kernel.org/r/20210820095815.445392-2-hch@lst.de Signed-off-by: Christoph Hellwig Cc: Johannes Weiner Cc: Michal Hocko Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 508bcea7df56..070bcc647e66 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2058,7 +2058,6 @@ again: memcg->move_lock_task = current; memcg->move_lock_flags = flags; } -EXPORT_SYMBOL(folio_memcg_lock); void lock_page_memcg(struct page *page) { @@ -2092,7 +2091,6 @@ void folio_memcg_unlock(struct folio *folio) { __folio_memcg_unlock(folio_memcg(folio)); } -EXPORT_SYMBOL(folio_memcg_unlock); void unlock_page_memcg(struct page *page) { From ab2f9d2d3626a8f31bfe2f47746e04819d8ffe9c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 10 Nov 2021 20:32:46 -0800 Subject: [PATCH 6/7] mm: unexport {,un}lock_page_memcg These are only used in built-in core mm code. Link: https://lkml.kernel.org/r/20210820095815.445392-3-hch@lst.de Signed-off-by: Christoph Hellwig Acked-by: Johannes Weiner Cc: Michal Hocko Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 070bcc647e66..781605e92015 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2063,7 +2063,6 @@ void lock_page_memcg(struct page *page) { folio_memcg_lock(page_folio(page)); } -EXPORT_SYMBOL(lock_page_memcg); static void __folio_memcg_unlock(struct mem_cgroup *memcg) { @@ -2096,7 +2095,6 @@ void unlock_page_memcg(struct page *page) { folio_memcg_unlock(page_folio(page)); } -EXPORT_SYMBOL(unlock_page_memcg); struct obj_stock { #ifdef CONFIG_MEMCG_KMEM From b873e986816a0b8408c177b2c52a6915cca8713c Mon Sep 17 00:00:00 2001 From: Kuan-Ying Lee Date: Wed, 10 Nov 2021 20:32:49 -0800 Subject: [PATCH 7/7] kasan: add kasan mode messages when kasan init There are multiple kasan modes. It makes sense that we add some messages to know which kasan mode is active when booting up [1]. 
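As a rough sketch of the reporting pattern this patch introduces (a user-space illustration only, not the kernel code; the enum and helper names below are invented), the idea is to map the active mode to a short string and fold it into the one-time init banner:

#include <stdio.h>

/* Invented stand-ins for the hardware tag-based KASAN modes. */
enum demo_kasan_mode { DEMO_MODE_SYNC, DEMO_MODE_ASYNC, DEMO_MODE_ASYMM };

/* Same shape as the kasan_mode_info() helper added in mm/kasan/hw_tags.c below. */
static const char *demo_mode_info(enum demo_kasan_mode mode)
{
	switch (mode) {
	case DEMO_MODE_ASYNC:
		return "async";
	case DEMO_MODE_ASYMM:
		return "asymm";
	default:
		return "sync";
	}
}

int main(void)
{
	enum demo_kasan_mode mode = DEMO_MODE_SYNC;
	int stacktrace_on = 1;

	/* Mirrors the pr_info() format string the patch adds for hw-tags. */
	printf("KernelAddressSanitizer initialized (hw-tags, mode=%s, stacktrace=%s)\n",
	       demo_mode_info(mode), stacktrace_on ? "on" : "off");
	return 0;
}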
Link: https://bugzilla.kernel.org/show_bug.cgi?id=212195 [1] Link: https://lkml.kernel.org/r/20211020094850.4113-1-Kuan-Ying.Lee@mediatek.com Signed-off-by: Kuan-Ying Lee Reviewed-by: Marco Elver Reviewed-by: David Hildenbrand Cc: Andrey Ryabinin Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Dmitry Vyukov Cc: Catalin Marinas Cc: Will Deacon Cc: Matthias Brugger Cc: Chinwen Chang Cc: Yee Lee Cc: Nicholas Tang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/mm/kasan_init.c | 2 +- mm/kasan/hw_tags.c | 14 +++++++++++++- mm/kasan/sw_tags.c | 2 +- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index ec276f75fa05..c12cd700598f 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -310,7 +310,7 @@ void __init kasan_init(void) kasan_init_depth(); #if defined(CONFIG_KASAN_GENERIC) /* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */ - pr_info("KernelAddressSanitizer initialized\n"); + pr_info("KernelAddressSanitizer initialized (generic)\n"); #endif } diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index dc892119e88f..7355cb534e4f 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -106,6 +106,16 @@ static int __init early_kasan_flag_stacktrace(char *arg) } early_param("kasan.stacktrace", early_kasan_flag_stacktrace); +static inline const char *kasan_mode_info(void) +{ + if (kasan_mode == KASAN_MODE_ASYNC) + return "async"; + else if (kasan_mode == KASAN_MODE_ASYMM) + return "asymm"; + else + return "sync"; +} + /* kasan_init_hw_tags_cpu() is called for each CPU. */ void kasan_init_hw_tags_cpu(void) { @@ -177,7 +187,9 @@ void __init kasan_init_hw_tags(void) break; } - pr_info("KernelAddressSanitizer initialized\n"); + pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, stacktrace=%s)\n", + kasan_mode_info(), + kasan_stack_collection_enabled() ? "on" : "off"); } void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags) diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c index bd3f540feb47..77f13f391b57 100644 --- a/mm/kasan/sw_tags.c +++ b/mm/kasan/sw_tags.c @@ -42,7 +42,7 @@ void __init kasan_init_sw_tags(void) for_each_possible_cpu(cpu) per_cpu(prng_state, cpu) = (u32)get_cycles(); - pr_info("KernelAddressSanitizer initialized\n"); + pr_info("KernelAddressSanitizer initialized (sw-tags)\n"); } /*
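With all three hunks applied, the init banner identifies the active KASAN flavor. Depending on the configuration, the boot log line takes one of the following shapes (the hw-tags mode and stacktrace values vary; "sync" and "on" are shown only as an example):

  KernelAddressSanitizer initialized (generic)
  KernelAddressSanitizer initialized (sw-tags)
  KernelAddressSanitizer initialized (hw-tags, mode=sync, stacktrace=on)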