Mirror of https://github.com/torvalds/linux.git
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "The post-linux-next material.

  7 patches.

  Subsystems affected by this patch series (all mm): debug, slab-generic,
  migration, memcg, and kasan"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  kasan: add kasan mode messages when kasan init
  mm: unexport {,un}lock_page_memcg
  mm: unexport folio_memcg_{,un}lock
  mm/migrate.c: remove MIGRATE_PFN_LOCKED
  mm: migrate: simplify the file-backed pages validation when migrating its mapping
  mm: allow only SLUB on PREEMPT_RT
  mm/page_owner.c: modify the type of argument "order" in some functions
commit dbf4989618
Documentation/vm/hmm.rst

@@ -360,7 +360,7 @@ between device driver specific code and shared common code:
    system memory page, locks the page with ``lock_page()``, and fills in the
    ``dst`` array entry with::

-     dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+     dst[i] = migrate_pfn(page_to_pfn(dpage));

 Now that the driver knows that this page is being migrated, it can
 invalidate device private MMU mappings and copy device private memory
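For orientation, here is a minimal sketch (not taken from the patch) of the driver-side allocate-and-copy step that the documentation above describes, as it looks after this series. my_drv_alloc_and_copy() and the elided copy step are hypothetical; migrate_pfn(), lock_page() and the MIGRATE_PFN_* flags are the real interface. The destination page is still locked with lock_page(), but the dst entry no longer carries MIGRATE_PFN_LOCKED:

#include <linux/migrate.h>
#include <linux/mm.h>

/* Hypothetical driver helper: fill args->dst[] for entries selected by setup. */
static void my_drv_alloc_and_copy(struct migrate_vma *args)
{
	unsigned long i;

	for (i = 0; i < args->npages; i++) {
		struct page *dpage;

		/* Skip entries the core could not prepare for migration. */
		if (!(args->src[i] & MIGRATE_PFN_MIGRATE)) {
			args->dst[i] = 0;
			continue;
		}

		/* Allocate and lock the destination system memory page. */
		dpage = alloc_page(GFP_HIGHUSER_MOVABLE);
		if (!dpage) {
			args->dst[i] = 0;
			continue;
		}
		lock_page(dpage);

		/* ... copy the device private contents into dpage here ... */

		/* No MIGRATE_PFN_LOCKED any more: lock_page() alone is enough. */
		args->dst[i] = migrate_pfn(page_to_pfn(dpage));
		if (args->src[i] & MIGRATE_PFN_WRITE)
			args->dst[i] |= MIGRATE_PFN_WRITE;
	}
}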
arch/arm64/mm/kasan_init.c

@@ -310,7 +310,7 @@ void __init kasan_init(void)
 	kasan_init_depth();
 #if defined(CONFIG_KASAN_GENERIC)
 	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
-	pr_info("KernelAddressSanitizer initialized\n");
+	pr_info("KernelAddressSanitizer initialized (generic)\n");
 #endif
 }
arch/powerpc/kvm/book3s_hv_uvmem.c

@@ -560,7 +560,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
 				  gpa, 0, page_shift);

 	if (ret == U_SUCCESS)
-		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
+		*mig.dst = migrate_pfn(pfn);
 	else {
 		unlock_page(dpage);
 		__free_page(dpage);
@@ -774,7 +774,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
 		}
 	}

-	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	*mig.dst = migrate_pfn(page_to_pfn(dpage));
 	migrate_vma_pages(&mig);
 out_finalize:
 	migrate_vma_finalize(&mig);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c

@@ -317,7 +317,6 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
 		svm_migrate_get_vram_page(prange, migrate->dst[i]);
 		migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
 		src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
 				      DMA_TO_DEVICE);
 		r = dma_mapping_error(dev, src[i]);
@@ -610,7 +609,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 			 dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

 		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
-		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
 		j++;
 	}
drivers/gpu/drm/nouveau/nouveau_dmem.c

@@ -166,7 +166,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
 		goto error_dma_unmap;
 	mutex_unlock(&svmm->mutex);

-	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	args->dst[0] = migrate_pfn(page_to_pfn(dpage));
 	return 0;

 error_dma_unmap:
@@ -602,7 +602,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
 	if (src & MIGRATE_PFN_WRITE)
 		*pfn |= NVIF_VMM_PFNMAP_V0_W;
-	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	return migrate_pfn(page_to_pfn(dpage));

 out_dma_unmap:
 	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
include/linux/migrate.h

@@ -110,7 +110,6 @@ static inline int migrate_misplaced_page(struct page *page,
  */
 #define MIGRATE_PFN_VALID	(1UL << 0)
 #define MIGRATE_PFN_MIGRATE	(1UL << 1)
-#define MIGRATE_PFN_LOCKED	(1UL << 2)
 #define MIGRATE_PFN_WRITE	(1UL << 3)
 #define MIGRATE_PFN_SHIFT	6
include/linux/page_owner.h

@@ -8,9 +8,9 @@
 extern struct static_key_false page_owner_inited;
 extern struct page_ext_operations page_owner_ops;

-extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __reset_page_owner(struct page *page, unsigned short order);
 extern void __set_page_owner(struct page *page,
-			unsigned int order, gfp_t gfp_mask);
+			unsigned short order, gfp_t gfp_mask);
 extern void __split_page_owner(struct page *page, unsigned int nr);
 extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
@@ -18,14 +18,14 @@ extern void __dump_page_owner(const struct page *page);
 extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 					pg_data_t *pgdat, struct zone *zone);

-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
 {
 	if (static_branch_unlikely(&page_owner_inited))
 		__reset_page_owner(page, order);
 }

 static inline void set_page_owner(struct page *page,
-			unsigned int order, gfp_t gfp_mask)
+			unsigned short order, gfp_t gfp_mask)
 {
 	if (static_branch_unlikely(&page_owner_inited))
 		__set_page_owner(page, order, gfp_mask);
@@ -52,7 +52,7 @@ static inline void dump_page_owner(const struct page *page)
 	__dump_page_owner(page);
 }
 #else
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
 {
 }
 static inline void set_page_owner(struct page *page,
@@ -60,7 +60,7 @@ static inline void set_page_owner(struct page *page,
 {
 }
 static inline void split_page_owner(struct page *page,
-			unsigned int order)
+			unsigned short order)
 {
 }
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
init/Kconfig

@@ -1896,6 +1896,7 @@ choice

 config SLAB
 	bool "SLAB"
+	depends on !PREEMPT_RT
 	select HAVE_HARDENED_USERCOPY_ALLOCATOR
 	help
 	  The regular slab allocator that is established and known to work
@@ -1916,6 +1917,7 @@ config SLUB
 config SLOB
 	depends on EXPERT
 	bool "SLOB (Simple Allocator)"
+	depends on !PREEMPT_RT
 	help
 	  SLOB replaces the stock allocator with a drastically simpler
 	  allocator. SLOB is generally more space efficient but
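In effect, once CONFIG_PREEMPT_RT is enabled both SLAB and SLOB disappear from the slab allocator choice, leaving SLUB as the only selectable option, which is exactly what the patch title "mm: allow only SLUB on PREEMPT_RT" describes.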
lib/test_hmm.c

@@ -613,8 +613,7 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
 		 */
 		rpage->zone_device_data = dmirror;

-		*dst = migrate_pfn(page_to_pfn(dpage)) |
-			MIGRATE_PFN_LOCKED;
+		*dst = migrate_pfn(page_to_pfn(dpage));
 		if ((*src & MIGRATE_PFN_WRITE) ||
 		    (!spage && args->vma->vm_flags & VM_WRITE))
 			*dst |= MIGRATE_PFN_WRITE;
@@ -1137,7 +1136,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
 	lock_page(dpage);
 	xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
 	copy_highpage(dpage, spage);
-	*dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	*dst = migrate_pfn(page_to_pfn(dpage));
 	if (*src & MIGRATE_PFN_WRITE)
 		*dst |= MIGRATE_PFN_WRITE;
 }
mm/kasan/hw_tags.c

@@ -106,6 +106,16 @@ static int __init early_kasan_flag_stacktrace(char *arg)
 }
 early_param("kasan.stacktrace", early_kasan_flag_stacktrace);

+static inline const char *kasan_mode_info(void)
+{
+	if (kasan_mode == KASAN_MODE_ASYNC)
+		return "async";
+	else if (kasan_mode == KASAN_MODE_ASYMM)
+		return "asymm";
+	else
+		return "sync";
+}
+
 /* kasan_init_hw_tags_cpu() is called for each CPU. */
 void kasan_init_hw_tags_cpu(void)
 {
@@ -177,7 +187,9 @@ void __init kasan_init_hw_tags(void)
 		break;
 	}

-	pr_info("KernelAddressSanitizer initialized\n");
+	pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, stacktrace=%s)\n",
+		kasan_mode_info(),
+		kasan_stack_collection_enabled() ? "on" : "off");
 }

 void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
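With these changes the boot banner identifies the KASAN flavour. Reading the format strings above, a hw-tags kernel booting in synchronous mode with stack collection enabled would, for example, report: KernelAddressSanitizer initialized (hw-tags, mode=sync, stacktrace=on); the generic and sw-tags variants print "(generic)" and "(sw-tags)" respectively.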
mm/kasan/sw_tags.c

@@ -42,7 +42,7 @@ void __init kasan_init_sw_tags(void)
 	for_each_possible_cpu(cpu)
 		per_cpu(prng_state, cpu) = (u32)get_cycles();

-	pr_info("KernelAddressSanitizer initialized\n");
+	pr_info("KernelAddressSanitizer initialized (sw-tags)\n");
 }

 /*
mm/memcontrol.c

@@ -2058,13 +2058,11 @@ again:
 	memcg->move_lock_task = current;
 	memcg->move_lock_flags = flags;
 }
-EXPORT_SYMBOL(folio_memcg_lock);

 void lock_page_memcg(struct page *page)
 {
 	folio_memcg_lock(page_folio(page));
 }
-EXPORT_SYMBOL(lock_page_memcg);

 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
 {
@@ -2092,13 +2090,11 @@ void folio_memcg_unlock(struct folio *folio)
 {
 	__folio_memcg_unlock(folio_memcg(folio));
 }
-EXPORT_SYMBOL(folio_memcg_unlock);

 void unlock_page_memcg(struct page *page)
 {
 	folio_memcg_unlock(page_folio(page));
 }
-EXPORT_SYMBOL(unlock_page_memcg);

 struct obj_stock {
 #ifdef CONFIG_MEMCG_KMEM
mm/migrate.c
@@ -404,12 +404,6 @@ int folio_migrate_mapping(struct address_space *mapping,
 	newzone = folio_zone(newfolio);

 	xas_lock_irq(&xas);
-	if (folio_ref_count(folio) != expected_count ||
-	    xas_load(&xas) != folio) {
-		xas_unlock_irq(&xas);
-		return -EAGAIN;
-	}
-
 	if (!folio_ref_freeze(folio, expected_count)) {
 		xas_unlock_irq(&xas);
 		return -EAGAIN;
@@ -2368,7 +2362,6 @@ again:
 			 * can't be dropped from it).
 			 */
 			get_page(page);
-			migrate->cpages++;

 			/*
 			 * Optimize for the common case where page is only mapped once
@@ -2378,7 +2371,7 @@ again:
 			if (trylock_page(page)) {
 				pte_t swp_pte;

-				mpfn |= MIGRATE_PFN_LOCKED;
+				migrate->cpages++;
 				ptep_get_and_clear(mm, addr, ptep);

 				/* Setup special migration page table entry */
@@ -2412,6 +2405,9 @@ again:

 			if (pte_present(pte))
 				unmapped++;
+		} else {
+			put_page(page);
+			mpfn = 0;
 		}

 next:
@@ -2516,15 +2512,17 @@ static bool migrate_vma_check_page(struct page *page)
 }

 /*
- * migrate_vma_prepare() - lock pages and isolate them from the lru
+ * migrate_vma_unmap() - replace page mapping with special migration pte entry
  * @migrate: migrate struct containing all migration information
  *
- * This locks pages that have been collected by migrate_vma_collect(). Once each
- * page is locked it is isolated from the lru (for non-device pages). Finally,
- * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
- * migrated by concurrent kernel threads.
+ * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
+ * special migration pte entry and check if it has been pinned. Pinned pages are
+ * restored because we cannot migrate them.
+ *
+ * This is the last step before we call the device driver callback to allocate
+ * destination memory and copy contents of original page over to new page.
  */
-static void migrate_vma_prepare(struct migrate_vma *migrate)
+static void migrate_vma_unmap(struct migrate_vma *migrate)
 {
 	const unsigned long npages = migrate->npages;
 	const unsigned long start = migrate->start;
@@ -2533,32 +2531,12 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)

 	lru_add_drain();

-	for (i = 0; (i < npages) && migrate->cpages; i++) {
+	for (i = 0; i < npages; i++) {
 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
-		bool remap = true;

 		if (!page)
 			continue;

-		if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
-			/*
-			 * Because we are migrating several pages there can be
-			 * a deadlock between 2 concurrent migration where each
-			 * are waiting on each other page lock.
-			 *
-			 * Make migrate_vma() a best effort thing and backoff
-			 * for any page we can not lock right away.
-			 */
-			if (!trylock_page(page)) {
-				migrate->src[i] = 0;
-				migrate->cpages--;
-				put_page(page);
-				continue;
-			}
-			remap = false;
-			migrate->src[i] |= MIGRATE_PFN_LOCKED;
-		}
-
 		/* ZONE_DEVICE pages are not on LRU */
 		if (!is_zone_device_page(page)) {
 			if (!PageLRU(page) && allow_drain) {
@@ -2568,16 +2546,9 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
 			}

 			if (isolate_lru_page(page)) {
-				if (remap) {
-					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-					migrate->cpages--;
-					restore++;
-				} else {
-					migrate->src[i] = 0;
-					unlock_page(page);
-					migrate->cpages--;
-					put_page(page);
-				}
+				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+				migrate->cpages--;
+				restore++;
 				continue;
 			}
@@ -2585,80 +2556,20 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
 			put_page(page);
 		}

-		if (!migrate_vma_check_page(page)) {
-			if (remap) {
-				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-				migrate->cpages--;
-				restore++;
-
-				if (!is_zone_device_page(page)) {
-					get_page(page);
-					putback_lru_page(page);
-				}
-			} else {
-				migrate->src[i] = 0;
-				unlock_page(page);
-				migrate->cpages--;
-
-				if (!is_zone_device_page(page))
-					putback_lru_page(page);
-				else
-					put_page(page);
-			}
-		}
-	}
-
-	for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
-		struct page *page = migrate_pfn_to_page(migrate->src[i]);
-
-		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
-			continue;
-
-		remove_migration_pte(page, migrate->vma, addr, page);
-
-		migrate->src[i] = 0;
-		unlock_page(page);
-		put_page(page);
-		restore--;
-	}
-}
-
-/*
- * migrate_vma_unmap() - replace page mapping with special migration pte entry
- * @migrate: migrate struct containing all migration information
- *
- * Replace page mapping (CPU page table pte) with a special migration pte entry
- * and check again if it has been pinned. Pinned pages are restored because we
- * cannot migrate them.
- *
- * This is the last step before we call the device driver callback to allocate
- * destination memory and copy contents of original page over to new page.
- */
-static void migrate_vma_unmap(struct migrate_vma *migrate)
-{
-	const unsigned long npages = migrate->npages;
-	const unsigned long start = migrate->start;
-	unsigned long addr, i, restore = 0;
-
-	for (i = 0; i < npages; i++) {
-		struct page *page = migrate_pfn_to_page(migrate->src[i]);
-
-		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
-			continue;
-
-		if (page_mapped(page)) {
+		if (page_mapped(page))
 			try_to_migrate(page, 0);
-			if (page_mapped(page))
-				goto restore;
-		}

-		if (migrate_vma_check_page(page))
-			continue;
+		if (page_mapped(page) || !migrate_vma_check_page(page)) {
+			if (!is_zone_device_page(page)) {
+				get_page(page);
+				putback_lru_page(page);
+			}

-restore:
-		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-		migrate->cpages--;
-		restore++;
+			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+			migrate->cpages--;
+			restore++;
+			continue;
+		}
 	}

 	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
@@ -2671,12 +2582,8 @@ restore:

 		migrate->src[i] = 0;
 		unlock_page(page);
+		put_page(page);
 		restore--;
-
-		if (is_zone_device_page(page))
-			put_page(page);
-		else
-			putback_lru_page(page);
 	}
 }
@@ -2699,8 +2606,8 @@
  * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
  * flag set). Once these are allocated and copied, the caller must update each
  * corresponding entry in the dst array with the pfn value of the destination
- * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
- * (destination pages must have their struct pages locked, via lock_page()).
+ * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
+ * lock_page().
  *
  * Note that the caller does not have to migrate all the pages that are marked
  * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
@@ -2769,8 +2676,6 @@ int migrate_vma_setup(struct migrate_vma *args)

 	migrate_vma_collect(args);

-	if (args->cpages)
-		migrate_vma_prepare(args);
 	if (args->cpages)
 		migrate_vma_unmap(args);
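To tie the mm/migrate.c changes together, here is a hedged caller-side sketch (not from the patch): my_drv_migrate_range() and my_drv_alloc_and_copy() are hypothetical, while migrate_vma_setup(), migrate_vma_pages() and migrate_vma_finalize() are the real API. After this series a single migrate_vma_setup() call collects, locks and unmaps the source pages; there is no separate prepare stage and no MIGRATE_PFN_LOCKED flag for the caller to manage.

#include <linux/migrate.h>
#include <linux/mm.h>

/* Hypothetical driver callback that fills args->dst[] with locked pages. */
static void my_drv_alloc_and_copy(struct migrate_vma *args);

static int my_drv_migrate_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				unsigned long *src, unsigned long *dst)
{
	struct migrate_vma args = {
		.vma	= vma,
		.start	= start,
		.end	= end,
		.src	= src,
		.dst	= dst,
		.flags	= MIGRATE_VMA_SELECT_SYSTEM,	/* assumption: migrating ordinary system memory */
	};
	int ret;

	/* Collect, lock and unmap the source pages in one step. */
	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	if (args.cpages) {
		/* Fill dst[] with migrate_pfn() values of locked destination pages. */
		my_drv_alloc_and_copy(&args);
		/* Switch the mappings over to the destination pages. */
		migrate_vma_pages(&args);
	}

	/* Unlock and release all pages, migrated or restored. */
	migrate_vma_finalize(&args);
	return 0;
}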
mm/page_owner.c

@@ -125,7 +125,7 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
 	return handle;
 }

-void __reset_page_owner(struct page *page, unsigned int order)
+void __reset_page_owner(struct page *page, unsigned short order)
 {
 	int i;
 	struct page_ext *page_ext;
@@ -149,7 +149,7 @@ void __reset_page_owner(struct page *page, unsigned int order)

 static inline void __set_page_owner_handle(struct page_ext *page_ext,
 					depot_stack_handle_t handle,
-					unsigned int order, gfp_t gfp_mask)
+					unsigned short order, gfp_t gfp_mask)
 {
 	struct page_owner *page_owner;
 	int i;
@@ -169,7 +169,7 @@ static inline void __set_page_owner_handle(struct page_ext *page_ext,
 	}
 }

-noinline void __set_page_owner(struct page *page, unsigned int order,
+noinline void __set_page_owner(struct page *page, unsigned short order,
 			gfp_t gfp_mask)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);