mirror of https://github.com/torvalds/linux.git
commit b2f557a21b: mm/madvise: add cond_resched() in madvise_cold_or_pageout_pte_range()
I conducted real-time testing and observed that
madvise_cold_or_pageout_pte_range() causes significant latency under
memory pressure, which can be effectively reduced by adding
cond_resched() within the loop.

I tested on the LicheePi 4A board using cyclictest for latency
measurement and ftrace for latency tracing.  The board uses the TH1520
processor and has 8GB of memory.  The kernel version is 6.5.0 with the
PREEMPT_RT patch applied.

The script I tested is as follows:

echo wakeup_rt > /sys/kernel/tracing/current_tracer
echo 1 > /sys/kernel/tracing/tracing_on
echo 0 > /sys/kernel/tracing/tracing_max_latency
stress-ng --vm 8 --vm-bytes 2G &
cyclictest --mlockall --smp --priority=99 --distance=0 --duration=30m
echo 0 > /sys/kernel/tracing/tracing_on
cat /sys/kernel/tracing/trace

The tracing results before the modification are as follows:

# tracer: wakeup_rt
#
# wakeup_rt latency trace v1.1.5 on 6.5.0-rt6-r1208-00003-g999d221864bf
# --------------------------------------------------------------------
# latency: 2552 us, #6/6, CPU#3 | (M:preempt_rt VP:0, KP:0, SP:0 HP:0 #P:4)
#    -----------------
#    | task: cyclictest-196 (uid:0 nice:0 policy:1 rt_prio:99)
#    -----------------
#
#                    _--------=> CPU#
#                   / _-------=> irqs-off/BH-disabled
#                  | / _------=> need-resched
#                  || / _-----=> need-resched-lazy
#                  ||| / _----=> hardirq/softirq
#                  |||| / _---=> preempt-depth
#                  ||||| / _--=> preempt-lazy-depth
#                  |||||| / _-=> migrate-disable
#                  ||||||| /     delay
#  cmd     pid     |||||||| time  |   caller
#     \   /        ||||||||  \    |    /
stress-n-206     3dn.h512    2us :    206:120:R + [003]   196:  0:R cyclictest
stress-n-206     3dn.h512    7us : <stack trace>
 => __ftrace_trace_stack
 => __trace_stack
 => probe_wakeup
 => ttwu_do_activate
 => try_to_wake_up
 => wake_up_process
 => hrtimer_wakeup
 => __hrtimer_run_queues
 => hrtimer_interrupt
 => riscv_timer_interrupt
 => handle_percpu_devid_irq
 => generic_handle_domain_irq
 => riscv_intc_irq
 => handle_riscv_irq
 => do_irq
stress-n-206     3dn.h512    9us#:    0
stress-n-206     3d...3.. 2544us : __schedule
stress-n-206     3d...3.. 2545us :    206:120:R ==> [003]   196:  0:R cyclictest
stress-n-206     3d...3.. 2551us : <stack trace>
 => __ftrace_trace_stack
 => __trace_stack
 => probe_wakeup_sched_switch
 => __schedule
 => preempt_schedule
 => migrate_enable
 => rt_spin_unlock
 => madvise_cold_or_pageout_pte_range
 => walk_pgd_range
 => __walk_page_range
 => walk_page_range
 => madvise_pageout
 => madvise_vma_behavior
 => do_madvise
 => sys_madvise
 => do_trap_ecall_u
 => ret_from_exception

The tracing results after the modification are as follows:

# tracer: wakeup_rt
#
# wakeup_rt latency trace v1.1.5 on 6.5.0-rt6-r1208-00004-gca3876fc69a6-dirty
# --------------------------------------------------------------------
# latency: 1689 us, #6/6, CPU#0 | (M:preempt_rt VP:0, KP:0, SP:0 HP:0 #P:4)
#    -----------------
#    | task: cyclictest-217 (uid:0 nice:0 policy:1 rt_prio:99)
#    -----------------
#
#                    _--------=> CPU#
#                   / _-------=> irqs-off/BH-disabled
#                  | / _------=> need-resched
#                  || / _-----=> need-resched-lazy
#                  ||| / _----=> hardirq/softirq
#                  |||| / _---=> preempt-depth
#                  ||||| / _--=> preempt-lazy-depth
#                  |||||| / _-=> migrate-disable
#                  ||||||| /     delay
#  cmd     pid     |||||||| time  |   caller
#     \   /        ||||||||  \    |    /
stress-n-232     0dn.h413    1us+:    232:120:R + [000]   217:  0:R cyclictest
stress-n-232     0dn.h413   12us : <stack trace>
 => __ftrace_trace_stack
 => __trace_stack
 => probe_wakeup
 => ttwu_do_activate
 => try_to_wake_up
 => wake_up_process
 => hrtimer_wakeup
 => __hrtimer_run_queues
 => hrtimer_interrupt
 => riscv_timer_interrupt
 => handle_percpu_devid_irq
 => generic_handle_domain_irq
 => riscv_intc_irq
 => handle_riscv_irq
 => do_irq
stress-n-232     0dn.h413   19us#:    0
stress-n-232     0d...3.. 1671us : __schedule
stress-n-232     0d...3.. 1676us+:    232:120:R ==> [000]   217:  0:R cyclictest
stress-n-232     0d...3.. 1687us : <stack trace>
 => __ftrace_trace_stack
 => __trace_stack
 => probe_wakeup_sched_switch
 => __schedule
 => preempt_schedule
 => migrate_enable
 => free_unref_page_list
 => release_pages
 => free_pages_and_swap_cache
 => tlb_batch_pages_flush
 => tlb_flush_mmu
 => unmap_page_range
 => unmap_vmas
 => unmap_region
 => do_vmi_align_munmap.constprop.0
 => do_vmi_munmap
 => __vm_munmap
 => sys_munmap
 => do_trap_ecall_u
 => ret_from_exception

After the modification, the cause of the maximum latency is no longer
madvise_cold_or_pageout_pte_range(), so the change does reduce the
latency introduced by that function.

Currently the madvise_cold_or_pageout_pte_range() function exhibits
significant latency under memory pressure, which can be effectively
reduced by adding cond_resched() within the loop: when batch_count
reaches SWAP_CLUSTER_MAX, we reschedule the task to ensure fairness
and avoid long lock hold times.

Link: https://lkml.kernel.org/r/85363861af65fac66c7a98c251906afc0d9c8098.1695291046.git.wangjiexun@tinylab.org
Signed-off-by: Jiexun Wang <wangjiexun@tinylab.org>
Cc: Zhangjin Wu <falcon@tinylab.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
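For illustration, here is a minimal C sketch of the change the message
describes.  batch_count, SWAP_CLUSTER_MAX, and the cond_resched() call come
from the message itself; the restart label and the exact handling around
dropping the PTE lock are assumptions made for this sketch, not necessarily
the verbatim upstream diff.

static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, unsigned long addr,
					     unsigned long end,
					     struct mm_walk *walk)
{
	/* ... existing locals ... */
	unsigned int batch_count = 0;	/* PTEs scanned since last resched check */

	/* ... */
restart:
	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!start_pte)
		return 0;
	/* ... */
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		/*
		 * Every SWAP_CLUSTER_MAX entries, check whether a reschedule
		 * is pending.  If so, drop the PTE lock, yield the CPU, and
		 * restart the walk at the current address.  This bounds both
		 * the lock hold time and the scheduling latency seen by
		 * higher-priority (e.g. RT) tasks.
		 */
		if (++batch_count == SWAP_CLUSTER_MAX) {
			batch_count = 0;
			if (need_resched()) {
				pte_unmap_unlock(start_pte, ptl);
				cond_resched();
				goto restart;
			}
		}
		/* ... existing cold/pageout handling for this entry ... */
	}
	/* ... */
}

Because the walk resumes at the current addr after rescheduling, no entries
are skipped; the batch counter only bounds how long the PTE lock is held
between opportunities to yield.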
Files in mm/ at this commit:

damon/
kasan/
kfence/
kmsan/
backing-dev.c
balloon_compaction.c
bootmem_info.c
cma_debug.c
cma_sysfs.c
cma.c
cma.h
compaction.c
debug_page_alloc.c
debug_page_ref.c
debug_vm_pgtable.c
debug.c
dmapool_test.c
dmapool.c
early_ioremap.c
fadvise.c
fail_page_alloc.c
failslab.c
filemap.c
folio-compat.c
gup_test.c
gup_test.h
gup.c
highmem.c
hmm.c
huge_memory.c
hugetlb_cgroup.c
hugetlb_vmemmap.c
hugetlb_vmemmap.h
hugetlb.c
hwpoison-inject.c
init-mm.c
internal.h
interval_tree.c
io-mapping.c
ioremap.c
Kconfig
Kconfig.debug
khugepaged.c
kmemleak.c
ksm.c
list_lru.c
maccess.c
madvise.c
Makefile
mapping_dirty_helpers.c
memblock.c
memcontrol.c
memfd.c
memory_hotplug.c
memory-failure.c
memory-tiers.c
memory.c
mempolicy.c
mempool.c
memremap.c
memtest.c
migrate_device.c
migrate.c
mincore.c
mlock.c
mm_init.c
mm_slot.h
mmap_lock.c
mmap.c
mmu_gather.c
mmu_notifier.c
mmzone.c
mprotect.c
mremap.c
msync.c
nommu.c
oom_kill.c
page_alloc.c
page_counter.c
page_ext.c
page_idle.c
page_io.c
page_isolation.c
page_owner.c
page_poison.c
page_reporting.c
page_reporting.h
page_table_check.c
page_vma_mapped.c
page-writeback.c
pagewalk.c
percpu-internal.h
percpu-km.c
percpu-stats.c
percpu-vm.c
percpu.c
pgalloc-track.h
pgtable-generic.c
process_vm_access.c
ptdump.c
readahead.c
rmap.c
rodata_test.c
secretmem.c
shmem_quota.c
shmem.c
show_mem.c
shrinker_debug.c
shrinker.c
shuffle.c
shuffle.h
slab_common.c
slab.c
slab.h
slub.c
sparse-vmemmap.c
sparse.c
swap_cgroup.c
swap_slots.c
swap_state.c
swap.c
swap.h
swapfile.c
truncate.c
usercopy.c
userfaultfd.c
util.c
vmalloc.c
vmpressure.c
vmscan.c
vmstat.c
workingset.c
z3fold.c
zbud.c
zpool.c
zsmalloc.c
zswap.c