mirror of https://github.com/torvalds/linux.git
mm/mglru: improve reset_mm_stats()
struct lruvec* is already a field of struct lru_gen_mm_walk. Remove the struct lruvec* parameter from functions that already have access to struct lru_gen_mm_walk*.

Also, the histogram stats do not need to be reset when !should_walk_mmu(). Remove the call to reset_mm_stats() in iterate_mm_list_nowalk().

Link: https://lkml.kernel.org/r/20240214060538.3524462-4-kinseyho@google.com
Signed-off-by: Kinsey Ho <kinseyho@google.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Donet Tom <donettom@linux.vnet.ibm.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
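To illustrate the refactoring pattern, here is a minimal, self-contained C sketch. It uses simplified stand-in types, not the real kernel structures, and the _before/_after suffixes are invented for the comparison: when a callee already receives a struct whose field points at the needed data, the extra parameter is redundant and the callee can derive it itself.

/*
 * Sketch of the pattern applied by this commit, with simplified
 * stand-in types (not the actual kernel code).
 */
#include <stdio.h>

struct lruvec {
        long nr_pages;
};

struct lru_gen_mm_walk {
        struct lruvec *lruvec;  /* already points at the target lruvec */
        long batched;           /* pages accumulated during the walk */
};

/* Before: the lruvec parameter duplicates walk->lruvec. */
void reset_batch_size_before(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
{
        lruvec->nr_pages += walk->batched;
        walk->batched = 0;
}

/* After: one source of truth; the callee derives the lruvec itself. */
void reset_batch_size_after(struct lru_gen_mm_walk *walk)
{
        struct lruvec *lruvec = walk->lruvec;

        lruvec->nr_pages += walk->batched;
        walk->batched = 0;
}

int main(void)
{
        struct lruvec v = { .nr_pages = 0 };
        struct lru_gen_mm_walk w = { .lruvec = &v, .batched = 42 };

        reset_batch_size_after(&w);     /* caller no longer passes &v */
        printf("%ld\n", v.nr_pages);    /* prints 42 */
        return 0;
}

This pattern requires that the field is actually set at every call site, which is why the final hunk below, in what appears to be evict_folios(), assigns walk->lruvec = lruvec before calling reset_batch_size().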
This commit is contained in:
parent 51973cc9e5
commit 2d823764fa
mm/vmscan.c (42 changed lines)
@@ -2879,38 +2879,37 @@ static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
 
 #endif
 
-static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last)
+static void reset_mm_stats(struct lru_gen_mm_walk *walk, bool last)
 {
 	int i;
+	int hist;
+	struct lruvec *lruvec = walk->lruvec;
 	struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
 
 	lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
 
-	if (walk) {
-		int hist = lru_hist_from_seq(walk->max_seq);
+	hist = lru_hist_from_seq(walk->max_seq);
 
-		for (i = 0; i < NR_MM_STATS; i++) {
-			WRITE_ONCE(mm_state->stats[hist][i],
-				   mm_state->stats[hist][i] + walk->mm_stats[i]);
-			walk->mm_stats[i] = 0;
-		}
+	for (i = 0; i < NR_MM_STATS; i++) {
+		WRITE_ONCE(mm_state->stats[hist][i],
+			   mm_state->stats[hist][i] + walk->mm_stats[i]);
+		walk->mm_stats[i] = 0;
 	}
 
 	if (NR_HIST_GENS > 1 && last) {
-		int hist = lru_hist_from_seq(mm_state->seq + 1);
+		hist = lru_hist_from_seq(walk->max_seq + 1);
 
 		for (i = 0; i < NR_MM_STATS; i++)
 			WRITE_ONCE(mm_state->stats[hist][i], 0);
 	}
 }
 
-static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
-			    struct mm_struct **iter)
+static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **iter)
 {
 	bool first = false;
 	bool last = false;
 	struct mm_struct *mm = NULL;
+	struct lruvec *lruvec = walk->lruvec;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 	struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
 	struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
@@ -2954,7 +2953,7 @@ static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
 	} while (!(mm = get_next_mm(walk)));
 done:
 	if (*iter || last)
-		reset_mm_stats(lruvec, walk, last);
+		reset_mm_stats(walk, last);
 
 	spin_unlock(&mm_list->lock);
 
@@ -2984,7 +2983,6 @@ static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
 		mm_state->head = NULL;
 		mm_state->tail = NULL;
 		WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
-		reset_mm_stats(lruvec, NULL, true);
 		success = true;
 	}
 
@@ -3159,9 +3157,10 @@ static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio,
 	walk->nr_pages[new_gen][type][zone] += delta;
 }
 
-static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
+static void reset_batch_size(struct lru_gen_mm_walk *walk)
 {
 	int gen, type, zone;
+	struct lruvec *lruvec = walk->lruvec;
 	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 
 	walk->batched = 0;
@@ -3591,7 +3590,7 @@ done:
 	return -EAGAIN;
 }
 
-static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)
+static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
 {
 	static const struct mm_walk_ops mm_walk_ops = {
 		.test_walk = should_skip_vma,
@@ -3600,6 +3599,7 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_
 	};
 
 	int err;
+	struct lruvec *lruvec = walk->lruvec;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 
 	walk->next_addr = FIRST_USER_ADDRESS;
@@ -3628,7 +3628,7 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_
 
 		if (walk->batched) {
 			spin_lock_irq(&lruvec->lru_lock);
-			reset_batch_size(lruvec, walk);
+			reset_batch_size(walk);
 			spin_unlock_irq(&lruvec->lru_lock);
 		}
 
@@ -3856,9 +3856,9 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
 	walk->force_scan = force_scan;
 
 	do {
-		success = iterate_mm_list(lruvec, walk, &mm);
+		success = iterate_mm_list(walk, &mm);
 		if (mm)
-			walk_mm(lruvec, mm, walk);
+			walk_mm(mm, walk);
 	} while (mm);
 done:
 	if (success) {
@@ -4558,8 +4558,10 @@ retry:
 	move_folios_to_lru(lruvec, &list);
 
 	walk = current->reclaim_state->mm_walk;
-	if (walk && walk->batched)
-		reset_batch_size(lruvec, walk);
+	if (walk && walk->batched) {
+		walk->lruvec = lruvec;
+		reset_batch_size(walk);
+	}
 
 	item = PGSTEAL_KSWAPD + reclaimer_offset();
 	if (!cgroup_reclaim(sc))