mm: remove the implementation of swap_free() and always use swap_free_nr()

To streamline maintenance efforts, we propose removing the implementation
of swap_free() and simply invoking swap_free_nr() with nr set to 1 instead.
swap_free_nr() is designed around a bitmap consisting of only one long, so
its overhead is negligible when nr equals 1.
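
As a rough illustration of that point, the sketch below shows the general
one-long-bitmap batching idea in plain C.  It is not the kernel
implementation; slot_count_drops_to_zero() and free_slot() are hypothetical
stand-ins for the real per-slot bookkeeping.  A chunk never exceeds
BITS_PER_LONG slots, so for nr == 1 the extra cost is one long on the stack
and loops that run once.

#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

/* hypothetical stand-ins for the real per-slot bookkeeping */
int slot_count_drops_to_zero(unsigned long offset);
void free_slot(unsigned long offset);

/* free nr contiguous slots starting at offset, one long's worth at a time */
void free_slots_batched(unsigned long offset, int nr)
{
	while (nr > 0) {
		unsigned long to_free = 0;	/* the single-long bitmap */
		int i, chunk = nr < (int)BITS_PER_LONG ? nr : (int)BITS_PER_LONG;

		/* pass 1: record which slots in this chunk become free */
		for (i = 0; i < chunk; i++)
			if (slot_count_drops_to_zero(offset + i))
				to_free |= 1UL << i;

		/* pass 2: release every slot recorded in the bitmap */
		for (i = 0; i < chunk; i++)
			if (to_free & (1UL << i))
				free_slot(offset + i);

		offset += chunk;
		nr -= chunk;
	}
}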

A prime candidate for leveraging swap_free_nr() lies within
kernel/power/swap.c.  Implementing this change facilitates the adoption of
batch processing for hibernation.
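
The kernel/power/swap.c hunk below is exactly this pattern; per extent, the
per-offset loop collapses into a single batched call (excerpted from the
diff below, not new code):

	/* before: one swap_free() call per slot in the extent */
	for (offset = ext->start; offset <= ext->end; offset++)
		swap_free(swp_entry(swap, offset));

	/* after: one call frees the whole contiguous extent */
	swap_free_nr(swp_entry(swap, ext->start),
		     ext->end - ext->start + 1);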

Link: https://lkml.kernel.org/r/20240529082824.150954-3-21cnbao@gmail.com
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Suggested-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Acked-by: Chris Li <chrisl@kernel.org>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Len Brown <len.brown@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Chuanhua Han <hanchuanhua@oppo.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Gao Xiang <xiang@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kairui Song <kasong@tencent.com>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author: Barry Song, 2024-05-29 20:28:20 +12:00; committed by Andrew Morton
commit 54f7a49c20, parent ebfba00451
3 changed files with 11 additions and 21 deletions

--- a/include/linux/swap.h
+++ b/include/linux/swap.h

@@ -477,7 +477,6 @@ extern int add_swap_count_continuation(swp_entry_t, gfp_t);
 extern void swap_shmem_alloc(swp_entry_t);
 extern int swap_duplicate(swp_entry_t);
 extern int swapcache_prepare(swp_entry_t);
-extern void swap_free(swp_entry_t);
 extern void swap_free_nr(swp_entry_t entry, int nr_pages);
 extern void swapcache_free_entries(swp_entry_t *entries, int n);
 extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
@@ -556,10 +555,6 @@ static inline int swapcache_prepare(swp_entry_t swp)
 	return 0;
 }
 
-static inline void swap_free(swp_entry_t swp)
-{
-}
-
 static inline void swap_free_nr(swp_entry_t entry, int nr_pages)
 {
 }
@@ -608,6 +603,11 @@ static inline void free_swap_and_cache(swp_entry_t entry)
 	free_swap_and_cache_nr(entry, 1);
 }
 
+static inline void swap_free(swp_entry_t entry)
+{
+	swap_free_nr(entry, 1);
+}
+
 #ifdef CONFIG_MEMCG
 static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
 {

--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c

@@ -200,12 +200,11 @@ void free_all_swap_pages(int swap)
 
 	while ((node = swsusp_extents.rb_node)) {
 		struct swsusp_extent *ext;
-		unsigned long offset;
 
 		ext = rb_entry(node, struct swsusp_extent, node);
 		rb_erase(node, &swsusp_extents);
-		for (offset = ext->start; offset <= ext->end; offset++)
-			swap_free(swp_entry(swap, offset));
+		swap_free_nr(swp_entry(swap, ext->start),
+			     ext->end - ext->start + 1);
 
 		kfree(ext);
 	}

--- a/mm/swapfile.c
+++ b/mm/swapfile.c

@@ -1343,19 +1343,6 @@ static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
 	swap_range_free(p, offset, 1);
 }
 
-/*
- * Caller has made sure that the swap device corresponding to entry
- * is still around or has not been recycled.
- */
-void swap_free(swp_entry_t entry)
-{
-	struct swap_info_struct *p;
-
-	p = _swap_info_get(entry);
-	if (p)
-		__swap_entry_free(p, entry);
-}
-
 static void cluster_swap_free_nr(struct swap_info_struct *sis,
 				 unsigned long offset, int nr_pages)
 {
@@ -1385,6 +1372,10 @@ static void cluster_swap_free_nr(struct swap_info_struct *sis,
 	unlock_cluster_or_swap_info(sis, ci);
 }
 
+/*
+ * Caller has made sure that the swap device corresponding to entry
+ * is still around or has not been recycled.
+ */
 void swap_free_nr(swp_entry_t entry, int nr_pages)
 {
 	int nr;