zsmalloc: introduce some helper functions
Patch series "zsmalloc: remove bit_spin_lock", v2.

zsmalloc uses bit_spin_lock to minimize space overhead, since it is a zspage-granularity lock.  However, it makes zsmalloc unusable under PREEMPT_RT and adds too much complication.  This patchset replaces the bit_spin_lock with a per-pool rwlock.  It also removes the unnecessary zspage isolation logic from the class, which was the other source of excess complication in zsmalloc.  The last patch changes get_cpu_var to local_lock so that it works under PREEMPT_RT.

This patch (of 9):

get_zspage_mapping returns fullness as well as class_idx, but the fullness is usually not used since it can be stale in some contexts.  That is misleading and generates unnecessary instructions, so this patch introduces zspage_class.  obj_to_location also produces both the page and the index, but the index is not always needed either, so this patch introduces obj_to_page.

Link: https://lkml.kernel.org/r/20211115185909.3949505-1-minchan@kernel.org
Link: https://lkml.kernel.org/r/20211115185909.3949505-2-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 67f1c9cd0c
parent 1622ed7d07
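Before the diff, a minimal, self-contained sketch of the caller-side simplification the new zspage_class() helper enables. This is plain userspace C with simplified stand-in structs: the field layout, the fullness representation, and the 8-entry size_class table are illustrative assumptions, not the kernel definitions. It only models why fetching fullness through get_zspage_mapping() is wasted (and potentially misleading) work when a caller just wants the size_class.

#include <stdio.h>

/* Simplified userspace stand-ins for the real zsmalloc structures. */
struct size_class {
        int size;
};

struct zspage {
        unsigned int class;     /* index into the pool's size_class table */
        unsigned int fullness;  /* may be stale unless the class lock is held */
};

struct zs_pool {
        struct size_class *size_class[8];
};

/* Old pattern: callers get fullness back even when they only want the class. */
static void get_zspage_mapping(struct zspage *zspage, unsigned int *class_idx,
                               unsigned int *fullness)
{
        *fullness = zspage->fullness;
        *class_idx = zspage->class;
}

/* New helper (what this patch adds): go straight from zspage to its class. */
static struct size_class *zspage_class(struct zs_pool *pool, struct zspage *zspage)
{
        return pool->size_class[zspage->class];
}

int main(void)
{
        struct size_class c32 = { .size = 32 };
        struct zs_pool pool = { .size_class = { [3] = &c32 } };
        struct zspage zspage = { .class = 3, .fullness = 0 };
        unsigned int class_idx, fullness;
        struct size_class *class;

        /* Before: two steps, with an unused (and possibly stale) fullness. */
        get_zspage_mapping(&zspage, &class_idx, &fullness);
        class = pool.size_class[class_idx];
        printf("old lookup: size=%d\n", class->size);

        /* After: one step, fullness never touched. */
        class = zspage_class(&pool, &zspage);
        printf("new lookup: size=%d\n", class->size);

        return 0;
}

Built with any C compiler, the sketch prints the same class size through both paths; the hunks below apply exactly this substitution at each call site.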
@@ -517,6 +517,12 @@ static void get_zspage_mapping(struct zspage *zspage,
 	*class_idx = zspage->class;
 }
 
+static struct size_class *zspage_class(struct zs_pool *pool,
+					struct zspage *zspage)
+{
+	return pool->size_class[zspage->class];
+}
+
 static void set_zspage_mapping(struct zspage *zspage,
 				unsigned int class_idx,
 				enum fullness_group fullness)
@@ -844,6 +850,12 @@ static void obj_to_location(unsigned long obj, struct page **page,
 	*obj_idx = (obj & OBJ_INDEX_MASK);
 }
 
+static void obj_to_page(unsigned long obj, struct page **page)
+{
+	obj >>= OBJ_TAG_BITS;
+	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+}
+
 /**
  * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
  * @page: page object resides in zspage
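The obj_to_page() helper added in this hunk is the decode-only counterpart of the encoding described by location_to_obj(), whose kernel-doc starts at the end of the hunk: an obj value packs the page frame number and the object index behind OBJ_TAG_BITS. Below is a userspace sketch of that packing; the bit widths are illustrative assumptions and a plain pfn integer stands in for page_to_pfn()/pfn_to_page().

#include <assert.h>
#include <stdio.h>

/* Illustrative widths only; the real values come from mm/zsmalloc.c. */
#define OBJ_TAG_BITS    1
#define OBJ_INDEX_BITS  12
#define OBJ_INDEX_MASK  ((1UL << OBJ_INDEX_BITS) - 1)

/* Model of location_to_obj(): pack (pfn, obj_idx) into one word. */
static unsigned long location_to_obj(unsigned long pfn, unsigned int obj_idx)
{
        unsigned long obj;

        obj = pfn << OBJ_INDEX_BITS;
        obj |= obj_idx & OBJ_INDEX_MASK;
        obj <<= OBJ_TAG_BITS;
        return obj;
}

/* Model of obj_to_location(): recover both the pfn and the index. */
static void obj_to_location(unsigned long obj, unsigned long *pfn,
                            unsigned int *obj_idx)
{
        obj >>= OBJ_TAG_BITS;
        *pfn = obj >> OBJ_INDEX_BITS;
        *obj_idx = obj & OBJ_INDEX_MASK;
}

/* Model of the new obj_to_page(): skip the index when only the page is wanted. */
static void obj_to_page(unsigned long obj, unsigned long *pfn)
{
        obj >>= OBJ_TAG_BITS;
        *pfn = obj >> OBJ_INDEX_BITS;
}

int main(void)
{
        unsigned long obj = location_to_obj(0x1234, 7);
        unsigned long pfn;
        unsigned int idx;

        obj_to_location(obj, &pfn, &idx);
        assert(pfn == 0x1234 && idx == 7);

        obj_to_page(obj, &pfn);
        assert(pfn == 0x1234);

        printf("pfn=%#lx idx=%u\n", pfn, idx);
        return 0;
}

In this model, obj_to_page() simply stops one shift earlier than obj_to_location(), which is the saving the patch is after whenever the index is unused.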
@@ -1246,8 +1258,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	unsigned long obj, off;
 	unsigned int obj_idx;
 
-	unsigned int class_idx;
-	enum fullness_group fg;
 	struct size_class *class;
 	struct mapping_area *area;
 	struct page *pages[2];
@@ -1270,8 +1280,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	/* migration cannot move any subpage in this zspage */
 	migrate_read_lock(zspage);
 
-	get_zspage_mapping(zspage, &class_idx, &fg);
-	class = pool->size_class[class_idx];
+	class = zspage_class(pool, zspage);
 	off = (class->size * obj_idx) & ~PAGE_MASK;
 
 	area = &get_cpu_var(zs_map_area);
@@ -1304,16 +1313,13 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 	unsigned long obj, off;
 	unsigned int obj_idx;
 
-	unsigned int class_idx;
-	enum fullness_group fg;
 	struct size_class *class;
 	struct mapping_area *area;
 
 	obj = handle_to_obj(handle);
 	obj_to_location(obj, &page, &obj_idx);
 	zspage = get_zspage(page);
-	get_zspage_mapping(zspage, &class_idx, &fg);
-	class = pool->size_class[class_idx];
+	class = zspage_class(pool, zspage);
 	off = (class->size * obj_idx) & ~PAGE_MASK;
 
 	area = this_cpu_ptr(&zs_map_area);
@@ -1491,8 +1497,6 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 	struct zspage *zspage;
 	struct page *f_page;
 	unsigned long obj;
-	unsigned int f_objidx;
-	int class_idx;
 	struct size_class *class;
 	enum fullness_group fullness;
 	bool isolated;
@@ -1502,13 +1506,11 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 
 	pin_tag(handle);
 	obj = handle_to_obj(handle);
-	obj_to_location(obj, &f_page, &f_objidx);
+	obj_to_page(obj, &f_page);
 	zspage = get_zspage(f_page);
 
 	migrate_read_lock(zspage);
-
-	get_zspage_mapping(zspage, &class_idx, &fullness);
-	class = pool->size_class[class_idx];
+	class = zspage_class(pool, zspage);
 
 	spin_lock(&class->lock);
 	obj_free(class, obj);
@@ -1866,8 +1868,6 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 {
 	struct zs_pool *pool;
 	struct size_class *class;
-	int class_idx;
-	enum fullness_group fullness;
 	struct zspage *zspage;
 	struct address_space *mapping;
 
@@ -1880,15 +1880,10 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 
 	zspage = get_zspage(page);
 
-	/*
-	 * Without class lock, fullness could be stale while class_idx is okay
-	 * because class_idx is constant unless page is freed so we should get
-	 * fullness again under class lock.
-	 */
-	get_zspage_mapping(zspage, &class_idx, &fullness);
 	mapping = page_mapping(page);
 	pool = mapping->private_data;
-	class = pool->size_class[class_idx];
+
+	class = zspage_class(pool, zspage);
 
 	spin_lock(&class->lock);
 	if (get_zspage_inuse(zspage) == 0) {
@@ -1907,6 +1902,9 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 	 * size_class to prevent further object allocation from the zspage.
 	 */
 	if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
+		enum fullness_group fullness;
+		unsigned int class_idx;
+
 		get_zspage_mapping(zspage, &class_idx, &fullness);
 		atomic_long_inc(&pool->isolated_pages);
 		remove_zspage(class, zspage, fullness);
@@ -1923,8 +1921,6 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 {
 	struct zs_pool *pool;
 	struct size_class *class;
-	int class_idx;
-	enum fullness_group fullness;
 	struct zspage *zspage;
 	struct page *dummy;
 	void *s_addr, *d_addr, *addr;
@@ -1949,9 +1945,8 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 
 	/* Concurrent compactor cannot migrate any subpage in zspage */
 	migrate_write_lock(zspage);
-	get_zspage_mapping(zspage, &class_idx, &fullness);
 	pool = mapping->private_data;
-	class = pool->size_class[class_idx];
+	class = zspage_class(pool, zspage);
 	offset = get_first_obj_offset(page);
 
 	spin_lock(&class->lock);
@@ -2049,8 +2044,6 @@ static void zs_page_putback(struct page *page)
 {
 	struct zs_pool *pool;
 	struct size_class *class;
-	int class_idx;
-	enum fullness_group fg;
 	struct address_space *mapping;
 	struct zspage *zspage;
 
@@ -2058,10 +2051,9 @@ static void zs_page_putback(struct page *page)
 	VM_BUG_ON_PAGE(!PageIsolated(page), page);
 
 	zspage = get_zspage(page);
-	get_zspage_mapping(zspage, &class_idx, &fg);
 	mapping = page_mapping(page);
 	pool = mapping->private_data;
-	class = pool->size_class[class_idx];
+	class = zspage_class(pool, zspage);
 
 	spin_lock(&class->lock);
 	dec_zspage_isolation(zspage);