forked from Minki/linux
mm/z3fold.c: improve compression by extending search
The current z3fold implementation only searches this CPU's page lists for a fitting page to put a new object into. This patch adds quick search for very well fitting pages (i. e. those having exactly the required number of free space) on other CPUs too, before allocating a new page for that object. Link: http://lkml.kernel.org/r/20190417103733.72ae81abe1552397c95a008e@gmail.com Signed-off-by: Vitaly Wool <vitaly.vul@sony.com> Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> Cc: Dan Streetman <ddstreet@ieee.org> Cc: Krzysztof Kozlowski <k.kozlowski@samsung.com> Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sonymobile.com> Cc: Uladzislau Rezki <urezki@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
9050cce104
commit
351618b203
36
mm/z3fold.c
36
mm/z3fold.c
@@ -522,6 +522,42 @@ lookup:
|
||||
}
|
||||
put_cpu_ptr(pool->unbuddied);
|
||||
|
||||
if (!zhdr) {
|
||||
int cpu;
|
||||
|
||||
/* look for _exact_ match on other cpus' lists */
|
||||
for_each_online_cpu(cpu) {
|
||||
struct list_head *l;
|
||||
|
||||
unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
|
||||
spin_lock(&pool->lock);
|
||||
l = &unbuddied[chunks];
|
||||
|
||||
zhdr = list_first_entry_or_null(READ_ONCE(l),
|
||||
struct z3fold_header, buddy);
|
||||
|
||||
if (!zhdr || !z3fold_page_trylock(zhdr)) {
|
||||
spin_unlock(&pool->lock);
|
||||
zhdr = NULL;
|
||||
continue;
|
||||
}
|
||||
list_del_init(&zhdr->buddy);
|
||||
zhdr->cpu = -1;
|
||||
spin_unlock(&pool->lock);
|
||||
|
||||
page = virt_to_page(zhdr);
|
||||
if (test_bit(NEEDS_COMPACTING, &page->private)) {
|
||||
z3fold_page_unlock(zhdr);
|
||||
zhdr = NULL;
|
||||
if (can_sleep)
|
||||
cond_resched();
|
||||
continue;
|
||||
}
|
||||
kref_get(&zhdr->refcount);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return zhdr;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user