slob: respect list_head abstraction layer

Currently we reach inside the list_head.  This violates the layer of
abstraction provided by the list_head and makes the code fragile.  More
importantly, it makes the code wicked hard to understand.
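
Concretely, the current code saves a raw pointer before the allocation
call and then compares the list's internal next/prev pointers directly
afterwards (this is the code removed in the diff below):

	prev = sp->lru.prev;
	b = slob_page_alloc(sp, size, align);
	...
	if (prev != slob_list->prev &&
			slob_list->next != prev->next)
		list_move_tail(slob_list, prev->next);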

The code reaches into the list_head structure to counteract the fact that
the list _may_ have been changed during slob_page_alloc().  Instead, we
can add a return parameter to slob_page_alloc() to signal that the list
was modified (list_del() called with page->lru to remove the page from
the freelist).

This code is concerned with an optimisation that counters the tendency
of the first-fit allocation algorithm to fragment memory into many small
chunks at the front of the memory pool.  Since the page is only removed
from the list when an allocation uses _all_ the remaining memory in the
page, in this special case fragmentation does not occur and we therefore
do not need the optimisation.

Add a return parameter to slob_page_alloc() to signal that the allocation
used up the whole page and that the page was removed from the free list.
After calling slob_page_alloc(), check the new return parameter and only
attempt the optimisation if the page is still on the list.
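
Excerpted from the diff below, the caller-side check then reduces to:

	b = slob_page_alloc(sp, size, align, &page_removed_from_list);
	if (!b)
		continue;
	if (!page_removed_from_list) {
		if (!list_is_first(&sp->lru, slob_list))
			list_rotate_to_front(&sp->lru, slob_list);
	}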

Use the list_head API instead of reaching into the list_head structure
to check whether sp is at the front of the list.
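
For reference, both helpers are thin wrappers over struct list_head.
The sketch below shows roughly their form in include/linux/list.h
(list_is_first() already exists there; list_rotate_to_front() was added
earlier in this series) and is included only to illustrate that no list
internals leak into slob:

	static inline int list_is_first(const struct list_head *list,
					const struct list_head *head)
	{
		return list->prev == head;
	}

	static inline void list_rotate_to_front(struct list_head *list,
						struct list_head *head)
	{
		/* Unlink the head sentinel and re-insert it just before
		 * @list, effectively rotating the list so that @list
		 * becomes the first entry. */
		list_move_tail(head, list);
	}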

Link: http://lkml.kernel.org/r/20190402230545.2929-3-tobin@kernel.org
Signed-off-by: Tobin C. Harding <tobin@kernel.org>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -213,13 +213,26 @@ static void slob_free_pages(void *b, int order)
 }
 
 /*
- * Allocate a slob block within a given slob_page sp.
+ * slob_page_alloc() - Allocate a slob block within a given slob_page sp.
+ * @sp: Page to look in.
+ * @size: Size of the allocation.
+ * @align: Allocation alignment.
+ * @page_removed_from_list: Return parameter.
+ *
+ * Tries to find a chunk of memory at least @size bytes big within @page.
+ *
+ * Return: Pointer to memory if allocated, %NULL otherwise.  If the
+ *         allocation fills up @page then the page is removed from the
+ *         freelist, in this case @page_removed_from_list will be set to
+ *         true (set to false otherwise).
 */
-static void *slob_page_alloc(struct page *sp, size_t size, int align)
+static void *slob_page_alloc(struct page *sp, size_t size, int align,
+			     bool *page_removed_from_list)
 {
 	slob_t *prev, *cur, *aligned = NULL;
 	int delta = 0, units = SLOB_UNITS(size);
 
+	*page_removed_from_list = false;
 	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
 		slobidx_t avail = slob_units(cur);
 
@@ -254,8 +267,10 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align)
 			}
 
 			sp->units -= units;
-			if (!sp->units)
+			if (!sp->units) {
 				clear_slob_page_free(sp);
+				*page_removed_from_list = true;
+			}
 			return cur;
 		}
 		if (slob_last(cur))
@@ -269,10 +284,10 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align)
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
 	struct page *sp;
-	struct list_head *prev;
 	struct list_head *slob_list;
 	slob_t *b = NULL;
 	unsigned long flags;
+	bool _unused;
 
 	if (size < SLOB_BREAK1)
 		slob_list = &free_slob_small;
@@ -284,6 +299,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
 	list_for_each_entry(sp, slob_list, lru) {
+		bool page_removed_from_list = false;
 #ifdef CONFIG_NUMA
 		/*
 		 * If there's a node specification, search for a partial
@@ -296,18 +312,25 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		if (sp->units < SLOB_UNITS(size))
 			continue;
 
-		/* Attempt to alloc */
-		prev = sp->lru.prev;
-		b = slob_page_alloc(sp, size, align);
+		b = slob_page_alloc(sp, size, align, &page_removed_from_list);
 		if (!b)
 			continue;
 
-		/* Improve fragment distribution and reduce our average
-		 * search time by starting our next search here. (see
-		 * Knuth vol 1, sec 2.5, pg 449) */
-		if (prev != slob_list->prev &&
-				slob_list->next != prev->next)
-			list_move_tail(slob_list, prev->next);
+		/*
+		 * If slob_page_alloc() removed sp from the list then we
+		 * cannot call list functions on sp.  If so allocation
+		 * did not fragment the page anyway so optimisation is
+		 * unnecessary.
+		 */
+		if (!page_removed_from_list) {
+			/*
+			 * Improve fragment distribution and reduce our average
+			 * search time by starting our next search here. (see
+			 * Knuth vol 1, sec 2.5, pg 449)
+			 */
+			if (!list_is_first(&sp->lru, slob_list))
+				list_rotate_to_front(&sp->lru, slob_list);
+		}
 		break;
 	}
 	spin_unlock_irqrestore(&slob_lock, flags);
@@ -326,7 +349,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		INIT_LIST_HEAD(&sp->lru);
 		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 		set_slob_page_free(sp, slob_list);
-		b = slob_page_alloc(sp, size, align);
+		b = slob_page_alloc(sp, size, align, &_unused);
 		BUG_ON(!b);
 		spin_unlock_irqrestore(&slob_lock, flags);
 	}