mm/page_alloc.c: use list_{first,last}_entry instead of list_entry
To make the intention clearer, use list_{first,last}_entry instead of
list_entry.

Signed-off-by: Geliang Tang <geliangtang@163.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit a16601c545
parent 6ac0206bc0
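For reference, the helpers this patch switches to are thin wrappers around
list_entry(); the sketch below paraphrases their definitions from
include/linux/list.h around the time of this patch (a reconstruction, not a
verbatim quote, so the exact kernel source may differ slightly):

	/* First/last entry of an intrusive list, named by intent rather
	 * than by pointer direction. */
	#define list_first_entry(ptr, type, member) \
		list_entry((ptr)->next, type, member)

	#define list_last_entry(ptr, type, member) \
		list_entry((ptr)->prev, type, member)

	/* Returns NULL for an empty list, so callers can drop the separate
	 * list_empty() check. */
	#define list_first_entry_or_null(ptr, type, member) \
		(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)

This is why the hunks below can fold "if (list_empty(...)) continue;" plus a
raw list_entry(head->next, ...) into a single list_first_entry_or_null()
call followed by a NULL check.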
@@ -805,7 +805,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 		do {
 			int mt;	/* migratetype of the to-be-freed page */
 
-			page = list_entry(list->prev, struct page, lru);
+			page = list_last_entry(list, struct page, lru);
 			/* must delete as __free_one_page list manipulates */
 			list_del(&page->lru);
 
@@ -1410,11 +1410,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 	/* Find a page of the appropriate size in the preferred list */
 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
 		area = &(zone->free_area[current_order]);
-		if (list_empty(&area->free_list[migratetype]))
-			continue;
-
-		page = list_entry(area->free_list[migratetype].next,
+		page = list_first_entry_or_null(&area->free_list[migratetype],
 							struct page, lru);
+		if (!page)
+			continue;
 		list_del(&page->lru);
 		rmv_page_order(page);
 		area->nr_free--;
@@ -1693,12 +1692,12 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
 		for (order = 0; order < MAX_ORDER; order++) {
 			struct free_area *area = &(zone->free_area[order]);
 
-			if (list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
+			page = list_first_entry_or_null(
+					&area->free_list[MIGRATE_HIGHATOMIC],
+					struct page, lru);
+			if (!page)
 				continue;
 
-			page = list_entry(area->free_list[MIGRATE_HIGHATOMIC].next,
-						struct page, lru);
-
 			/*
 			 * It should never happen but changes to locking could
 			 * inadvertently allow a per-cpu drain to add pages
@@ -1746,7 +1745,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 		if (fallback_mt == -1)
 			continue;
 
-		page = list_entry(area->free_list[fallback_mt].next,
+		page = list_first_entry(&area->free_list[fallback_mt],
 						struct page, lru);
 		if (can_steal)
 			steal_suitable_fallback(zone, page, start_migratetype);
@@ -2205,9 +2204,9 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 		}
 
 		if (cold)
-			page = list_entry(list->prev, struct page, lru);
+			page = list_last_entry(list, struct page, lru);
 		else
-			page = list_entry(list->next, struct page, lru);
+			page = list_first_entry(list, struct page, lru);
 
 		list_del(&page->lru);
 		pcp->count--;
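As a stand-alone illustration of the pattern, the user-space program below
re-implements a minimal subset of the intrusive-list API; struct item,
list_add_tail() and the helper macros here are simplified stand-ins written
for this example, not the kernel's include/linux/list.h. It contrasts the old
list_entry(head->next / head->prev, ...) idiom with the intent-named helpers:

	/* gcc -Wall demo.c && ./a.out */
	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	static int list_empty(const struct list_head *head)
	{
		return head->next == head;
	}

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))
	#define list_entry(ptr, type, member)	container_of(ptr, type, member)
	#define list_first_entry(ptr, type, member) \
		list_entry((ptr)->next, type, member)
	#define list_last_entry(ptr, type, member) \
		list_entry((ptr)->prev, type, member)
	#define list_first_entry_or_null(ptr, type, member) \
		(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)

	struct item {
		int order;
		struct list_head lru;
	};

	int main(void)
	{
		struct list_head free_list = LIST_HEAD_INIT(free_list);
		struct item a = { .order = 0 }, b = { .order = 1 };
		struct item *it;

		list_add_tail(&a.lru, &free_list);
		list_add_tail(&b.lru, &free_list);

		/* Old style: emptiness check plus raw pointer walk. */
		if (!list_empty(&free_list)) {
			it = list_entry(free_list.next, struct item, lru);
			printf("old style, first order = %d\n", it->order);
		}

		/* New style: one call that names the intent and copes with
		 * an empty list by returning NULL. */
		it = list_first_entry_or_null(&free_list, struct item, lru);
		if (it)
			printf("new style, first order = %d\n", it->order);

		printf("last order = %d\n",
		       list_last_entry(&free_list, struct item, lru)->order);
		return 0;
	}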