mm/vmstat: fix overflow in mod_zone_page_state()
mod_zone_page_state() takes a "delta" integer argument. delta contains the
number of pages that should be added to or subtracted from a struct zone's
vm_stat field. If a zone is larger than 8TB this will overflow.

E.g. for a zone with a size slightly larger than 8TB the line

	mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);

in mm/page_alloc.c:free_area_init_core() will result in a negative value for
the NR_ALLOC_BATCH entry within the zone's vm_stat, since 8TB corresponds to
0x8xxxxxxx pages, which will be sign-extended to a negative value.

Fix this by changing the type of the delta argument to long.

This could fix an early boot problem seen on s390, where we have a 9TB
system with only one node. ZONE_DMA contains 2GB and ZONE_NORMAL the rest.
The system tries to allocate a GFP_DMA page, but ZONE_DMA is completely
empty, so it tries to reclaim pages in an endless loop.

This was seen on a heavily patched 3.10 kernel. One possible explanation
seems to be the overflow caused by mod_zone_page_state(). Unfortunately I
did not have the chance to verify that this patch actually fixes the
problem, since I don't have access to the system right now. However, the
overflow problem does exist either way. Given the description that a system
with slightly less than 8TB does work, this is a plausible candidate for
the observed problem.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
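A minimal user-space sketch of the truncation being fixed (assuming an LP64
system and 4KiB pages; the helper names and the concrete page count are
invented for illustration and are not kernel code):

/*
 * A zone slightly larger than 8TB manages a bit more than 0x80000000
 * pages.  Passed through an "int" parameter, that value no longer fits
 * and (on typical two's-complement LP64 systems) becomes negative.
 */
#include <stdio.h>

static long vm_stat;                    /* stands in for zone->vm_stat[item] */

static void mod_stat_int(int delta)     /* old prototype: "int" delta */
{
        vm_stat += delta;
}

static void mod_stat_long(long delta)   /* patched prototype: "long" delta */
{
        vm_stat += delta;
}

int main(void)
{
        unsigned long managed_pages = 0x80000100UL;  /* > 2^31 pages */

        vm_stat = 0;
        mod_stat_int(managed_pages);    /* truncated to int, sign-extended */
        printf("with int delta:  %ld\n", vm_stat);   /* negative value */

        vm_stat = 0;
        mod_stat_long(managed_pages);
        printf("with long delta: %ld\n", vm_stat);   /* 2147483904 */

        return 0;
}

On such a system the first call typically prints a negative value, while
the second prints the full page count, which is the behaviour the patch
restores for very large zones.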
commit 6cdb18ad98
parent cc28d6d80f
include/linux/vmstat.h:

@@ -176,11 +176,11 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
 #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
 
 #ifdef CONFIG_SMP
-void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
+void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
 void __inc_zone_page_state(struct page *, enum zone_stat_item);
 void __dec_zone_page_state(struct page *, enum zone_stat_item);
 
-void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
+void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
 void inc_zone_page_state(struct page *, enum zone_stat_item);
 void dec_zone_page_state(struct page *, enum zone_stat_item);
 
@@ -205,7 +205,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  * The functions directly modify the zone and global counters.
  */
 static inline void __mod_zone_page_state(struct zone *zone,
-				enum zone_stat_item item, int delta)
+				enum zone_stat_item item, long delta)
 {
 	zone_page_state_add(delta, zone, item);
 }
mm/vmstat.c:

@@ -219,7 +219,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  * particular counter cannot be updated from interrupt context.
  */
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-				int delta)
+				long delta)
 {
 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 	s8 __percpu *p = pcp->vm_stat_diff + item;
@@ -318,8 +318,8 @@ EXPORT_SYMBOL(__dec_zone_page_state);
  *     1       Overstepping half of threshold
  *     -1      Overstepping minus half of threshold
  */
-static inline void mod_state(struct zone *zone,
-			enum zone_stat_item item, int delta, int overstep_mode)
+static inline void mod_state(struct zone *zone, enum zone_stat_item item,
+			long delta, int overstep_mode)
 {
 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 	s8 __percpu *p = pcp->vm_stat_diff + item;
@@ -357,7 +357,7 @@ static inline void mod_state(struct zone *zone,
 }
 
 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-				int delta)
+				long delta)
 {
 	mod_state(zone, item, delta, 0);
 }
@@ -384,7 +384,7 @@ EXPORT_SYMBOL(dec_zone_page_state);
  * Use interrupt disable to serialize counter updates
  */
 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-				int delta)
+				long delta)
 {
 	unsigned long flags;
 
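For context, the functions touched above keep a small per-CPU diff and only
fold it into the zone-wide counter once a threshold is exceeded, which is
why widening only the delta parameter is sufficient: a large delta trips the
threshold check immediately and lands in the long-sized zone counter instead
of the s8 per-CPU diff. A simplified, single-threaded sketch of that scheme
(all names are invented for illustration; the real code uses per-CPU
variables and zone_page_state_add()):

/*
 * Simplified illustration of the differential counting visible in the
 * mm/vmstat.c hunks above.  Not kernel code.
 */
#include <stdio.h>

#define THRESHOLD_DEMO 32          /* stand-in for pcp->stat_threshold */

static long zone_stat_demo;        /* stand-in for the zone-wide counter */
static signed char pcp_diff_demo;  /* stand-in for the per-CPU s8 vm_stat_diff */

static void mod_zone_page_state_demo(long delta)   /* "long" delta, as patched */
{
        long x = (long)pcp_diff_demo + delta;

        /* Large or accumulated deltas are folded into the wide counter... */
        if (x > THRESHOLD_DEMO || x < -THRESHOLD_DEMO) {
                zone_stat_demo += x;
                x = 0;
        }
        /* ...so only a small residual ever has to fit into the s8 diff. */
        pcp_diff_demo = (signed char)x;
}

int main(void)
{
        /* A huge delta (e.g. the page count of a >8TB zone) goes straight
         * to the long counter instead of being truncated on the way in. */
        mod_zone_page_state_demo(0x80000100L);
        printf("zone=%ld diff=%d\n", zone_stat_demo, pcp_diff_demo);

        /* Small single-page updates accumulate in the per-CPU diff. */
        for (int i = 0; i < 10; i++)
                mod_zone_page_state_demo(1);
        printf("zone=%ld diff=%d\n", zone_stat_demo, pcp_diff_demo);

        return 0;
}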