Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "15 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  lib/stackdepot: export save/fetch stack for drivers
  mm: kmemleak: scan .data.ro_after_init
  memcg: prevent memcg caches to be both OFF_SLAB & OBJFREELIST_SLAB
  coredump: fix unfreezable coredumping task
  mm/filemap: don't allow partially uptodate page for pipes
  mm/hugetlb: fix huge page reservation leak in private mapping error paths
  ocfs2: fix not enough credit panic
  Revert "console: don't prefer first registered if DT specifies stdout-path"
  mm: hwpoison: fix thp split handling in memory_failure()
  swapfile: fix memory corruption via malformed swapfile
  mm/cma.c: check the max limit for cma allocation
  scripts/bloat-o-meter: fix SIGPIPE
  shmem: fix pageflags after swapping DMA32 object
  mm, frontswap: make sure allocated frontswap map is assigned
  mm: remove extra newline from allocation stall warning
Linus Torvalds 2016-11-11 09:44:23 -08:00
commit 968ef8de55
19 changed files with 104 additions and 34 deletions

@@ -62,9 +62,11 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__start_ro_after_init = .;
__start_data_ro_after_init = .;
.data..ro_after_init : {
*(.data..ro_after_init)
}
__end_data_ro_after_init = .;
EXCEPTION_TABLE(16)
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;

@@ -2077,8 +2077,6 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
name = of_get_property(of_aliases, "stdout", NULL);
if (name)
of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
-if (of_stdout)
-console_set_by_of();
}
if (!of_aliases)

@@ -1,6 +1,7 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
@@ -423,7 +424,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
if (core_waiters > 0) {
struct core_thread *ptr;
freezer_do_not_count();
wait_for_completion(&core_state->startup);
freezer_count();
/*
* Wait for all the threads to become inactive, so that
* all the thread context (extended register state, like

@@ -3699,7 +3699,7 @@ static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb,
struct ocfs2_dx_root_block *dx_root)
{
-int credits = ocfs2_clusters_to_blocks(osb->sb, 2);
+int credits = ocfs2_clusters_to_blocks(osb->sb, 3);
credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
credits += ocfs2_quota_trans_credits(osb->sb);

@@ -14,6 +14,8 @@
* [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
* and/or .init.* sections.
* [__start_rodata, __end_rodata]: contains .rodata.* sections
* [__start_data_ro_after_init, __end_data_ro_after_init]:
* contains data.ro_after_init section
* [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
* may be out of this range on some architectures.
* [_sinittext, _einittext]: contains .init.text.* sections
@@ -31,6 +33,7 @@ extern char _data[], _sdata[], _edata[];
extern char __bss_start[], __bss_stop[];
extern char __init_begin[], __init_end[];
extern char _sinittext[], _einittext[];
extern char __start_data_ro_after_init[], __end_data_ro_after_init[];
extern char _end[];
extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];

@@ -259,7 +259,10 @@
* own by defining an empty RO_AFTER_INIT_DATA.
*/
#ifndef RO_AFTER_INIT_DATA
-#define RO_AFTER_INIT_DATA *(.data..ro_after_init)
+#define RO_AFTER_INIT_DATA \
+__start_data_ro_after_init = .; \
+*(.data..ro_after_init) \
+__end_data_ro_after_init = .;
#endif
/*

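For context on what ends up between the new __start_data_ro_after_init/__end_data_ro_after_init markers: kernel code places a variable in .data..ro_after_init through the __ro_after_init attribute from <linux/cache.h>. A minimal illustrative sketch, not part of this commit (the example_* names are made up):

/* Illustrative only; example_* names are not from the kernel tree. */
#include <linux/cache.h>
#include <linux/init.h>

/* Writable during boot; made read-only after init on architectures that enforce it. */
static unsigned long example_limit __ro_after_init = 42;

static int __init example_setup(void)
{
	example_limit = 100;	/* still legal: init has not finished yet */
	return 0;
}
core_initcall(example_setup);

With the start/end symbols in place, the kmemleak_scan() hunk further down can scan exactly this region for references to allocated objects.
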
@@ -173,12 +173,6 @@ static inline void console_sysfs_notify(void)
#endif
extern bool console_suspend_enabled;
-#ifdef CONFIG_OF
-extern void console_set_by_of(void);
-#else
-static inline void console_set_by_of(void) {}
-#endif
/* Suspend and resume console messages over PM events */
extern void suspend_console(void);
extern void resume_console(void);

@@ -106,8 +106,9 @@ static inline void frontswap_invalidate_area(unsigned type)
static inline void frontswap_init(unsigned type, unsigned long *map)
{
-if (frontswap_enabled())
-__frontswap_init(type, map);
+#ifdef CONFIG_FRONTSWAP
+__frontswap_init(type, map);
+#endif
}
#endif /* _LINUX_FRONTSWAP_H */

@@ -253,17 +253,6 @@ static int preferred_console = -1;
int console_set_on_cmdline;
EXPORT_SYMBOL(console_set_on_cmdline);
-#ifdef CONFIG_OF
-static bool of_specified_console;
-void console_set_by_of(void)
-{
-of_specified_console = true;
-}
-#else
-# define of_specified_console false
-#endif
/* Flag: console code may call schedule() */
static int console_may_schedule;
@@ -2657,7 +2646,7 @@ void register_console(struct console *newcon)
* didn't select a console we take the first one
* that registers here.
*/
-if (preferred_console < 0 && !of_specified_console) {
+if (preferred_console < 0) {
if (newcon->index < 0)
newcon->index = 0;
if (newcon->setup == NULL ||

@@ -192,6 +192,7 @@ void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
trace->entries = stack->entries;
trace->skip = 0;
}
EXPORT_SYMBOL_GPL(depot_fetch_stack);
/**
* depot_save_stack - save stack in a stack depot.
@@ -283,3 +284,4 @@ exit:
fast_exit:
return retval;
}
EXPORT_SYMBOL_GPL(depot_save_stack);
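
With both symbols exported, modular code (for example a driver debug facility) can record a deduplicated stack trace and print it later. A rough sketch against the 4.9-era <linux/stacktrace.h>/<linux/stackdepot.h> API; the example_* helpers are invented for illustration and error handling is kept minimal:

/* Illustrative only; example_* helpers are not part of the kernel tree. */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

static depot_stack_handle_t example_record_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 2,	/* drop the innermost helper frames */
	};

	save_stack_trace(&trace);
	/* Returns 0 if the depot cannot allocate storage for a new trace. */
	return depot_save_stack(&trace, GFP_NOWAIT);
}

static void example_print_stack(depot_stack_handle_t handle)
{
	struct stack_trace trace;

	depot_fetch_stack(handle, &trace);
	print_stack_trace(&trace, 0);
}

This mirrors what the existing in-kernel users (KASAN, page owner) already do; the exports simply make the same facility available to modules.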

@@ -385,6 +385,9 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
bitmap_maxno = cma_bitmap_maxno(cma);
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
if (bitmap_count > bitmap_maxno)
return NULL;
for (;;) {
mutex_lock(&cma->lock);
bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,

@@ -1826,11 +1826,17 @@ static void return_unused_surplus_pages(struct hstate *h,
* is not the case is if a reserve map was changed between calls. It
* is the responsibility of the caller to notice the difference and
* take appropriate action.
*
* vma_add_reservation is used in error paths where a reservation must
* be restored when a newly allocated huge page must be freed. It is
* to be called after calling vma_needs_reservation to determine if a
* reservation exists.
*/
enum vma_resv_mode {
VMA_NEEDS_RESV,
VMA_COMMIT_RESV,
VMA_END_RESV,
VMA_ADD_RESV,
};
static long __vma_reservation_common(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr,
@@ -1856,6 +1862,14 @@ static long __vma_reservation_common(struct hstate *h,
region_abort(resv, idx, idx + 1);
ret = 0;
break;
case VMA_ADD_RESV:
if (vma->vm_flags & VM_MAYSHARE)
ret = region_add(resv, idx, idx + 1);
else {
region_abort(resv, idx, idx + 1);
ret = region_del(resv, idx, idx + 1);
}
break;
default:
BUG();
}
@@ -1903,6 +1917,56 @@ static void vma_end_reservation(struct hstate *h,
(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}
static long vma_add_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}
/*
* This routine is called to restore a reservation on error paths. In the
* specific error paths, a huge page was allocated (via alloc_huge_page)
* and is about to be freed. If a reservation for the page existed,
* alloc_huge_page would have consumed the reservation and set PagePrivate
* in the newly allocated page. When the page is freed via free_huge_page,
* the global reservation count will be incremented if PagePrivate is set.
* However, free_huge_page can not adjust the reserve map. Adjust the
* reserve map here to be consistent with global reserve count adjustments
* to be made by free_huge_page.
*/
static void restore_reserve_on_error(struct hstate *h,
struct vm_area_struct *vma, unsigned long address,
struct page *page)
{
if (unlikely(PagePrivate(page))) {
long rc = vma_needs_reservation(h, vma, address);
if (unlikely(rc < 0)) {
/*
* Rare out of memory condition in reserve map
* manipulation. Clear PagePrivate so that
* global reserve count will not be incremented
* by free_huge_page. This will make it appear
* as though the reservation for this page was
* consumed. This may prevent the task from
* faulting in the page at a later time. This
* is better than inconsistent global huge page
* accounting of reserve counts.
*/
ClearPagePrivate(page);
} else if (rc) {
rc = vma_add_reservation(h, vma, address);
if (unlikely(rc < 0))
/*
* See above comment about rare out of
* memory condition.
*/
ClearPagePrivate(page);
} else
vma_end_reservation(h, vma, address);
}
}
struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve)
{
@@ -3498,6 +3562,7 @@ retry_avoidcopy:
spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out_release_all:
restore_reserve_on_error(h, vma, address, new_page);
put_page(new_page);
out_release_old:
put_page(old_page);
@@ -3680,6 +3745,7 @@ backout:
spin_unlock(ptl);
backout_unlocked:
unlock_page(page);
restore_reserve_on_error(h, vma, address, page);
put_page(page);
goto out;
}

@@ -1414,6 +1414,7 @@ static void kmemleak_scan(void)
/* data/bss scanning */
scan_large_block(_sdata, _edata);
scan_large_block(__bss_start, __bss_stop);
scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init);
#ifdef CONFIG_SMP
/* per-cpu sections scanning */

@@ -1112,10 +1112,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
}
if (!PageHuge(p) && PageTransHuge(hpage)) {
-lock_page(hpage);
-if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
-unlock_page(hpage);
-if (!PageAnon(hpage))
+lock_page(p);
+if (!PageAnon(p) || unlikely(split_huge_page(p))) {
+unlock_page(p);
+if (!PageAnon(p))
pr_err("Memory failure: %#lx: non anonymous thp\n",
pfn);
else
@@ -1126,9 +1126,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
put_hwpoison_page(p);
return -EBUSY;
}
-unlock_page(hpage);
-get_hwpoison_page(p);
-put_hwpoison_page(hpage);
+unlock_page(p);
VM_BUG_ON_PAGE(!page_count(p), p);
hpage = compound_head(p);
}

@@ -3658,7 +3658,7 @@ retry:
/* Make sure we know about allocations which stall for too long */
if (time_after(jiffies, alloc_start + stall_timeout)) {
warn_alloc(gfp_mask,
"page alloction stalls for %ums, order:%u\n",
"page allocation stalls for %ums, order:%u",
jiffies_to_msecs(jiffies-alloc_start), order);
stall_timeout += 10 * HZ;
}

@@ -1483,6 +1483,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
copy_highpage(newpage, oldpage);
flush_dcache_page(newpage);
__SetPageLocked(newpage);
__SetPageSwapBacked(newpage);
SetPageUptodate(newpage);
set_page_private(newpage, swap_index);
SetPageSwapCache(newpage);

@@ -533,8 +533,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
s = create_cache(cache_name, root_cache->object_size,
root_cache->size, root_cache->align,
-root_cache->flags, root_cache->ctor,
-memcg, root_cache);
+root_cache->flags & CACHE_CREATE_MASK,
+root_cache->ctor, memcg, root_cache);
/*
* If we could not create a memcg cache, do not complain, because
* that's not critical at all as we can always proceed with the root

@@ -2224,6 +2224,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
swab32s(&swap_header->info.version);
swab32s(&swap_header->info.last_page);
swab32s(&swap_header->info.nr_badpages);
if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
return 0;
for (i = 0; i < swap_header->info.nr_badpages; i++)
swab32s(&swap_header->info.badpages[i]);
}

@@ -8,6 +8,9 @@
# of the GNU General Public License, incorporated herein by reference.
import sys, os, re
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
if len(sys.argv) != 3:
sys.stderr.write("usage: %s file1 file2\n" % sys.argv[0])