mirror of
https://github.com/torvalds/linux.git
synced 2024-11-11 06:31:49 +00:00
Merge branch 'slab/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'slab/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  Update Pekka's email address in MAINTAINERS
  mm/slab.c: make local symbols static
  slub: Avoid use of slub_lock in show_slab_objects()
  memory hotplug: one more lock on memory hotplug
This commit is contained in:
commit
1b59be2a6c
@ -3684,7 +3684,7 @@ F: kernel/debug/
|
||||
|
||||
KMEMCHECK
|
||||
M: Vegard Nossum <vegardno@ifi.uio.no>
|
||||
M: Pekka Enberg <penberg@cs.helsinki.fi>
|
||||
M: Pekka Enberg <penberg@kernel.org>
|
||||
S: Maintained
|
||||
F: Documentation/kmemcheck.txt
|
||||
F: arch/x86/include/asm/kmemcheck.h
|
||||
@ -5646,7 +5646,7 @@ F: drivers/net/sky2.*
|
||||
|
||||
SLAB ALLOCATOR
|
||||
M: Christoph Lameter <cl@linux-foundation.org>
|
||||
M: Pekka Enberg <penberg@cs.helsinki.fi>
|
||||
M: Pekka Enberg <penberg@kernel.org>
|
||||
M: Matt Mackall <mpm@selenic.com>
|
||||
L: linux-mm@kvack.org
|
||||
S: Maintained
|
||||
|
@ -165,6 +165,12 @@ extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
|
||||
extern void put_page_bootmem(struct page *page);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Lock for memory hotplug guarantees 1) all callbacks for memory hotplug
|
||||
* notifier will be called under this. 2) offline/online/add/remove memory
|
||||
* will not run simultaneously.
|
||||
*/
|
||||
|
||||
void lock_memory_hotplug(void);
|
||||
void unlock_memory_hotplug(void);
|
||||
|
||||
|
@ -409,6 +409,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
|
||||
int ret;
|
||||
struct memory_notify arg;
|
||||
|
||||
lock_memory_hotplug();
|
||||
arg.start_pfn = pfn;
|
||||
arg.nr_pages = nr_pages;
|
||||
arg.status_change_nid = -1;
|
||||
@ -421,6 +422,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
|
||||
ret = notifier_to_errno(ret);
|
||||
if (ret) {
|
||||
memory_notify(MEM_CANCEL_ONLINE, &arg);
|
||||
unlock_memory_hotplug();
|
||||
return ret;
|
||||
}
|
||||
/*
|
||||
@ -445,6 +447,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
|
||||
printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
|
||||
nr_pages, pfn);
|
||||
memory_notify(MEM_CANCEL_ONLINE, &arg);
|
||||
unlock_memory_hotplug();
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -469,6 +472,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
|
||||
|
||||
if (onlined_pages)
|
||||
memory_notify(MEM_ONLINE, &arg);
|
||||
unlock_memory_hotplug();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -284,7 +284,7 @@ struct kmem_list3 {
|
||||
* Need this for bootstrapping a per node allocator.
|
||||
*/
|
||||
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
|
||||
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
|
||||
static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
|
||||
#define CACHE_CACHE 0
|
||||
#define SIZE_AC MAX_NUMNODES
|
||||
#define SIZE_L3 (2 * MAX_NUMNODES)
|
||||
@ -4053,7 +4053,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
|
||||
* necessary. Note that the l3 listlock also protects the array_cache
|
||||
* if drain_array() is used on the shared array.
|
||||
*/
|
||||
void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
|
||||
static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
|
||||
struct array_cache *ac, int force, int node)
|
||||
{
|
||||
int tofree;
|
||||
@ -4317,7 +4317,7 @@ static const struct seq_operations slabinfo_op = {
|
||||
* @count: data length
|
||||
* @ppos: unused
|
||||
*/
|
||||
ssize_t slabinfo_write(struct file *file, const char __user * buffer,
|
||||
static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
|
||||
|
@ -3797,7 +3797,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
|
||||
}
|
||||
}
|
||||
|
||||
down_read(&slub_lock);
|
||||
lock_memory_hotplug();
|
||||
#ifdef CONFIG_SLUB_DEBUG
|
||||
if (flags & SO_ALL) {
|
||||
for_each_node_state(node, N_NORMAL_MEMORY) {
|
||||
@ -3838,7 +3838,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
|
||||
x += sprintf(buf + x, " N%d=%lu",
|
||||
node, nodes[node]);
|
||||
#endif
|
||||
up_read(&slub_lock);
|
||||
unlock_memory_hotplug();
|
||||
kfree(nodes);
|
||||
return x + sprintf(buf + x, "\n");
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user