modules: remove modlist_lock

Now that we always use stop_machine for module insertion and deletion, we
no longer need modlist_lock: merely disabling preemption is sufficient to
block against list manipulation, because stop_machine cannot proceed while
any CPU is running with preemption disabled.  This also avoids a potential
deadlock in the oops path, where we could otherwise try to take the lock a
second time while it is already held.
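
To illustrate the reader-side pattern this relies on, here is a minimal,
self-contained sketch (the example_* names and the simplified struct are
made up for illustration and are not part of kernel/module.c): a reader
only disables preemption around the list walk, because add/delete runs
inside stop_machine, which cannot start while any CPU sits in a
preempt-disabled region.

/* Illustrative sketch only -- not from kernel/module.c. */
#include <linux/list.h>
#include <linux/preempt.h>

struct example_module {
        struct list_head list;
        void *core;
        unsigned long core_size;
};

/* Writers add/remove entries only from within stop_machine. */
static LIST_HEAD(example_modules);

static int example_is_module_address(unsigned long addr)
{
        struct example_module *mod;
        int ret = 0;

        /* No lock needed: stop_machine-based add/delete cannot run
         * while this CPU has preemption disabled. */
        preempt_disable();
        list_for_each_entry(mod, &example_modules, list) {
                if (addr >= (unsigned long)mod->core &&
                    addr < (unsigned long)mod->core + mod->core_size) {
                        ret = 1;
                        break;
                }
        }
        preempt_enable();

        return ret;
}

By the time the writer actually modifies the list inside stop_machine, no
CPU can be in the middle of a walk like the one above, so no reference
counting or locking is required on the read side.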

Bug: 8695
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Tobias Oed <tobiasoed@hotmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

--- a/kernel/module.c
+++ b/kernel/module.c
@@ -61,10 +61,8 @@ extern int module_sysfs_initialized;
 /* If this is set, the section belongs in the init part of the module */
 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
 
-/* Protects module list */
-static DEFINE_SPINLOCK(modlist_lock);
-
-/* List of modules, protected by module_mutex AND modlist_lock */
+/* List of modules, protected by module_mutex or preempt_disable
+ * (add/delete uses stop_machine). */
 static DEFINE_MUTEX(module_mutex);
 static LIST_HEAD(modules);
 
@@ -760,14 +758,13 @@ static void print_unload_info(struct seq_file *m, struct module *mod)
 void __symbol_put(const char *symbol)
 {
        struct module *owner;
-       unsigned long flags;
        const unsigned long *crc;
 
-       spin_lock_irqsave(&modlist_lock, flags);
+       preempt_disable();
        if (!__find_symbol(symbol, &owner, &crc, 1))
                BUG();
        module_put(owner);
-       spin_unlock_irqrestore(&modlist_lock, flags);
+       preempt_enable();
 }
 EXPORT_SYMBOL(__symbol_put);
 
@@ -1228,14 +1225,14 @@ static void free_module(struct module *mod)
 void *__symbol_get(const char *symbol)
 {
        struct module *owner;
-       unsigned long value, flags;
+       unsigned long value;
        const unsigned long *crc;
 
-       spin_lock_irqsave(&modlist_lock, flags);
+       preempt_disable();
        value = __find_symbol(symbol, &owner, &crc, 1);
        if (value && !strong_try_module_get(owner))
                value = 0;
-       spin_unlock_irqrestore(&modlist_lock, flags);
+       preempt_enable();
 
        return (void *)value;
 }
@@ -2308,11 +2305,10 @@ const struct seq_operations modules_op = {
 /* Given an address, look for it in the module exception tables. */
 const struct exception_table_entry *search_module_extables(unsigned long addr)
 {
-       unsigned long flags;
        const struct exception_table_entry *e = NULL;
        struct module *mod;
 
-       spin_lock_irqsave(&modlist_lock, flags);
+       preempt_disable();
        list_for_each_entry(mod, &modules, list) {
                if (mod->num_exentries == 0)
                        continue;
@@ -2323,7 +2319,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
                if (e)
                        break;
        }
-       spin_unlock_irqrestore(&modlist_lock, flags);
+       preempt_enable();
 
        /* Now, if we found one, we are running inside it now, hence
           we cannot unload the module, hence no refcnt needed. */
@@ -2335,25 +2331,24 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
  */
 int is_module_address(unsigned long addr)
 {
-       unsigned long flags;
        struct module *mod;
 
-       spin_lock_irqsave(&modlist_lock, flags);
+       preempt_disable();
 
        list_for_each_entry(mod, &modules, list) {
                if (within(addr, mod->module_core, mod->core_size)) {
-                       spin_unlock_irqrestore(&modlist_lock, flags);
+                       preempt_enable();
                        return 1;
                }
        }
 
-       spin_unlock_irqrestore(&modlist_lock, flags);
+       preempt_enable();
 
        return 0;
 }
 
-/* Is this a valid kernel address?  We don't grab the lock: we are oopsing. */
+/* Is this a valid kernel address? */
 struct module *__module_text_address(unsigned long addr)
 {
        struct module *mod;
 
@@ -2368,11 +2363,10 @@ struct module *__module_text_address(unsigned long addr)
 struct module *module_text_address(unsigned long addr)
 {
        struct module *mod;
-       unsigned long flags;
 
-       spin_lock_irqsave(&modlist_lock, flags);
+       preempt_disable();
        mod = __module_text_address(addr);
-       spin_unlock_irqrestore(&modlist_lock, flags);
+       preempt_enable();
 
        return mod;
 }