path: root/kernel/module.c
author	Rusty Russell <rusty@rustcorp.com.au>	2010-05-19 17:33:39 -0600
committer	Rusty Russell <rusty@rustcorp.com.au>	2010-05-19 17:33:39 +0930
commit	480b02df3aa9f07d1c7df0cd8be7a5ca73893455 (patch)
tree	27b51ecc15bb30b0d79bb0c702a0124c8b4848fc /kernel/module.c
parent	fedb3d27d9e8606b3867b5ae49d6258458a07a72 (diff)
module: drop the lock while waiting for module to complete initialization.
This fixes "gave up waiting for init of module libcrc32c." which happened at boot time due to multiple parallel module loads. The problem was a deadlock: we wait for a module to finish initializing, but we keep the module_lock mutex so it can't complete. In particular, this could reasonably happen if a module does a request_module() in its initialization routine. So we change use_module() to return an errno rather than a bool, and if it's -EBUSY we drop the lock and wait in the caller, then reaquire the lock. Reported-by: Brandon Philips <brandon@ifup.org> Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> Tested-by: Brandon Philips <brandon@ifup.org>
Diffstat (limited to 'kernel/module.c')
-rw-r--r--	kernel/module.c	59
1 file changed, 37 insertions, 22 deletions
diff --git a/kernel/module.c b/kernel/module.c
index e2564580f3f1..970d773aec62 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -561,33 +561,26 @@ int use_module(struct module *a, struct module *b)
 	struct module_use *use;
 	int no_warn, err;
-	if (b == NULL || already_uses(a, b)) return 1;
-
-	/* If we're interrupted or time out, we fail. */
-	if (wait_event_interruptible_timeout(
-		    module_wq, (err = strong_try_module_get(b)) != -EBUSY,
-		    30 * HZ) <= 0) {
-		printk("%s: gave up waiting for init of module %s.\n",
-		       a->name, b->name);
+	if (b == NULL || already_uses(a, b))
 		return 0;
-	}
-	/* If strong_try_module_get() returned a different error, we fail. */
+	/* If we're interrupted or time out, we fail. */
+	err = strong_try_module_get(b);
 	if (err)
-		return 0;
+		return err;
 	DEBUGP("Allocating new usage for %s.\n", a->name);
 	use = kmalloc(sizeof(*use), GFP_ATOMIC);
 	if (!use) {
 		printk("%s: out of memory loading\n", a->name);
 		module_put(b);
-		return 0;
+		return -ENOMEM;
 	}
 	use->module_which_uses = a;
 	list_add(&use->list, &b->modules_which_use_me);
 	no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
-	return 1;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(use_module);
@@ -880,7 +873,7 @@ static inline void module_unload_free(struct module *mod)
 int use_module(struct module *a, struct module *b)
 {
-	return strong_try_module_get(b) == 0;
+	return strong_try_module_get(b);
 }
 EXPORT_SYMBOL_GPL(use_module);
@@ -1051,17 +1044,39 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
 	struct module *owner;
 	const struct kernel_symbol *sym;
 	const unsigned long *crc;
+	DEFINE_WAIT(wait);
+	int err;
+	long timeleft = 30 * HZ;
+again:
 	sym = find_symbol(name, &owner, &crc,
 			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
-	/* use_module can fail due to OOM,
-	   or module initialization or unloading */
-	if (sym) {
-		if (!check_version(sechdrs, versindex, name, mod, crc, owner)
-		    || !use_module(mod, owner))
-			sym = NULL;
-	}
-	return sym;
+	if (!sym)
+		return NULL;
+
+	if (!check_version(sechdrs, versindex, name, mod, crc, owner))
+		return NULL;
+
+	prepare_to_wait(&module_wq, &wait, TASK_INTERRUPTIBLE);
+	err = use_module(mod, owner);
+	if (likely(!err) || err != -EBUSY || signal_pending(current)) {
+		finish_wait(&module_wq, &wait);
+		return err ? NULL : sym;
+	}
+
+	/* Module is still loading. Drop lock and wait. */
+	mutex_unlock(&module_mutex);
+	timeleft = schedule_timeout(timeleft);
+	mutex_lock(&module_mutex);
+	finish_wait(&module_wq, &wait);
+
+	/* Module might be gone entirely, or replaced. Re-lookup. */
+	if (timeleft)
+		goto again;
+
+	printk(KERN_WARNING "%s: gave up waiting for init of module %s.\n",
+	       mod->name, owner->name);
+	return NULL;
 }
 /*