author    Andrew Vagin <avagin@openvz.org>    2012-03-07 14:49:56 +0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2012-04-02 09:52:35 -0700
commit    1a24aa6ec745d87b9dcac87ebb45959ae19117dc (patch)
tree      7d236b2d6d00bbffeb3c3f0526d7092a3b842e26
parent    1f2c44df43fef14718d2713f89b1ce1fada45577 (diff)
uevent: send events in correct order according to seqnum (v3)
commit 7b60a18da393ed70db043a777fd9e6d5363077c4 upstream.

The queue handling in the udev daemon assumes that events are ordered.

Before this patch, uevent_seqnum was incremented under sequence_lock and
the event was then sent under uevent_sock_mutex, so there was a window
between incrementing the seqnum and sending the event in which another
event could be sent first, delivering events out of seqnum order.

This patch takes uevent_sock_mutex before incrementing uevent_seqnum, so
the increment and the send happen under the same lock.

v2: drop sequence_lock; uevent_seqnum is now protected by uevent_sock_mutex
v3: unlock the mutex before the goto exit

Thanks to Kay for the comments.

Signed-off-by: Andrew Vagin <avagin@openvz.org>
Tested-By: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--   lib/kobject_uevent.c   19
1 file changed, 9 insertions(+), 10 deletions(-)
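Before the patch itself, here is a minimal user-space sketch (not the kernel
code) of the ordering argument in the commit message: if the sequence-number
increment and the event delivery are covered by one and the same lock, the
delivery order seen by the consumer matches the seqnum order. The names
emit_event and send_event are hypothetical, chosen only for illustration.

/*
 * Sketch: increment the sequence number and deliver the event while
 * holding a single mutex, so no other producer can interleave between
 * the two steps and deliver a later seqnum first.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint64_t seqnum;

/* hypothetical sender: stands in for the netlink broadcast in the patch */
static void send_event(const char *action, uint64_t seq)
{
        printf("ACTION=%s SEQNUM=%llu\n", action, (unsigned long long)seq);
}

static void emit_event(const char *action)
{
        pthread_mutex_lock(&event_mutex);
        /* both steps happen under the same lock, as in the patch below */
        uint64_t seq = ++seqnum;
        send_event(action, seq);
        pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
        emit_event("add");
        emit_event("remove");
        return 0;
}

The pre-patch kernel code corresponds to bumping the counter under one lock
and calling the sender under another, which is exactly the window the commit
message describes.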
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index ad72a03ce5e9..6d40244e8010 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -29,16 +29,17 @@
u64 uevent_seqnum;
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
-static DEFINE_SPINLOCK(sequence_lock);
#ifdef CONFIG_NET
struct uevent_sock {
struct list_head list;
struct sock *sk;
};
static LIST_HEAD(uevent_sock_list);
-static DEFINE_MUTEX(uevent_sock_mutex);
#endif
+/* This lock protects uevent_seqnum and uevent_sock_list */
+static DEFINE_MUTEX(uevent_sock_mutex);
+
/* the strings here must match the enum in include/linux/kobject.h */
static const char *kobject_actions[] = {
[KOBJ_ADD] = "add",
@@ -136,7 +137,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
struct kobject *top_kobj;
struct kset *kset;
const struct kset_uevent_ops *uevent_ops;
- u64 seq;
int i = 0;
int retval = 0;
#ifdef CONFIG_NET
@@ -243,17 +243,16 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
else if (action == KOBJ_REMOVE)
kobj->state_remove_uevent_sent = 1;
+ mutex_lock(&uevent_sock_mutex);
/* we will send an event, so request a new sequence number */
- spin_lock(&sequence_lock);
- seq = ++uevent_seqnum;
- spin_unlock(&sequence_lock);
- retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
- if (retval)
+ retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum);
+ if (retval) {
+ mutex_unlock(&uevent_sock_mutex);
goto exit;
+ }
#if defined(CONFIG_NET)
/* send netlink message */
- mutex_lock(&uevent_sock_mutex);
list_for_each_entry(ue_sk, &uevent_sock_list, list) {
struct sock *uevent_sock = ue_sk->sk;
struct sk_buff *skb;
@@ -287,8 +286,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
} else
retval = -ENOMEM;
}
- mutex_unlock(&uevent_sock_mutex);
#endif
+ mutex_unlock(&uevent_sock_mutex);
/* call uevent_helper, usually only enabled during early boot */
if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {