| From 7b60a18da393ed70db043a777fd9e6d5363077c4 Mon Sep 17 00:00:00 2001 |
| From: Andrew Vagin <avagin@openvz.org> |
| Date: Wed, 7 Mar 2012 14:49:56 +0400 |
| Subject: uevent: send events in correct order according to seqnum (v3) |
| |
| From: Andrew Vagin <avagin@openvz.org> |
| |
| commit 7b60a18da393ed70db043a777fd9e6d5363077c4 upstream. |
| |
| The queue handling in the udev daemon assumes that the events are |
| ordered. |
| |
| Before this patch, uevent_seqnum was incremented under sequence_lock, |
| then the event was sent under uevent_sock_mutex. In other words, the |
| code contained a window between incrementing seqnum and sending an event. |
| |
| This patch locks uevent_sock_mutex before incrementing uevent_seqnum. |
| |
| v2: delete sequence_lock, uevent_seqnum is protected by uevent_sock_mutex |
| v3: unlock the mutex before the goto exit |
| |
| Thanks to Kay for the comments. |
| |
| Signed-off-by: Andrew Vagin <avagin@openvz.org> |
| Tested-By: Kay Sievers <kay.sievers@vrfy.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| lib/kobject_uevent.c | 19 +++++++++---------- |
| 1 file changed, 9 insertions(+), 10 deletions(-) |
| |
| --- a/lib/kobject_uevent.c |
| +++ b/lib/kobject_uevent.c |
| @@ -29,16 +29,17 @@ |
| |
| u64 uevent_seqnum; |
| char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; |
| -static DEFINE_SPINLOCK(sequence_lock); |
| #ifdef CONFIG_NET |
| struct uevent_sock { |
| struct list_head list; |
| struct sock *sk; |
| }; |
| static LIST_HEAD(uevent_sock_list); |
| -static DEFINE_MUTEX(uevent_sock_mutex); |
| #endif |
| |
| +/* This lock protects uevent_seqnum and uevent_sock_list */ |
| +static DEFINE_MUTEX(uevent_sock_mutex); |
| + |
| /* the strings here must match the enum in include/linux/kobject.h */ |
| static const char *kobject_actions[] = { |
| [KOBJ_ADD] = "add", |
| @@ -136,7 +137,6 @@ int kobject_uevent_env(struct kobject *k |
| struct kobject *top_kobj; |
| struct kset *kset; |
| const struct kset_uevent_ops *uevent_ops; |
| - u64 seq; |
| int i = 0; |
| int retval = 0; |
| #ifdef CONFIG_NET |
| @@ -243,17 +243,16 @@ int kobject_uevent_env(struct kobject *k |
| else if (action == KOBJ_REMOVE) |
| kobj->state_remove_uevent_sent = 1; |
| |
| + mutex_lock(&uevent_sock_mutex); |
| /* we will send an event, so request a new sequence number */ |
| - spin_lock(&sequence_lock); |
| - seq = ++uevent_seqnum; |
| - spin_unlock(&sequence_lock); |
| - retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq); |
| - if (retval) |
| + retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum); |
| + if (retval) { |
| + mutex_unlock(&uevent_sock_mutex); |
| goto exit; |
| + } |
| |
| #if defined(CONFIG_NET) |
| /* send netlink message */ |
| - mutex_lock(&uevent_sock_mutex); |
| list_for_each_entry(ue_sk, &uevent_sock_list, list) { |
| struct sock *uevent_sock = ue_sk->sk; |
| struct sk_buff *skb; |
| @@ -287,8 +286,8 @@ int kobject_uevent_env(struct kobject *k |
| } else |
| retval = -ENOMEM; |
| } |
| - mutex_unlock(&uevent_sock_mutex); |
| #endif |
| + mutex_unlock(&uevent_sock_mutex); |
| |
| /* call uevent_helper, usually only enabled during early boot */ |
| if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { |