author     Herbert Xu <herbert@gondor.apana.org.au>        2007-05-08 18:34:17 -0700
committer  David S. Miller <davem@sunset.davemloft.net>    2007-05-10 23:45:07 -0700
commit     572a103ded0ad880f75ce83e99f0512fbb80b5b0 (patch)
tree       b469715be284a13c3f603903cc9158baa7baa992 /net
parent     c33be3c362f1bc98f6e2d731a274ef138ae80741 (diff)
[NET] link_watch: Move link watch list into net_device
These days the link watch mechanism is an integral part of the network
subsystem as it manages the carrier status.  So it now makes sense to
allocate some memory for it in net_device rather than allocating it on
demand.  In fact, this is necessary because we can't tolerate a memory
allocation failure since that means we'd have to potentially throw a
link up event away.

It also simplifies the code greatly.

In doing so I discovered a subtle race condition in the use of
singleevent.  This race condition still exists (and is somewhat
magnified) without singleevent but it's now plugged thanks to an
smp_mb__before_clear_bit.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
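In miniature, the pattern the patch adopts is: each device embeds its own
next pointer, so queueing a link event is a lock-protected pointer push that
cannot fail, and the drain loop detaches the whole list, reads each device's
next pointer, orders that read with a barrier, and only then clears the
pending bit.  The user-space sketch below illustrates that pattern under
stated assumptions: the names (struct dev, fire_event, run_queue, ev_list)
are made up for illustration, and a pthread mutex plus GCC atomic builtins
stand in for the kernel's spinlock, test_and_set_bit() and
smp_mb__before_clear_bit(); it is not the kernel code itself.

#include <stdio.h>
#include <pthread.h>

/* Illustrative stand-in for struct net_device: the link lives inside the
 * object itself, so queueing an event never needs an allocation. */
struct dev {
	const char *name;
	struct dev *lw_next;	/* like dev->link_watch_next in the patch */
	int pending;		/* like __LINK_STATE_LINKWATCH_PENDING */
};

static struct dev *ev_list;	/* like lweventlist */
static pthread_mutex_t ev_lock = PTHREAD_MUTEX_INITIALIZER;

/* Roughly the role of linkwatch_fire_event(): push the device onto the
 * list head under the lock. */
static void fire_event(struct dev *d)
{
	if (__sync_lock_test_and_set(&d->pending, 1))
		return;		/* already queued, nothing to do */

	pthread_mutex_lock(&ev_lock);
	d->lw_next = ev_list;
	ev_list = d;
	pthread_mutex_unlock(&ev_lock);
}

/* Roughly the role of linkwatch_run_queue(): detach the whole list under
 * the lock, then walk it without the lock. */
static void run_queue(void (*handle)(struct dev *))
{
	struct dev *next;

	pthread_mutex_lock(&ev_lock);
	next = ev_list;
	ev_list = NULL;
	pthread_mutex_unlock(&ev_lock);

	while (next) {
		struct dev *d = next;

		/* Read the link before clearing "pending": once pending is
		 * clear, fire_event() may requeue d and overwrite lw_next.
		 * The barrier plays the role of smp_mb__before_clear_bit(). */
		next = d->lw_next;
		__sync_synchronize();
		__sync_lock_release(&d->pending);

		handle(d);
	}
}

static void handle(struct dev *d)
{
	printf("handled %s\n", d->name);
}

int main(void)
{
	struct dev a = { .name = "eth0" }, b = { .name = "eth1" };

	fire_event(&a);
	fire_event(&b);
	fire_event(&a);		/* duplicate: the pending bit filters it out */
	run_queue(handle);	/* handles eth1 then eth0 (LIFO order) */
	return 0;
}

Because the list entry lives inside the device, a queueing attempt can no
longer fail for lack of memory, which is the property the commit message
calls necessary; the trade-off is that the drain handles devices in reverse
insertion order, which is harmless here since each device is processed
independently.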
Diffstat (limited to 'net')
-rw-r--r--  net/core/link_watch.c  |  50
1 file changed, 15 insertions(+), 35 deletions(-)
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index e3c26a9ccad6..71a35da275d4 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -19,7 +19,6 @@
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
-#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
@@ -28,7 +27,6 @@
enum lw_bits {
	LW_RUNNING = 0,
-	LW_SE_USED
};
static unsigned long linkwatch_flags;
@@ -37,17 +35,9 @@ static unsigned long linkwatch_nextevent;
static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
-static LIST_HEAD(lweventlist);
+static struct net_device *lweventlist;
static DEFINE_SPINLOCK(lweventlist_lock);
-struct lw_event {
-	struct list_head list;
-	struct net_device *dev;
-};
-
-/* Avoid kmalloc() for most systems */
-static struct lw_event singleevent;
-
static unsigned char default_operstate(const struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
@@ -90,21 +80,23 @@ static void rfc2863_policy(struct net_device *dev)
/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
-	struct list_head head, *n, *next;
+	struct net_device *next;

	spin_lock_irq(&lweventlist_lock);
-	list_replace_init(&lweventlist, &head);
+	next = lweventlist;
+	lweventlist = NULL;
	spin_unlock_irq(&lweventlist_lock);

-	list_for_each_safe(n, next, &head) {
-		struct lw_event *event = list_entry(n, struct lw_event, list);
-		struct net_device *dev = event->dev;
+	while (next) {
+		struct net_device *dev = next;

-		if (event == &singleevent) {
-			clear_bit(LW_SE_USED, &linkwatch_flags);
-		} else {
-			kfree(event);
-		}
+		next = dev->link_watch_next;
+
+		/*
+		 * Make sure the above read is complete since it can be
+		 * rewritten as soon as we clear the bit below.
+		 */
+		smp_mb__before_clear_bit();

		/* We are about to handle this device,
		 * so new events can be accepted
@@ -147,24 +139,12 @@ void linkwatch_fire_event(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
		unsigned long flags;
-		struct lw_event *event;
-
-		if (test_and_set_bit(LW_SE_USED, &linkwatch_flags)) {
-			event = kmalloc(sizeof(struct lw_event), GFP_ATOMIC);
-
-			if (unlikely(event == NULL)) {
-				clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
-				return;
-			}
-		} else {
-			event = &singleevent;
-		}

		dev_hold(dev);
-		event->dev = dev;

		spin_lock_irqsave(&lweventlist_lock, flags);
-		list_add_tail(&event->list, &lweventlist);
+		dev->link_watch_next = lweventlist;
+		lweventlist = dev;
		spin_unlock_irqrestore(&lweventlist_lock, flags);

		if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) {