author	Thomas Gleixner <tglx@linutronix.de>	2012-02-22 12:03:30 +0100
committer	Clark Williams <williams@redhat.com>	2012-04-13 11:01:52 -0500
commit	e15ffe4a7a13065eb2bafa7e262b169d2b2600c2 (patch)
tree	9022fc356feefdcdcf429862e8fc1b55922708ab
parent	b3b33f1273dd90754c0a1592bfe4d95f7eeaaaa9 (diff)
seqlock: Prevent rt starvation
If a low prio writer gets preempted while holding the seqlock write
locked, a high prio reader spins forever on RT.

To prevent this let the reader grab the spinlock, so it blocks and
eventually boosts the writer. This way the writer can proceed and
endless spinning is prevented.

For seqcount writers we disable preemption over the update code path.
Thanks to Al Viro for disentangling some VFS code to make that
possible.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
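For reference, the pattern in question is the classic seqlock
reader/writer pairing; the following is a minimal sketch (illustrative
only, not part of this patch; names are made up) using the existing
kernel API:

	#include <linux/seqlock.h>

	static DEFINE_SEQLOCK(demo_lock);
	static u64 demo_data;

	/* Writer: takes demo_lock's spinlock and makes the sequence
	 * count odd across the update. If it is preempted here on RT,
	 * the count stays odd until it runs again.
	 */
	static void demo_update(u64 val)
	{
		write_seqlock(&demo_lock);
		demo_data = val;
		write_sequnlock(&demo_lock);
	}

	/* Reader: read_seqbegin() spins while the count is odd. Without
	 * this patch a high prio RT reader which preempted the writer
	 * above spins there forever; with it, the reader blocks on the
	 * lock and boosts the writer instead.
	 */
	static u64 demo_read(void)
	{
		unsigned seq;
		u64 val;

		do {
			seq = read_seqbegin(&demo_lock);
			val = demo_data;
		} while (read_seqretry(&demo_lock, seq));
		return val;
	}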
-rw-r--r--	include/linux/seqlock.h	55
-rw-r--r--	include/net/neighbour.h	2
2 files changed, 46 insertions, 11 deletions
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 3e1f3f93dd18..a1dbf48d41f4 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -125,18 +125,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
* Sequence counter only version assumes that callers are using their
* own mutexing.
*/
-static inline void write_seqcount_begin(seqcount_t *s)
+static inline void __write_seqcount_begin(seqcount_t *s)
{
s->sequence++;
smp_wmb();
}
-static inline void write_seqcount_end(seqcount_t *s)
+static inline void write_seqcount_begin(seqcount_t *s)
+{
+ preempt_disable_rt();
+ __write_seqcount_begin(s);
+}
+
+static inline void __write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
}
+static inline void write_seqcount_end(seqcount_t *s)
+{
+ __write_seqcount_end(s);
+ preempt_enable_rt();
+}
+
/**
* write_seqcount_barrier - invalidate in-progress read-side seq operations
* @s: pointer to seqcount_t
@@ -177,10 +189,33 @@ typedef struct {
/*
* Read side functions for starting and finalizing a read side section.
*/
+#ifndef CONFIG_PREEMPT_RT_FULL
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
return read_seqcount_begin(&sl->seqcount);
}
+#else
+/*
+ * Starvation safe read side for RT
+ */
+static inline unsigned read_seqbegin(seqlock_t *sl)
+{
+ unsigned ret;
+
+repeat:
+ ret = sl->seqcount.sequence;
+ if (unlikely(ret & 1)) {
+ /*
+ * Take the lock and let the writer proceed (i.e. eventually
+ * boost it), otherwise we could loop here forever.
+ */
+ spin_lock(&sl->lock);
+ spin_unlock(&sl->lock);
+ goto repeat;
+ }
+ return ret;
+}
+#endif
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
@@ -195,36 +230,36 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ __write_seqcount_begin(&sl->seqcount);
}
static inline void write_sequnlock(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ __write_seqcount_end(&sl->seqcount);
spin_unlock(&sl->lock);
}
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ __write_seqcount_begin(&sl->seqcount);
}
static inline void write_sequnlock_bh(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ __write_seqcount_end(&sl->seqcount);
spin_unlock_bh(&sl->lock);
}
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ __write_seqcount_begin(&sl->seqcount);
}
static inline void write_sequnlock_irq(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ __write_seqcount_end(&sl->seqcount);
spin_unlock_irq(&sl->lock);
}
@@ -233,7 +268,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
- write_seqcount_begin(&sl->seqcount);
+ __write_seqcount_begin(&sl->seqcount);
return flags;
}
@@ -243,7 +278,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- write_seqcount_end(&sl->seqcount);
+ __write_seqcount_end(&sl->seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
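The preempt_disable_rt()/preempt_enable_rt() helpers used above are
provided elsewhere in the RT series. As a sketch of their semantics
(assumed from the RT patch set, not shown in this diff): with
CONFIG_PREEMPT_RT_FULL they map to the real preemption controls,
otherwise they are plain compiler barriers, so mainline behaviour is
unchanged:

	#ifdef CONFIG_PREEMPT_RT_FULL
	# define preempt_disable_rt()	preempt_disable()
	# define preempt_enable_rt()	preempt_enable()
	#else
	# define preempt_disable_rt()	barrier()
	# define preempt_enable_rt()	barrier()
	#endif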
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 34c996f46181..8e626fa6ccde 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -394,7 +394,7 @@ struct neighbour_cb {
#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
+static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
const struct net_device *dev)
{
unsigned int seq;
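The const qualifier on n is dropped because the RT read_seqbegin()
above now takes sl->lock, i.e. the read side writes to the seqlock
embedded in the neighbour entry. For context, the body of
neigh_ha_snapshot() (unchanged by this hunk, reproduced here as a
sketch) retries the copy until it observes a stable sequence:

	static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
					     const struct net_device *dev)
	{
		unsigned int seq;

		do {
			seq = read_seqbegin(&n->ha_lock);
			memcpy(dst, n->ha, dev->addr_len);
		} while (read_seqretry(&n->ha_lock, seq));
	}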