author     Ingo Molnar <mingo@elte.hu>  2006-07-03 00:24:54 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-07-03 15:27:04 -0700
commit     8a25d5debff2daee280e83e09d8c25d67c26a972 (patch)
tree       3bccfef9acb66fc62863bfd6c16493c5e8c8e394 /kernel/sched.c
parent     4ea2176dfa714882e88180b474e4cbcd888b70af (diff)
[PATCH] lockdep: prove spinlock rwlock locking correctness
Use the lock validator framework to prove spinlock and rwlock locking
correctness.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
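For context, a minimal sketch (not part of this patch) of the ownership-handoff
pattern the change below implements: the outgoing task tells lockdep it no
longer holds the runqueue lock before switch_to(), and the incoming task
re-registers as owner in finish_lock_switch() before unlocking. The names
handoff_out()/handoff_in() and handoff_lock are hypothetical illustrations;
spin_acquire(), spin_release(), the dep_map field and _THIS_IP_ are the real
lockdep interfaces used in the diff, and the sketch only compiles with
CONFIG_DEBUG_LOCK_ALLOC (lockdep) enabled:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(handoff_lock);	/* hypothetical example lock */

/*
 * Outgoing side: the lock stays held, but ownership (as lockdep sees
 * it) is given up - mirrors the spin_release() added to
 * context_switch() below.
 */
static void handoff_out(void)
{
	spin_release(&handoff_lock.dep_map, 1, _THIS_IP_);
}

/*
 * Incoming side: re-register as owner of the still-held lock, then
 * unlock normally - mirrors the spin_acquire() added to
 * finish_lock_switch() below.
 */
static void handoff_in(void)
{
	spin_acquire(&handoff_lock.dep_map, 0, 0, _THIS_IP_);
	spin_unlock_irq(&handoff_lock);
}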
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 10 ++++++++++
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 911829966534..ae4db0185bb2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -308,6 +308,13 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
 	/* this is a valid case when another task releases the spinlock */
 	rq->lock.owner = current;
 #endif
+	/*
+	 * If we are tracking spinlock dependencies then we have to
+	 * fix up the runqueue lock - which gets 'carried over' from
+	 * prev into current:
+	 */
+	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
 	spin_unlock_irq(&rq->lock);
 }
@@ -1778,6 +1785,7 @@ task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
 		WARN_ON(rq->prev_mm);
 		rq->prev_mm = oldmm;
 	}
+	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
 	/* Here we just switch the register state and the stack. */
 	switch_to(prev, next, prev);
@@ -4384,6 +4392,7 @@ asmlinkage long sys_sched_yield(void)
 	 * no need to preempt or enable interrupts:
 	 */
 	__release(rq->lock);
+	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 	_raw_spin_unlock(&rq->lock);
 	preempt_enable_no_resched();
@@ -4447,6 +4456,7 @@ int cond_resched_lock(spinlock_t *lock)
 		spin_lock(lock);
 	}
 	if (need_resched() && __resched_legal()) {
+		spin_release(&lock->dep_map, 1, _THIS_IP_);
 		_raw_spin_unlock(lock);
 		preempt_enable_no_resched();
 		__cond_resched();
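
To round this off, a hedged, simplified sketch of the drop/reschedule/retake
shape that the last hunk annotates (it is not the kernel's actual
cond_resched_lock() body: the need_lockbreak() leg and return-value
bookkeeping are omitted, and it assumes it lives in kernel/sched.c where the
static __cond_resched() is visible). The lockdep release must be explicit
because _raw_spin_unlock() bypasses the annotation a plain spin_unlock()
would perform, while the trailing spin_lock() re-registers ownership through
the normal path:

/* Hypothetical helper illustrating the pattern above. */
static int resched_drop_retake(spinlock_t *lock)
{
	if (!need_resched())
		return 0;

	/*
	 * The raw unlock below skips lockdep, so balance the books by
	 * hand first - exactly as the hunk above does.
	 */
	spin_release(&lock->dep_map, 1, _THIS_IP_);
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	__cond_resched();

	/* A normal spin_lock() re-acquires and re-annotates the lock. */
	spin_lock(lock);
	return 1;
}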