Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r--  kernel/locking/rtmutex.c  65
1 file changed, 6 insertions(+), 59 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 2a9bf2443acc..44a33057a83a 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -142,12 +142,6 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
         WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
-static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
-{
-        return waiter && waiter != PI_WAKEUP_INPROGRESS &&
-                waiter != PI_REQUEUE_INPROGRESS;
-}
-
 /*
  * We can speed up the acquire/release, if there's no debugging state to be
  * set up.
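
[Note: In the RT patchset, PI_WAKEUP_INPROGRESS and PI_REQUEUE_INPROGRESS are small integer constants cast to waiter pointers (defined in rtmutex_common.h) that get parked in task->pi_blocked_on, which is why a plain NULL test was not enough and the helper deleted above existed. A minimal user-space sketch of that sentinel-pointer pattern, with a simplified struct and a hypothetical main(), purely to illustrate the predicate this revert removes:

#include <stdio.h>

/* Simplified stand-in for the kernel's waiter struct. */
struct rt_mutex_waiter { int prio; };

/* Sentinel values mirroring the RT patchset's rtmutex_common.h. */
#define PI_WAKEUP_INPROGRESS    ((struct rt_mutex_waiter *) 1)
#define PI_REQUEUE_INPROGRESS   ((struct rt_mutex_waiter *) 2)

/* True only for a pointer to an actually enqueued waiter. */
static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
{
        return waiter && waiter != PI_WAKEUP_INPROGRESS &&
                waiter != PI_REQUEUE_INPROGRESS;
}

int main(void)
{
        struct rt_mutex_waiter w = { .prio = 10 };

        printf("%d %d %d\n", rt_mutex_real_waiter(NULL),
               rt_mutex_real_waiter(PI_WAKEUP_INPROGRESS),
               rt_mutex_real_waiter(&w));      /* prints: 0 0 1 */
        return 0;
}
]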
@@ -421,8 +415,7 @@ int max_lock_depth = 1024;
 
 static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
 {
-        return rt_mutex_real_waiter(p->pi_blocked_on) ?
-                        p->pi_blocked_on->lock : NULL;
+        return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
 }
 
 /*
@@ -558,7 +551,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
          * reached or the state of the chain has changed while we
          * dropped the locks.
          */
-        if (!rt_mutex_real_waiter(waiter))
+        if (!waiter)
                 goto out_unlock_pi;
 
         /*
@@ -1328,22 +1321,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                 return -EDEADLK;
 
         raw_spin_lock(&task->pi_lock);
-        /*
-         * In the case of futex requeue PI, this will be a proxy
-         * lock. The task will wake unaware that it is enqueued on
-         * this lock. Avoid blocking on two locks and corrupting
-         * pi_blocked_on via the PI_WAKEUP_INPROGRESS
-         * flag. futex_wait_requeue_pi() sets this when it wakes up
-         * before requeue (due to a signal or timeout). Do not enqueue
-         * the task if PI_WAKEUP_INPROGRESS is set.
-         */
-        if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
-                raw_spin_unlock(&task->pi_lock);
-                return -EAGAIN;
-        }
-
-        BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
-
         waiter->task = task;
         waiter->lock = lock;
         waiter->prio = task->prio;
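
[Note: The check deleted above implemented one half of a wakeup/requeue handshake: a task woken in futex_wait_requeue_pi() by a signal or timeout stores PI_WAKEUP_INPROGRESS in its own pi_blocked_on, and the requeue path then backs off with -EAGAIN rather than enqueueing the task on the proxy lock. A hedged user-space sketch of that handshake; the types and function names below are simplified stand-ins, not kernel identifiers:

#include <errno.h>
#include <stdio.h>

struct waiter { int dummy; };
struct task { struct waiter *pi_blocked_on; };

#define WAKEUP_INPROGRESS ((struct waiter *) 1)

/* Waiter side: on signal/timeout, publish the sentinel first. */
static void wakeup_on_timeout(struct task *t)
{
        t->pi_blocked_on = WAKEUP_INPROGRESS;
}

/* Requeue side: refuse to enqueue a task that is already waking. */
static int block_on_proxy_lock(struct task *t, struct waiter *w)
{
        if (t->pi_blocked_on == WAKEUP_INPROGRESS)
                return -EAGAIN;         /* caller retries or aborts */
        t->pi_blocked_on = w;           /* normal enqueue path */
        return 0;
}

int main(void)
{
        struct task t = { NULL };
        struct waiter w = { 0 };

        wakeup_on_timeout(&t);          /* the wakeup wins the race... */
        printf("%d\n", block_on_proxy_lock(&t, &w));    /* ...prints -EAGAIN (-11 on Linux) */
        return 0;
}
]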
@@ -1367,7 +1344,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                 rt_mutex_enqueue_pi(owner, waiter);
 
                 rt_mutex_adjust_prio(owner);
-                if (rt_mutex_real_waiter(owner->pi_blocked_on))
+                if (owner->pi_blocked_on)
                         chain_walk = 1;
         } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
                 chain_walk = 1;
@@ -1467,7 +1444,7 @@ static void remove_waiter(struct rt_mutex *lock,
 {
         bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
         struct task_struct *owner = rt_mutex_owner(lock);
-        struct rt_mutex *next_lock = NULL;
+        struct rt_mutex *next_lock;
 
         lockdep_assert_held(&lock->wait_lock);
@@ -1493,8 +1470,7 @@ static void remove_waiter(struct rt_mutex *lock,
         rt_mutex_adjust_prio(owner);
 
         /* Store the lock on which owner is blocked or NULL */
-        if (rt_mutex_real_waiter(owner->pi_blocked_on))
-                next_lock = task_blocked_on_lock(owner);
+        next_lock = task_blocked_on_lock(owner);
 
         raw_spin_unlock(&owner->pi_lock);
@@ -1530,8 +1506,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
         raw_spin_lock_irqsave(&task->pi_lock, flags);
 
         waiter = task->pi_blocked_on;
-        if (!rt_mutex_real_waiter(waiter) ||
-            rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+        if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                 return;
         }
@@ -2350,34 +2325,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
         if (try_to_take_rt_mutex(lock, task, NULL))
                 return 1;
 
-#ifdef CONFIG_PREEMPT_RT_FULL
-        /*
-         * In PREEMPT_RT there's an added race.
-         * If the task that we are about to requeue times out,
-         * it can set PI_WAKEUP_INPROGRESS. This tells the requeue
-         * to skip this task. But right after the task sets
-         * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
-         * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
-         * This will replace the PI_WAKEUP_INPROGRESS with the actual
-         * lock that it blocks on. We *must not* place this task
-         * on this proxy lock in that case.
-         *
-         * To prevent this race, we first take the task's pi_lock
-         * and check if it has updated its pi_blocked_on. If it has,
-         * we assume that it woke up and we return -EAGAIN.
-         * Otherwise, we set the task's pi_blocked_on to
-         * PI_REQUEUE_INPROGRESS, so that if the task is waking up
-         * it will know that we are in the process of requeuing it.
-         */
-        raw_spin_lock(&task->pi_lock);
-        if (task->pi_blocked_on) {
-                raw_spin_unlock(&task->pi_lock);
-                return -EAGAIN;
-        }
-        task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
-        raw_spin_unlock(&task->pi_lock);
-#endif
-
         /* We enforce deadlock detection for futexes */
         ret = task_blocks_on_rt_mutex(lock, waiter, task,
                                       RT_MUTEX_FULL_CHAINWALK);
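
[Note: The deleted #ifdef block above is the other half of the handshake: the requeue side claims the task under its pi_lock by writing PI_REQUEUE_INPROGRESS, and backs off with -EAGAIN if pi_blocked_on was already set by a concurrent wakeup. A hedged user-space sketch of that claim step, using a pthread mutex as a stand-in for pi_lock; all identifiers here are illustrative, not kernel APIs:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct waiter { int dummy; };

#define REQUEUE_INPROGRESS ((struct waiter *) 2)

struct task {
        pthread_mutex_t pi_lock;
        struct waiter *pi_blocked_on;
};

/* Requeue side: claim the task, or back off if it is already waking. */
static int start_proxy_lock(struct task *t)
{
        pthread_mutex_lock(&t->pi_lock);
        if (t->pi_blocked_on) {                 /* woke up concurrently */
                pthread_mutex_unlock(&t->pi_lock);
                return -EAGAIN;
        }
        t->pi_blocked_on = REQUEUE_INPROGRESS;  /* claim for requeue */
        pthread_mutex_unlock(&t->pi_lock);
        return 0;
}

int main(void)
{
        struct task idle = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct task waking = { PTHREAD_MUTEX_INITIALIZER, (struct waiter *) 1 };

        printf("idle task:   %d\n", start_proxy_lock(&idle));   /* 0 */
        printf("waking task: %d\n", start_proxy_lock(&waking)); /* -EAGAIN */
        return 0;
}

(Build with -pthread.)]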