-rw-r--r--  include/linux/futex.h  31
-rw-r--r--  kernel/exit.c          13
-rw-r--r--  kernel/futex.c         37
3 files changed, 40 insertions, 41 deletions
diff --git a/include/linux/futex.h b/include/linux/futex.h
index fb698e25f210..ff143f766b46 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -55,6 +55,7 @@ union futex_key {
#ifdef CONFIG_FUTEX
enum {
FUTEX_STATE_OK,
+ FUTEX_STATE_EXITING,
FUTEX_STATE_DEAD,
};
@@ -69,33 +70,7 @@ static inline void futex_init_task(struct task_struct *tsk)
tsk->futex_state = FUTEX_STATE_OK;
}
-/**
- * futex_exit_done - Sets the tasks futex state to FUTEX_STATE_DEAD
- * @tsk: task to set the state on
- *
- * Set the futex exit state of the task lockless. The futex waiter code
- * observes that state when a task is exiting and loops until the task has
- * actually finished the futex cleanup. The worst case for this is that the
- * waiter runs through the wait loop until the state becomes visible.
- *
- * This has two callers:
- *
- * - futex_mm_release() after the futex exit cleanup has been done
- *
- * - do_exit() from the recursive fault handling path.
- *
- * In case of a recursive fault this is best effort. Either the futex exit
- * code has run already or not. If the OWNER_DIED bit has been set on the
- * futex then the waiter can take it over. If not, the problem is pushed
- * back to user space. If the futex exit code did not run yet, then an
- * already queued waiter might block forever, but there is nothing which
- * can be done about that.
- */
-static inline void futex_exit_done(struct task_struct *tsk)
-{
- tsk->futex_state = FUTEX_STATE_DEAD;
-}
-
+void futex_exit_recursive(struct task_struct *tsk);
void futex_exit_release(struct task_struct *tsk);
void futex_exec_release(struct task_struct *tsk);
@@ -103,7 +78,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3);
#else
static inline void futex_init_task(struct task_struct *tsk) { }
-static inline void futex_exit_done(struct task_struct *tsk) { }
+static inline void futex_exit_recursive(struct task_struct *tsk) { }
static inline void futex_exit_release(struct task_struct *tsk) { }
static inline void futex_exec_release(struct task_struct *tsk) { }
#endif
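
The header change above drops the inline futex_exit_done() helper in favour of out-of-line futex_exit_recursive()/futex_exit_release() and adds FUTEX_STATE_EXITING between FUTEX_STATE_OK and FUTEX_STATE_DEAD. As a rough user-space model of that lifecycle (a sketch only, not kernel code; task_model and waiter_can_attach are made-up names), a waiter may attach to an owner only while the owner is still in FUTEX_STATE_OK:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { FUTEX_STATE_OK, FUTEX_STATE_EXITING, FUTEX_STATE_DEAD };

/* Hypothetical stand-in for task_struct::futex_state. */
struct task_model {
	_Atomic int futex_state;
};

/*
 * Attaching to a PI owner is only valid while the owner is in
 * FUTEX_STATE_OK; EXITING means "retry until the owner is DEAD",
 * DEAD means the owner has finished (or skipped) its futex cleanup.
 */
static bool waiter_can_attach(struct task_model *owner)
{
	return atomic_load(&owner->futex_state) == FUTEX_STATE_OK;
}

int main(void)
{
	struct task_model owner = { .futex_state = FUTEX_STATE_OK };

	printf("OK: can attach = %d\n", waiter_can_attach(&owner));
	atomic_store(&owner.futex_state, FUTEX_STATE_EXITING);
	printf("EXITING: can attach = %d\n", waiter_can_attach(&owner));
	atomic_store(&owner.futex_state, FUTEX_STATE_DEAD);
	printf("DEAD: can attach = %d\n", waiter_can_attach(&owner));
	return 0;
}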
diff --git a/kernel/exit.c b/kernel/exit.c
index 915514ceca0c..57cb0eb1271c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -803,23 +803,12 @@ void __noreturn do_exit(long code)
*/
if (unlikely(tsk->flags & PF_EXITING)) {
pr_alert("Fixing recursive fault but reboot is needed!\n");
- futex_exit_done(tsk);
+ futex_exit_recursive(tsk);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
exit_signals(tsk); /* sets PF_EXITING */
- /*
- * Ensure that all new tsk->pi_lock acquisitions must observe
- * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
- */
- smp_mb();
- /*
- * Ensure that we must observe the pi_state in exit_mm() ->
- * mm_release() -> exit_pi_state_list().
- */
- raw_spin_lock_irq(&tsk->pi_lock);
- raw_spin_unlock_irq(&tsk->pi_lock);
if (unlikely(in_atomic())) {
pr_info("note: %s[%d] exited with preempt_count %d\n",
diff --git a/kernel/futex.c b/kernel/futex.c
index 53de6574a134..4960e327c375 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -3702,10 +3702,45 @@ void futex_exec_release(struct task_struct *tsk)
exit_pi_state_list(tsk);
}
+/**
+ * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
+ * @tsk: task to set the state on
+ *
+ * Set the futex exit state of the task lockless. The futex waiter code
+ * observes that state when a task is exiting and loops until the task has
+ * actually finished the futex cleanup. The worst case for this is that the
+ * waiter runs through the wait loop until the state becomes visible.
+ *
+ * This is called from the recursive fault handling path in do_exit().
+ *
+ * This is best effort. Either the futex exit code has run already or
+ * not. If the OWNER_DIED bit has been set on the futex then the waiter can
+ * take it over. If not, the problem is pushed back to user space. If the
+ * futex exit code did not run yet, then an already queued waiter might
+ * block forever, but there is nothing which can be done about that.
+ */
+void futex_exit_recursive(struct task_struct *tsk)
+{
+ tsk->futex_state = FUTEX_STATE_DEAD;
+}
+
void futex_exit_release(struct task_struct *tsk)
{
+ tsk->futex_state = FUTEX_STATE_EXITING;
+ /*
+ * Ensure that all new tsk->pi_lock acquisitions must observe
+ * FUTEX_STATE_EXITING. Serializes against attach_to_pi_owner().
+ */
+ smp_mb();
+ /*
+ * Ensure that we must observe the pi_state in exit_pi_state_list().
+ */
+ raw_spin_lock_irq(&tsk->pi_lock);
+ raw_spin_unlock_irq(&tsk->pi_lock);
+
futex_exec_release(tsk);
- futex_exit_done(tsk);
+
+ tsk->futex_state = FUTEX_STATE_DEAD;
}
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
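
With the serialization moved into kernel/futex.c, futex_exit_release() now publishes FUTEX_STATE_EXITING, issues the full barrier, and takes/drops pi_lock before running the cleanup and finally marking the task FUTEX_STATE_DEAD, so any waiter that acquires pi_lock after that point must observe at least FUTEX_STATE_EXITING. A rough user-space model of that handshake (a sketch only; a pthread mutex stands in for pi_lock, and exiting_task_side/waiter_side are made-up names):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { STATE_OK, STATE_EXITING, STATE_DEAD };

static _Atomic int futex_state = STATE_OK;
static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;

/* Exit side: publish EXITING, then take/drop the lock, then clean up. */
static void *exiting_task_side(void *arg)
{
	(void)arg;
	atomic_store(&futex_state, STATE_EXITING);
	/* The seq_cst store above plays the role of the smp_mb() pairing. */
	pthread_mutex_lock(&pi_lock);	/* raw_spin_lock_irq(&tsk->pi_lock) */
	pthread_mutex_unlock(&pi_lock);
	/* ... robust list / pi_state cleanup would run here ... */
	atomic_store(&futex_state, STATE_DEAD);
	return NULL;
}

/* Waiter side: sample the state under the lock, as attach_to_pi_owner() does. */
static void *waiter_side(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pi_lock);
	int state = atomic_load(&futex_state);
	pthread_mutex_unlock(&pi_lock);
	/*
	 * If this critical section runs after the exit side's lock/unlock,
	 * state is guaranteed to be EXITING or DEAD, never a stale OK.
	 */
	printf("waiter saw state %d\n", state);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, exiting_task_side, NULL);
	pthread_create(&b, NULL, waiter_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}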