Diffstat (limited to 'kernel/exit.c')
-rw-r--r--  kernel/exit.c  64
1 file changed, 45 insertions(+), 19 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index a1801b6397bc..5e99ea7dc3fa 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -24,13 +24,13 @@
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
-#include <linux/signalfd.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
+#include <linux/freezer.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
@@ -44,6 +44,7 @@
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
+#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -91,14 +92,6 @@ static void __exit_signal(struct task_struct *tsk)
sighand = rcu_dereference(tsk->sighand);
spin_lock(&sighand->siglock);
- /*
- * Notify that this sighand has been detached. This must
- * be called with the tsk->sighand lock held. Also, this
- * access tsk->sighand internally, so it must be called
- * before tsk->sighand is reset.
- */
- signalfd_detach_locked(tsk);
-
posix_cpu_timers_exit(tsk);
if (atomic_dec_and_test(&sig->count))
posix_cpu_timers_exit_group(tsk);
@@ -129,9 +122,9 @@ static void __exit_signal(struct task_struct *tsk)
sig->maj_flt += tsk->maj_flt;
sig->nvcsw += tsk->nvcsw;
sig->nivcsw += tsk->nivcsw;
- sig->sched_time += tsk->sched_time;
sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk);
+ sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
sig = NULL; /* Marker for below. */
}
@@ -189,7 +182,6 @@ repeat:
zap_leader = (leader->exit_signal == -1);
}
- sched_exit(p);
write_unlock_irq(&tasklist_lock);
proc_flush_task(p);
release_thread(p);
@@ -298,7 +290,7 @@ static void reparent_to_kthreadd(void)
/* Set the exit signal to SIGCHLD so we signal init on exit */
current->exit_signal = SIGCHLD;
- if (!has_rt_policy(current) && (task_nice(current) < 0))
+ if (task_nice(current) < 0)
set_user_nice(current, 0);
/* cpus_allowed? */
/* rt_priority? */
@@ -395,6 +387,11 @@ void daemonize(const char *name, ...)
* they would be locked into memory.
*/
exit_mm(current);
+ /*
+ * We don't want to have TIF_FREEZE set if the system-wide hibernation
+ * or suspend transition begins right now.
+ */
+ current->flags |= PF_NOFREEZE;
set_special_pids(1, 1);
proc_clear_tty(current);
@@ -596,6 +593,8 @@ static void exit_mm(struct task_struct * tsk)
tsk->mm = NULL;
up_read(&mm->mmap_sem);
enter_lazy_tlb(mm, current);
+ /* We don't want this task to be frozen prematurely */
+ clear_freeze_flag(tsk);
task_unlock(tsk);
mmput(mm);
}
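The two freezer-related additions above (PF_NOFREEZE in daemonize() and clear_freeze_flag() in exit_mm()) both keep a kernel-daemon or exiting task from being caught by the suspend/hibernation freezer. As a rough sketch of the helpers involved -- quoted from memory of that era's include/linux/freezer.h, so treat the exact bodies as an assumption rather than part of this diff:

        /* Is a freeze request pending for this task? (assumption: approximate) */
        static inline int freezing(struct task_struct *p)
        {
                return test_tsk_thread_flag(p, TIF_FREEZE);
        }

        /* Withdraw a pending freeze request. (assumption: approximate) */
        static inline void clear_freeze_flag(struct task_struct *p)
        {
                clear_tsk_thread_flag(p, TIF_FREEZE);
        }

daemonize() opts the task out permanently via PF_NOFREEZE (tasks with that flag set are skipped by the freezer), while exit_mm() only drops a freeze request that may already have been queued for the dying task.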
@@ -816,7 +815,7 @@ static void exit_notify(struct task_struct *tsk)
__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
}
- /* Let father know we died
+ /* Let father know we died
*
* Thread signals are configurable, but you aren't going to use
* that to send signals to arbitary processes.
@@ -829,9 +828,7 @@ static void exit_notify(struct task_struct *tsk)
* If our self_exec id doesn't match our parent_exec_id then
* we have changed execution domain as these two values started
* the same after a fork.
- *
*/
-
if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
( tsk->parent_exec_id != t->self_exec_id ||
tsk->self_exec_id != tsk->parent_exec_id)
@@ -851,9 +848,7 @@ static void exit_notify(struct task_struct *tsk)
}
state = EXIT_ZOMBIE;
- if (tsk->exit_signal == -1 &&
- (likely(tsk->ptrace == 0) ||
- unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
+ if (tsk->exit_signal == -1 && likely(!tsk->ptrace))
state = EXIT_DEAD;
tsk->exit_state = state;
@@ -870,6 +865,34 @@ static void exit_notify(struct task_struct *tsk)
release_task(tsk);
}
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static void check_stack_usage(void)
+{
+ static DEFINE_SPINLOCK(low_water_lock);
+ static int lowest_to_date = THREAD_SIZE;
+ unsigned long *n = end_of_stack(current);
+ unsigned long free;
+
+ while (*n == 0)
+ n++;
+ free = (unsigned long)n - (unsigned long)end_of_stack(current);
+
+ if (free >= lowest_to_date)
+ return;
+
+ spin_lock(&low_water_lock);
+ if (free < lowest_to_date) {
+ printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
+ "left\n",
+ current->comm, free);
+ lowest_to_date = free;
+ }
+ spin_unlock(&low_water_lock);
+}
+#else
+static inline void check_stack_usage(void) {}
+#endif
+
fastcall NORET_TYPE void do_exit(long code)
{
struct task_struct *tsk = current;
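The new check_stack_usage() helper is a low-water-mark probe: on configurations where the thread stack is zero-filled at allocation (which is what CONFIG_DEBUG_STACK_USAGE arranges on the architectures that support it), every word between end_of_stack() and the first non-zero word has never been written, so that distance is the amount of stack still free at the deepest point reached. A minimal, self-contained userspace analogue of the same scan -- buffer name and sizes are invented purely for illustration:

        #include <stdio.h>
        #include <string.h>

        #define FAKE_STACK_SIZE 8192

        /* Stand-in for a kernel thread stack: grows down from the top and is
         * zero-filled when it is allocated. */
        static unsigned long fake_stack[FAKE_STACK_SIZE / sizeof(unsigned long)];

        static unsigned long stack_bytes_never_used(void)
        {
                unsigned long *n = fake_stack;  /* plays the role of end_of_stack() */

                /* Skip the still-zero (never written) words at the shallow end. */
                while (*n == 0)
                        n++;
                return (unsigned long)n - (unsigned long)fake_stack;
        }

        int main(void)
        {
                /* Simulate a call chain that dirtied the top 3000 bytes. */
                memset((char *)fake_stack + FAKE_STACK_SIZE - 3000, 0xaa, 3000);

                printf("%lu bytes of stack were never used\n",
                       stack_bytes_never_used());
                return 0;
        }

Like the kernel helper, the scan assumes something non-zero exists further up the stack, which is always true for a task currently executing do_exit(); the spinlock in the real code only serializes updates of the record, so the unlocked early return can race harmlessly.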
@@ -949,9 +972,12 @@ fastcall NORET_TYPE void do_exit(long code)
if (unlikely(tsk->compat_robust_list))
compat_exit_robust_list(tsk);
#endif
+ if (group_dead)
+ tty_audit_exit();
if (unlikely(tsk->audit_context))
audit_free(tsk);
+ tsk->exit_code = code;
taskstats_exit(tsk, group_dead);
exit_mm(tsk);
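For context on the tty_audit_exit() placement: group_dead is computed near the top of do_exit() (outside this hunk) as, if memory serves, roughly

        group_dead = atomic_dec_and_test(&tsk->signal->live);

i.e. it is true only for the last exiting thread of the thread group, so the process's pending TTY audit records are flushed exactly once per process rather than once per thread.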
@@ -961,6 +987,7 @@ fastcall NORET_TYPE void do_exit(long code)
exit_sem(tsk);
__exit_files(tsk);
__exit_fs(tsk);
+ check_stack_usage();
exit_thread();
cpuset_exit(tsk);
exit_keys(tsk);
@@ -972,7 +999,6 @@ fastcall NORET_TYPE void do_exit(long code)
if (tsk->binfmt)
module_put(tsk->binfmt->module);
- tsk->exit_code = code;
proc_exit_connector(tsk);
exit_task_namespaces(tsk);
exit_notify(tsk);
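Note that the "tsk->exit_code = code" assignment removed here is the same one added earlier in this diff, just above taskstats_exit(tsk, group_dead). Presumably the point of the move is that the exit code is already recorded by the time the per-task accounting record is filled in; the consumer looks, again from memory and therefore as an assumption, roughly like:

        /* kernel/tsacct.c, bacct_add_tsk() -- approximate quote */
        stats->ac_exitcode = tsk->exit_code;

With the old ordering that field would typically still be 0 when the record was sent.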