Diffstat (limited to 'kernel')
 kernel/watchdog.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index e092e5a6cdd7..274e737a92e6 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -53,6 +53,9 @@ static cpumask_t __read_mostly watchdog_cpus;
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
+static __read_mostly int soft_lockup_detected;
+static __read_mostly int hard_lockup_detected;
+
/* boot commands */
/*
* Should we panic when a soft-lockup or hard-lockup occurs:
@@ -101,6 +104,11 @@ static int __init nosoftlockup_setup(char *str)
__setup("nosoftlockup", nosoftlockup_setup);
/* */
+int watchdog_get_lockup_state(void)
+{
+	return (soft_lockup_detected << 8) | hard_lockup_detected;
+}
+
/*
* Hard-lockup warnings should be triggered after just a few seconds. Soft-
* lockups can have false positives under extreme conditions. So we generally
@@ -253,6 +261,8 @@ static void watchdog_check_hardlockup_other_cpu(void)
if (per_cpu(hard_watchdog_warn, next_cpu) == true)
return;
+ hard_lockup_detected = 1;
+
if (hardlockup_panic)
panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
else
@@ -310,6 +320,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
if (is_hardlockup()) {
int this_cpu = smp_processor_id();
+ hard_lockup_detected = 1;
+
/* only print hardlockups once */
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
@@ -391,6 +403,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
if (__this_cpu_read(soft_watchdog_warn) == true)
return HRTIMER_RESTART;
+ soft_lockup_detected = 1;
+
printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
smp_processor_id(), duration,
current->comm, task_pid_nr(current));
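
The new watchdog_get_lockup_state() packs the soft-lockup flag into the high
byte of its return value and the hard-lockup flag into the low byte, which is
why the function combines the two flags with a bitwise OR rather than a
logical one. Below is a minimal userspace-style sketch of how a caller could
decode that encoding; the caller and the stand-in function are hypothetical
and not part of this patch.

#include <stdio.h>

/*
 * Stand-in for the kernel's watchdog_get_lockup_state(), for
 * illustration only. It mirrors the patch's encoding: soft-lockup
 * state in bits 8-15, hard-lockup state in bits 0-7.
 */
static int watchdog_get_lockup_state(void)
{
	int soft_lockup_detected = 1;	/* example state */
	int hard_lockup_detected = 0;

	return (soft_lockup_detected << 8) | hard_lockup_detected;
}

int main(void)
{
	int state = watchdog_get_lockup_state();

	/* High byte carries the soft-lockup flag. */
	if (state & 0xff00)
		printf("soft lockup was detected\n");

	/* Low byte carries the hard-lockup flag. */
	if (state & 0x00ff)
		printf("hard lockup was detected\n");

	if (!state)
		printf("no lockups detected\n");

	return 0;
}

Had the combination used a logical OR, the shift would be pointless: the
result would collapse to 0 or 1 and the caller could no longer tell which
kind of lockup fired.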