author		Thomas Gleixner <tglx@linutronix.de>	2011-04-27 14:16:42 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2011-05-02 21:39:15 +0200
commit		99ee5315dac6211e972fa3f23bcc9a0343ff58c4 (patch)
tree		6663d6ceaabcb9bac03193e2781cdbe6a139f70c /kernel
parent		b12a03ce4880bd13786a98db6de494a3e0123129 (diff)
timerfd: Allow timers to be cancelled when clock was set
Some applications must be aware of clock realtime being set backwards. A simple example is a clock applet which arms a timer for the next minute display. If clock realtime is set backwards, the applet displays a stale time for however long the clock was set back. Because no interface for this exists, such applications poll the time instead.

Extend the timerfd interface with a flag which puts the timer onto a separate internal realtime clock base. All timers on this base are expired whenever the clock is set. The timerfd core records the monotonic offset when the timer is created. When the timer is armed, the current offset is compared to the previously recorded one; if it has changed, timerfd_settime() returns -ECANCELED. When a timer is read, the offset is compared again and -ECANCELED is returned to user space if it changed. Periodic timers are not rearmed in the cancellation case.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: John Stultz <johnstul@us.ibm.com>
Cc: Chris Friesen <chris.friesen@genband.com>
Tested-by: Kay Sievers <kay.sievers@vrfy.org>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Davide Libenzi <davidel@xmailserver.org>
Reviewed-by: Alexander Shishkin <virtuoso@slind.org>
Link: http://lkml.kernel.org/r/%3Calpine.LFD.2.02.1104271359580.3323%40ionos%3E
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
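For illustration, a minimal userspace sketch of how such a cancelable timer would be consumed. The flag name TFD_TIMER_CANCEL_ON_SET and its value are taken from the later mainline timerfd ABI and are assumptions here; this patch only adds the kernel-internal clock base and offset helper.

/*
 * Hypothetical userspace consumer: arm an absolute clock realtime timer
 * that is cancelled when the clock is set.
 */
#include <sys/timerfd.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#ifndef TFD_TIMER_CANCEL_ON_SET
#define TFD_TIMER_CANCEL_ON_SET (1 << 1)	/* assumed: value used by later kernels */
#endif

int main(void)
{
	struct itimerspec its;
	uint64_t ticks;
	int fd;

	fd = timerfd_create(CLOCK_REALTIME, 0);
	if (fd < 0)
		return 1;

	/* Expire at the start of the next minute, as an absolute time. */
	memset(&its, 0, sizeof(its));
	clock_gettime(CLOCK_REALTIME, &its.it_value);
	its.it_value.tv_sec += 60 - (its.it_value.tv_sec % 60);
	its.it_value.tv_nsec = 0;

	if (timerfd_settime(fd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET,
			    &its, NULL) < 0)
		return 1;

	/*
	 * read() blocks until the timer expires; if clock realtime is set
	 * in the meantime, it fails with ECANCELED instead.
	 */
	if (read(fd, &ticks, sizeof(ticks)) < 0 && errno == ECANCELED)
		printf("clock was set, redraw the display now\n");
	else
		printf("minute boundary reached\n");

	close(fd);
	return 0;
}

The point is that the blocking read() fails with ECANCELED as soon as clock realtime is set, so a clock applet can redraw immediately instead of polling the time.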
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/hrtimer.c		36
-rw-r--r--	kernel/time/timekeeping.c	15
2 files changed, 50 insertions, 1 deletion
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c145ed643bca..eabcbd781433 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -78,6 +78,11 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
.get_time = &ktime_get_boottime,
.resolution = KTIME_LOW_RES,
},
+ {
+ .index = CLOCK_REALTIME_COS,
+ .get_time = &ktime_get_real,
+ .resolution = KTIME_LOW_RES,
+ },
}
};
@@ -85,6 +90,7 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
+ [CLOCK_REALTIME_COS] = HRTIMER_BASE_REALTIME_COS,
};
static inline int hrtimer_clockid_to_base(clockid_t clock_id)
@@ -110,6 +116,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
+ base->clock_base[HRTIMER_BASE_REALTIME_COS].softirq_time = xtim;
}
/*
@@ -479,6 +486,8 @@ static inline void debug_deactivate(struct hrtimer *timer)
trace_hrtimer_cancel(timer);
}
+static void hrtimer_expire_cancelable(struct hrtimer_cpu_base *cpu_base);
+
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -715,9 +724,14 @@ static void retrigger_next_event(void *arg)
struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
struct timespec realtime_offset, xtim, wtm, sleep;
- if (!hrtimer_hres_active())
+ if (!hrtimer_hres_active()) {
+ raw_spin_lock(&base->lock);
+ hrtimer_expire_cancelable(base);
+ raw_spin_unlock(&base->lock);
return;
+ }
+ /* Optimized out for !HIGH_RES */
get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
@@ -727,6 +741,10 @@ static void retrigger_next_event(void *arg)
timespec_to_ktime(realtime_offset);
base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
timespec_to_ktime(sleep);
+ base->clock_base[HRTIMER_BASE_REALTIME_COS].offset =
+ timespec_to_ktime(realtime_offset);
+
+ hrtimer_expire_cancelable(base);
hrtimer_force_reprogram(base, 0);
raw_spin_unlock(&base->lock);
@@ -1222,6 +1240,22 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
+static void hrtimer_expire_cancelable(struct hrtimer_cpu_base *cpu_base)
+{
+ struct timerqueue_node *node;
+ struct hrtimer_clock_base *base;
+ ktime_t now = ktime_get_real();
+
+ base = &cpu_base->clock_base[HRTIMER_BASE_REALTIME_COS];
+
+ while ((node = timerqueue_getnext(&base->active))) {
+ struct hrtimer *timer;
+
+ timer = container_of(node, struct hrtimer, node);
+ __run_hrtimer(timer, &now);
+ }
+}
+
#ifdef CONFIG_HIGH_RES_TIMERS
/*
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index a61b8fa2d39a..342408cf68dd 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1099,6 +1099,21 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
}
/**
+ * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
+ */
+ktime_t ktime_get_monotonic_offset(void)
+{
+ unsigned long seq;
+ struct timespec wtom;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ wtom = wall_to_monotonic;
+ } while (read_seqretry(&xtime_lock, seq));
+ return timespec_to_ktime(wtom);
+}
+
+/**
* xtime_update() - advances the timekeeping infrastructure
* @ticks: number of ticks, that have elapsed since the last call.
*