author	Linus Torvalds <torvalds@linux-foundation.org>	2008-05-05 17:31:14 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-05-05 17:31:14 -0700
commit	bb896afe2089575ca1bb1fbf3f07b934e1ba999b (patch)
tree	7300f9b4e3e267fe97d898440c72ff2c4c327f23 /include/linux
parent	2e83fc4df5f27dfc1b53044c4f142b2f9d1db08c (diff)
parent	aac6abca858386438d9a7233c3471d2ecfa2f704 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-fixes
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-fixes:
  sched: default to n for GROUP_SCHED and FAIR_GROUP_SCHED
  sched: add optional support for CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  sched, x86: add HAVE_UNSTABLE_SCHED_CLOCK
  sched: fix cpu clock
  sched: fair-group: fix a Div0 error of the fair group scheduler
  sched: fix missing locking in sched_domains code
  sched: make clock sync tunable by architecture code
  sched: fix debugging
  sched: fix sched_info_switch not being called according to documentation
  sched: fix hrtick_start_fair and CPU-Hotplug
  sched: fix SCHED_FAIR wake-idle logic error
  sched: fix RT task-wakeup logic
  sched: add statics, don't return void expressions
  sched: add debug checks to idle functions
  sched: remove old sched doc
  sched: make rt_sched_class, idle_sched_class static
  sched: optimize calc_delta_mine()
  sched: fix normalized sleeper
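The sched.h hunk below gives architectures that do not select CONFIG_HAVE_UNSTABLE_SCHED_CLOCK a set of inline stubs, so callers can use the per-CPU clock interface unconditionally and fall back to plain sched_clock(). A minimal illustrative sketch of such a caller (example_local_clock() is a hypothetical name, not part of this merge):

#include <linux/sched.h>
#include <linux/smp.h>

/*
 * Illustrative sketch only, not from this patch: read the per-CPU
 * scheduler clock through the new interface.  Without
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK the inline stub in sched.h makes
 * sched_clock_cpu() return sched_clock(), so this compiles and behaves
 * the same either way.
 */
static u64 example_local_clock(void)
{
	/* raw_smp_processor_id(): skip the preemption check for this sketch */
	return sched_clock_cpu(raw_smp_processor_id());
}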
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/sched.h	38
1 file changed, 37 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 03c238088aee..0c35b0343a76 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -158,6 +158,8 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
}
#endif
+extern unsigned long long time_sync_thresh;
+
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
@@ -1551,6 +1553,35 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
extern unsigned long long sched_clock(void);
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_init(void)
+{
+}
+
+static inline u64 sched_clock_cpu(int cpu)
+{
+ return sched_clock();
+}
+
+static inline void sched_clock_tick(void)
+{
+}
+
+static inline void sched_clock_idle_sleep_event(void)
+{
+}
+
+static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+{
+}
+#else
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+extern void sched_clock_tick(void);
+extern void sched_clock_idle_sleep_event(void);
+extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+#endif
+
/*
* For kernel-internal use: high-speed (but slightly incorrect) per-cpu
* clock constructed from sched_clock():
@@ -1977,6 +2008,11 @@ static inline void clear_tsk_need_resched(struct task_struct *tsk)
clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
+static inline int test_tsk_need_resched(struct task_struct *tsk)
+{
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+}
+
static inline int signal_pending(struct task_struct *p)
{
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
@@ -1991,7 +2027,7 @@ static inline int fatal_signal_pending(struct task_struct *p)
static inline int need_resched(void)
{
- return unlikely(test_thread_flag(TIF_NEED_RESCHED));
+ return unlikely(test_tsk_need_resched(current));
}
/*