author	Peter Boonstoppel <pboonstoppel@nvidia.com>	2012-08-09 15:34:47 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-02-13 11:51:19 -0800
commit	f61eb9ceb26cee3fdbb8c7a4920f171f7661fb4f (patch)
tree	7ef1b69d19fdc57f57ce58dc78988ce4a653aa8e
parent	1e5c13ec422f665432bfc9f7c5fc1f9fd614afd3 (diff)
sched: Unthrottle rt runqueues in __disable_runtime()
commit a4c96ae319b8047f62dedbe1eac79e321c185749 upstream.

migrate_tasks() uses _pick_next_task_rt() to get tasks from the
real-time runqueues to be migrated. When rt_rq is throttled
_pick_next_task_rt() won't return anything, in which case
migrate_tasks() can't move all threads over and gets stuck in an
infinite loop.

Instead unthrottle rt runqueues before migrating tasks.

Additionally: move unthrottle_offline_cfs_rqs() to rq_offline_fair()

Signed-off-by: Peter Boonstoppel <pboonstoppel@nvidia.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Turner <pjt@google.com>
Link: http://lkml.kernel.org/r/5FBF8E85CA34454794F0F7ECBA79798F379D3648B7@HQMAIL04.nvidia.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
[ lizf: backported to 3.4: adjust context ]
Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
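For context, a rough sketch of the loop in question. This approximates the
3.4-era migrate_tasks() in kernel/sched/core.c, trimmed to the control flow
relevant to this patch; it is not the verbatim source:

/* Approximation of the 3.4-era migrate_tasks(), trimmed for
 * illustration; not the verbatim source. */
static void migrate_tasks(unsigned int dead_cpu)
{
	struct rq *rq = cpu_rq(dead_cpu);
	struct task_struct *next, *stop = rq->stop;
	int dest_cpu;

	/* Park the stop task so pick_next_task() can't return it. */
	rq->stop = NULL;

	for ( ; ; ) {
		/* This thread is running; bail when it is the only
		 * remaining one. */
		if (rq->nr_running == 1)
			break;

		/* With rt_rq throttled, _pick_next_task_rt() skips its
		 * runnable tasks, so the queue can never be drained down
		 * to 1 and this loop never terminates -- the hang this
		 * patch fixes. */
		next = pick_next_task(rq);
		BUG_ON(!next);
		next->sched_class->put_prev_task(rq, next);

		/* Move @next to some still-online CPU. */
		dest_cpu = select_fallback_rq(dead_cpu, next);
		raw_spin_unlock(&rq->lock);
		__migrate_task(next, dead_cpu, dest_cpu);
		raw_spin_lock(&rq->lock);
	}

	rq->stop = stop;
}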
 kernel/sched/core.c  | 3 ---
 kernel/sched/fair.c  | 7 +++++--
 kernel/sched/rt.c    | 1 +
 kernel/sched/sched.h | 1 -
 4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3a5b31796edf..7522816cd7f6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5301,9 +5301,6 @@ static void migrate_tasks(unsigned int dead_cpu)
 	 */
 	rq->stop = NULL;
 
-	/* Ensure any throttled groups are reachable by pick_next_task */
-	unthrottle_offline_cfs_rqs(rq);
-
 	for ( ; ; ) {
 		/*
 		 * There's this thread running, bail when that's the only
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 363df3702c20..93be350c9b63 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2071,7 +2071,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void unthrottle_offline_cfs_rqs(struct rq *rq)
 {
 	struct cfs_rq *cfs_rq;
 
@@ -2125,7 +2125,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 	return NULL;
 }
 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
-void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 
 #endif /* CONFIG_CFS_BANDWIDTH */
 
@@ -5171,6 +5171,9 @@ static void rq_online_fair(struct rq *rq)
 static void rq_offline_fair(struct rq *rq)
 {
 	update_sysctl();
+
+	/* Ensure any throttled groups are reachable by pick_next_task */
+	unthrottle_offline_cfs_rqs(rq);
 }
 
 #endif /* CONFIG_SMP */
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index dd1c7e9d7fb0..32a51c808128 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -685,6 +685,7 @@ balanced:
 		 * runtime - in which case borrowing doesn't make sense.
 		 */
 		rt_rq->rt_runtime = RUNTIME_INF;
+		rt_rq->rt_throttled = 0;
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		raw_spin_unlock(&rt_b->rt_runtime_lock);
 	}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4e2bd6c32da7..a70d8908a6d3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1138,7 +1138,6 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
 
-extern void unthrottle_offline_cfs_rqs(struct rq *rq);
 
 extern void cfs_bandwidth_usage_inc(void);
 extern void cfs_bandwidth_usage_dec(void);
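
Why the rt.c hunk also clears rt_throttled: setting rt_runtime = RUNTIME_INF
alone is not enough, because the RT picker keys off the throttle flag, not the
runtime budget. A sketch approximating the 3.4-era kernel/sched/rt.c
(simplified to the !CONFIG_RT_GROUP_SCHED variant, group walk elided; not the
verbatim source):

/* Approximation of the 3.4-era rt.c picker path, simplified
 * (!CONFIG_RT_GROUP_SCHED variant); not the verbatim source. */
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	/* Only the flag is consulted; rt_runtime is not re-checked,
	 * so RUNTIME_INF alone leaves a throttled rt_rq invisible. */
	return rt_rq->rt_throttled;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct rt_rq *rt_rq = &rq->rt;

	if (!rt_rq->rt_nr_running)
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;	/* hides runnable RT tasks from migrate_tasks() */

	/* Walk the rt priority array and return the next task
	 * (group-scheduling descent elided). */
	rt_se = pick_next_rt_entity(rq, rt_rq);
	return rt_task_of(rt_se);
}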