From 6d686f4564f3fc7c6e678852919e48ad331d276b Mon Sep 17 00:00:00 2001
From: H Hartley Sweeten
Date: Wed, 13 Jan 2010 20:21:52 -0700
Subject: sched: Don't expose local functions

kernel/sched: don't expose local functions

The get_rr_interval_* functions are all class methods of struct
sched_class. They are not exported, so make them static.

Signed-off-by: H Hartley Sweeten
Cc: Peter Zijlstra
LKML-Reference: <201001132021.53253.hartleys@visionengravers.com>
Signed-off-by: Ingo Molnar
---
 kernel/sched_rt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/sched_rt.c')

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f48328ac216f..072b3fcee8d8 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1721,7 +1721,7 @@ static void set_curr_task_rt(struct rq *rq)
 	dequeue_pushable_task(rq, p);
 }

-unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
+static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 {
 	/*
 	 * Time slice is 0 for SCHED_FIFO tasks
--
cgit v1.2.3


From 3d45fd804a95055ecab5b3eed81f5ab2dbb047a2 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 17 Dec 2009 17:12:46 +0100
Subject: sched: Remove the sched_class load_balance methods

Take out the sched_class methods for load-balancing.

Signed-off-by: Peter Zijlstra
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 kernel/sched_rt.c | 20 --------------------
 1 file changed, 20 deletions(-)

(limited to 'kernel/sched_rt.c')

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 072b3fcee8d8..502bb614e40a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1481,24 +1481,6 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 		push_rt_tasks(rq);
 }

-static unsigned long
-load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		unsigned long max_load_move,
-		struct sched_domain *sd, enum cpu_idle_type idle,
-		int *all_pinned, int *this_best_prio)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
-static int
-move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		 struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
 static void set_cpus_allowed_rt(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
@@ -1746,8 +1728,6 @@ static const struct sched_class rt_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,

-	.load_balance		= load_balance_rt,
-	.move_one_task		= move_one_task_rt,
 	.set_cpus_allowed       = set_cpus_allowed_rt,
 	.rq_online              = rq_online_rt,
 	.rq_offline             = rq_offline_rt,
--
cgit v1.2.3


From ea87bb7853168434f4a82426dd1ea8421f9e604d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 20 Jan 2010 20:58:57 +0000
Subject: sched: Extend enqueue_task to allow head queueing

The ability to enqueue a task at the head of a SCHED_FIFO priority list
is required to fix some violations of POSIX scheduling policy. Extend
the related functions with a "head" argument.
Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra
Tested-by: Carsten Emde
Tested-by: Mathias Weber
LKML-Reference: <20100120171629.734886007@linutronix.de>
---
 kernel/sched_rt.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel/sched_rt.c')

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 502bb614e40a..38076dabb44a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -878,7 +878,8 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 /*
  * Adding/removing a task to/from a priority array:
  */
-static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
--
cgit v1.2.3


From 37dad3fce97f01e5149d69de0833d8452c0e862e Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 20 Jan 2010 20:59:01 +0000
Subject: sched: Implement head queueing for sched_rt

The ability to enqueue a task at the head of a SCHED_FIFO priority list
is required to fix some violations of POSIX scheduling policy. Implement
the functionality in sched_rt.

Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra
Tested-by: Carsten Emde
Tested-by: Mathias Weber
LKML-Reference: <20100120171629.772169931@linutronix.de>
---
 kernel/sched_rt.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

(limited to 'kernel/sched_rt.c')

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 38076dabb44a..ca49ceb01201 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -194,7 +194,7 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 	return rt_se->my_q;
 }

-static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
@@ -204,7 +204,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)

 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
-			enqueue_rt_entity(rt_se);
+			enqueue_rt_entity(rt_se, false);
 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
@@ -803,7 +803,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	dec_rt_group(rt_se, rt_rq);
 }

-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -819,7 +819,10 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;

-	list_add_tail(&rt_se->run_list, queue);
+	if (head)
+		list_add(&rt_se->run_list, queue);
+	else
+		list_add_tail(&rt_se->run_list, queue);
 	__set_bit(rt_se_prio(rt_se), array->bitmap);

 	inc_rt_tasks(rt_se, rt_rq);
@@ -856,11 +859,11 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 	}
 }

-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
 	dequeue_rt_stack(rt_se);
 	for_each_sched_rt_entity(rt_se)
-		__enqueue_rt_entity(rt_se);
+		__enqueue_rt_entity(rt_se, head);
 }

 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
@@ -871,7 +874,7 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 		struct rt_rq *rt_rq = group_rt_rq(rt_se);

 		if (rt_rq && rt_rq->rt_nr_running)
-			__enqueue_rt_entity(rt_se);
+			__enqueue_rt_entity(rt_se, false);
 	}
 }

@@ -886,7 +889,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 	if (wakeup)
 		rt_se->timeout = 0;

-	enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se, head);

 	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
--
cgit v1.2.3


From 74b7eb5885415ed41d012f432398d1b697115b5f Mon Sep 17 00:00:00 2001
From: Yong Zhang
Date: Fri, 29 Jan 2010 14:57:52 +0800
Subject: sched: Change usage of rt_rq->rt_se to rt_rq->tg->rt_se[cpu]

This is the first step toward removing the rt_rq member rt_se, because it
has the same meaning as tg->rt_se[cpu]. The latter style is also used by
the fair scheduling class.

Signed-off-by: Yong Zhang
Cc: Rusty Russell
Signed-off-by: Peter Zijlstra
LKML-Reference: <2674af741001282257r28c97a92o9f90cf16fe8d3d84@mail.gmail.com>
Signed-off-by: Ingo Molnar
---
 kernel/sched_rt.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

(limited to 'kernel/sched_rt.c')

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index ca49ceb01201..bf3e38fdbe6d 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -199,8 +199,11 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
+	int this_cpu = smp_processor_id();
 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
-	struct sched_rt_entity *rt_se = rt_rq->rt_se;
+	struct sched_rt_entity *rt_se;
+
+	rt_se = rt_rq->tg->rt_se[this_cpu];

 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
@@ -212,7 +215,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)

 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-	struct sched_rt_entity *rt_se = rt_rq->rt_se;
+	int this_cpu = smp_processor_id();
+	struct sched_rt_entity *rt_se;
+
+	rt_se = rt_rq->tg->rt_se[this_cpu];

 	if (rt_se && on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
--
cgit v1.2.3
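
Editor's note on the two head-queueing patches above (ea87bb78 and 37dad3fc): the new "head" flag boils down to the one-line choice between list_add() and list_add_tail() in __enqueue_rt_entity(). The stand-alone C sketch below illustrates that choice outside the kernel. It is not kernel code: the "prio_array" and "entity" types and the enqueue helpers are simplified stand-ins invented here for illustration, not the kernel's rt_prio_array, sched_rt_entity, or the <linux/list.h> API.

/* Minimal user-space sketch of head vs. tail queueing on a priority list.
 * The types and helpers are simplified stand-ins, not kernel structures. */
#include <stdio.h>

#define MAX_PRIO 100

struct entity {
	const char *name;
	int prio;
	struct entity *next;
};

struct prio_array {
	struct entity *queue[MAX_PRIO];	/* one singly linked list per priority */
};

/* Insert at the head of the priority list (what the kernel's list_add() does). */
static void enqueue_head(struct prio_array *array, struct entity *e)
{
	e->next = array->queue[e->prio];
	array->queue[e->prio] = e;
}

/* Insert at the tail of the priority list (what list_add_tail() does). */
static void enqueue_tail(struct prio_array *array, struct entity *e)
{
	struct entity **p = &array->queue[e->prio];

	while (*p)
		p = &(*p)->next;
	e->next = NULL;
	*p = e;
}

/* Mirrors the "if (head) list_add() else list_add_tail()" hunk above. */
static void enqueue_entity(struct prio_array *array, struct entity *e, int head)
{
	if (head)
		enqueue_head(array, e);
	else
		enqueue_tail(array, e);
}

int main(void)
{
	static struct prio_array array;	/* zero-initialized: all queues empty */
	struct entity a = { "A", 10, NULL };
	struct entity b = { "B", 10, NULL };
	struct entity c = { "C", 10, NULL };

	enqueue_entity(&array, &a, 0);	/* tail queue: A */
	enqueue_entity(&array, &b, 0);	/* tail queue: A B */
	enqueue_entity(&array, &c, 1);	/* head queue: C A B */

	for (struct entity *e = array.queue[10]; e; e = e->next)
		printf("%s ", e->name);
	printf("\n");

	return 0;
}

Built with a C99 compiler, the sketch prints "C A B": the entity queued with the head flag set jumps ahead of the entities already waiting at the same priority, which is the behaviour the POSIX fixes mentioned in the changelogs rely on.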