author	Puneet Saxena <puneets@nvidia.com>	2012-09-20 19:22:09 +0530
committer	Simone Willett <swillett@nvidia.com>	2012-09-26 14:47:27 -0700
commit	3bca5808dcc371ea4e1d6ce555e3eae76fbe0e7e (patch)
tree	6ce5fc36df33040c76785e524a08bfe6b7063a48 /kernel
parent	b8dc085596053bf61bbd3fd3931a5b9fe372f0bc (diff)
Revert "cpuquiet: Update averaging of nr_runnables"
This reverts commit 8bd999a85354485af3cbee872816a9921d8bfffc.

bug 1050721

Change-Id: I29fcff431e5427dfaa2524a12c5702154037018a
Signed-off-by: Puneet Saxena <puneets@nvidia.com>
Reviewed-on: http://git-master/r/134307
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	47
-rw-r--r--	kernel/sched/debug.c	3
-rw-r--r--	kernel/sched/sched.h	22
3 files changed, 46 insertions(+), 26 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 974c9b2c0754..b8f4618407b2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2161,10 +2161,35 @@ unsigned long this_cpu_load(void)
return this->cpu_load[0];
}
-u64 nr_running_integral(unsigned int cpu)
+unsigned long avg_nr_running(void)
+{
+ unsigned long i, sum = 0;
+ unsigned int seqcnt, ave_nr_running;
+
+ for_each_online_cpu(i) {
+ struct rq *q = cpu_rq(i);
+
+ /*
+ * Update average to avoid reading stalled value if there were
+ * no run-queue changes for a long time. On the other hand if
+ * the changes are happening right now, just read current value
+ * directly.
+ */
+ seqcnt = read_seqcount_begin(&q->ave_seqcnt);
+ ave_nr_running = do_avg_nr_running(q);
+ if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {
+ read_seqcount_begin(&q->ave_seqcnt);
+ ave_nr_running = q->ave_nr_running;
+ }
+
+ sum += ave_nr_running;
+ }
+
+ return sum;
+}
+
+unsigned long get_avg_nr_running(unsigned int cpu)
{
- unsigned int seqcnt;
- u64 integral;
struct rq *q;
if (cpu >= nr_cpu_ids)
@@ -2172,21 +2197,7 @@ u64 nr_running_integral(unsigned int cpu)
q = cpu_rq(cpu);
- /*
- * Update average to avoid reading stalled value if there were
- * no run-queue changes for a long time. On the other hand if
- * the changes are happening right now, just read current value
- * directly.
- */
-
- seqcnt = read_seqcount_begin(&q->ave_seqcnt);
- integral = do_nr_running_integral(q);
- if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {
- read_seqcount_begin(&q->ave_seqcnt);
- integral = q->nr_running_integral;
- }
-
- return integral;
+ return q->ave_nr_running;
}
/*
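A note on the locking in avg_nr_running() above: each reader samples rq->ave_seqcnt, computes the average, and checks the counter again; if a writer raced with it, the reader falls back to the stored rq->ave_nr_running rather than looping. Below is a minimal userspace sketch of the underlying seqcount read/retry idea, using C11 atomics in place of the kernel's seqcount_t; all names here are illustrative, not kernel API:

#include <stdatomic.h>
#include <stdio.h>

/* Userspace sketch of a seqcount: even = stable, odd = write in progress. */
static _Atomic unsigned int seq;
static unsigned int shared_value;       /* datum protected by the counter */

static void writer_update(unsigned int v)
{
        atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* -> odd */
        shared_value = v;
        atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* -> even */
}

static unsigned int reader_read(void)
{
        unsigned int s1, s2, v;

        do {
                s1 = atomic_load_explicit(&seq, memory_order_acquire);
                v = shared_value;
                s2 = atomic_load_explicit(&seq, memory_order_acquire);
        } while (s1 != s2 || (s1 & 1)); /* a writer raced or is mid-update */

        return v;
}

int main(void)
{
        writer_update(42);
        printf("read back: %u\n", reader_read());
        return 0;
}

Unlike the classic retry loop in this sketch, avg_nr_running() does not loop: on a detected race it reads the writer-maintained q->ave_nr_running directly, bounding reader latency at the cost of a possibly slightly stale average.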
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 09acaa15161d..06d172eb5cea 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -266,6 +266,9 @@ static void print_cpu(struct seq_file *m, int cpu)
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
P(nr_running);
+ SEQ_printf(m, " .%-30s: %d.%03d \n", "ave_nr_running",
+ rq->ave_nr_running / FIXED_1,
+ ((rq->ave_nr_running % FIXED_1) * 1000) / FIXED_1);
SEQ_printf(m, " .%-30s: %lu\n", "load",
rq->load.weight);
P(nr_switches);
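The debug line added above splits the FSHIFT fixed-point average into an integer part and three decimal digits. A self-contained illustration of that conversion, with FSHIFT and FIXED_1 defined as in the kernel's load-average fixed-point scheme:

#include <stdio.h>

#define FSHIFT  11              /* bits of fractional precision */
#define FIXED_1 (1 << FSHIFT)   /* 1.0 in fixed point */

int main(void)
{
        unsigned int ave = 3 * FIXED_1 + FIXED_1 / 2;   /* 3.5 runnable tasks */

        /* Same split as the SEQ_printf above: integer part, then thousandths. */
        printf("ave_nr_running: %u.%03u\n",
               ave / FIXED_1,
               (ave % FIXED_1) * 1000 / FIXED_1);       /* prints "3.500" */
        return 0;
}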
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 99589411f980..ef5a1ff65196 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -365,7 +365,7 @@ struct rq {
/* time-based average load */
u64 nr_last_stamp;
- u64 nr_running_integral;
+ unsigned int ave_nr_running;
seqcount_t ave_seqcnt;
/* capture load from *all* tasks on this cpu: */
@@ -924,26 +924,32 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
* 25 ~= 33554432ns = 33.5ms
* 24 ~= 16777216ns = 16.8ms
*/
+#define NR_AVE_PERIOD_EXP 27
#define NR_AVE_SCALE(x) ((x) << FSHIFT)
+#define NR_AVE_PERIOD (1 << NR_AVE_PERIOD_EXP)
+#define NR_AVE_DIV_PERIOD(x) ((x) >> NR_AVE_PERIOD_EXP)
-
-static inline u64 do_nr_running_integral(struct rq *rq)
+static inline unsigned int do_avg_nr_running(struct rq *rq)
{
s64 nr, deltax;
- u64 nr_running_integral = rq->nr_running_integral;
+ unsigned int ave_nr_running = rq->ave_nr_running;
deltax = rq->clock_task - rq->nr_last_stamp;
nr = NR_AVE_SCALE(rq->nr_running);
- nr_running_integral += nr * deltax;
+ if (deltax > NR_AVE_PERIOD)
+ ave_nr_running = nr;
+ else
+ ave_nr_running +=
+ NR_AVE_DIV_PERIOD(deltax * (nr - ave_nr_running));
- return nr_running_integral;
+ return ave_nr_running;
}
static inline void inc_nr_running(struct rq *rq)
{
write_seqcount_begin(&rq->ave_seqcnt);
- rq->nr_running_integral = do_nr_running_integral(rq);
+ rq->ave_nr_running = do_avg_nr_running(rq);
rq->nr_last_stamp = rq->clock_task;
rq->nr_running++;
write_seqcount_end(&rq->ave_seqcnt);
@@ -952,7 +958,7 @@ static inline void inc_nr_running(struct rq *rq)
static inline void dec_nr_running(struct rq *rq)
{
write_seqcount_begin(&rq->ave_seqcnt);
- rq->nr_running_integral = do_nr_running_integral(rq);
+ rq->ave_nr_running = do_avg_nr_running(rq);
rq->nr_last_stamp = rq->clock_task;
rq->nr_running--;
write_seqcount_end(&rq->ave_seqcnt);
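For reference, the restored do_avg_nr_running() is an exponential moving average in fixed point: each update moves the stored average toward NR_AVE_SCALE(rq->nr_running) by the fraction deltax / NR_AVE_PERIOD of the remaining gap, and snaps straight to the instantaneous value once a full period (2^27 ns, roughly 134 ms) elapses with no run-queue change. A standalone sketch of the same arithmetic follows; struct fake_rq and the sample values are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define FSHIFT            11
#define NR_AVE_SCALE(x)   ((uint64_t)(x) << FSHIFT)
#define NR_AVE_PERIOD_EXP 27                    /* 2^27 ns ~= 134 ms */
#define NR_AVE_PERIOD     (1LL << NR_AVE_PERIOD_EXP)

/* Illustrative stand-in for the rq fields that do_avg_nr_running() touches. */
struct fake_rq {
        uint64_t     clock_task;        /* current task clock, ns */
        uint64_t     nr_last_stamp;     /* clock at the last nr_running change */
        unsigned int nr_running;
        unsigned int ave_nr_running;    /* fixed point, FSHIFT fraction bits */
};

static unsigned int do_avg_nr_running(struct fake_rq *rq)
{
        int64_t deltax = rq->clock_task - rq->nr_last_stamp;
        int64_t nr = NR_AVE_SCALE(rq->nr_running);
        int64_t ave = rq->ave_nr_running;

        if (deltax > NR_AVE_PERIOD)
                ave = nr;       /* average is stale: snap to the current value */
        else
                ave += (deltax * (nr - ave)) >> NR_AVE_PERIOD_EXP;

        return (unsigned int)ave;
}

int main(void)
{
        struct fake_rq rq = {
                .clock_task = 40 * 1000 * 1000, /* 40 ms since the last change */
                .nr_last_stamp = 0,
                .nr_running = 4,                /* with 4 tasks runnable */
                .ave_nr_running = NR_AVE_SCALE(1),
        };
        unsigned int ave = do_avg_nr_running(&rq);

        printf("ave_nr_running = %u.%03u\n",
               ave >> FSHIFT, (ave % (1u << FSHIFT)) * 1000 >> FSHIFT);
        return 0;
}

Starting from an average of 1.0 with four tasks runnable for 40 ms, this prints "ave_nr_running = 1.894": the average covers 40/134 of the distance from 1.0 to 4.0.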