author    Varun Wadekar <vwadekar@nvidia.com>    2012-05-07 15:12:25 -0700
committer Dan Willemsen <dwillemsen@nvidia.com>  2013-09-14 12:13:16 -0700
commit    e23fd0bf7e184cb10624d5df433afc9d2cc48930 (patch)
tree      b3cd8599f23efa61feb1dc342ef0e9190a0fca67 /kernel/sched
parent    10bb490f7b86028941f688cfe0670824e2d80cd1 (diff)
scheduler: compute time-average nr_running per run-queue

Port commit 0b5a8a6f3 (http://git-master/r/111635) from v3.1

Compute the time-average number of running tasks per run-queue for a
trailing window of a fixed time period. The delta added to or subtracted
from the average is weighted by the amount of time spent at each
nr_running value relative to the total measurement period.

Original author: Diwakar Tundlam <dtundlam@nvidia.com>

Change-Id: I076e24ff4ed65bed3b8dd8d2b279a503318071ff
Signed-off-by: Diwakar Tundlam <dtundlam@nvidia.com>
Signed-off-by: Varun Wadekar <vwadekar@nvidia.com>
Rebase-Id: R1760349117674c9cf5ea63046f937a7c7a0186f6
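The update implemented below is an exponentially decaying, time-weighted average kept in FSHIFT fixed point: on every nr_running change the stored average moves toward the scaled current value by delta/period, and a gap longer than the period simply resets the average to the current value. A minimal user-space sketch of the same arithmetic follows; the function name update_avg_nr_running, the free-standing variables, and the plain division (instead of the patch's NR_AVE_DIV_PERIOD shift) are illustrative stand-ins, not part of this patch:

#include <stdint.h>

#define FSHIFT              11   /* fixed-point shift, matching <linux/sched.h> */
#define NR_AVE_PERIOD_EXP   27   /* 2^27 ns ~= 134.2 ms trailing window */
#define NR_AVE_SCALE(x)     ((int64_t)(x) << FSHIFT)
#define NR_AVE_PERIOD       (1LL << NR_AVE_PERIOD_EXP)

/* Illustrative stand-ins for rq->nr_last_stamp / rq->ave_nr_running. */
static uint64_t nr_last_stamp;   /* ns timestamp of the previous update */
static int64_t  ave_nr_running;  /* time-average, FSHIFT fixed point    */

/*
 * Called just before nr_running changes, with the current task clock in
 * nanoseconds and the nr_running value that was in effect since the last
 * update (mirroring do_avg_nr_running() in the sched.h hunk below).
 */
static void update_avg_nr_running(uint64_t now_ns, unsigned int nr_running)
{
        int64_t deltax = (int64_t)(now_ns - nr_last_stamp);
        int64_t nr = NR_AVE_SCALE(nr_running);

        nr_last_stamp = now_ns;

        if (deltax > NR_AVE_PERIOD)
                ave_nr_running = nr;   /* stale window: restart from current value */
        else
                ave_nr_running += deltax * (nr - ave_nr_running) / NR_AVE_PERIOD;
}

With FSHIFT = 11 (so FIXED_1 = 2048), a run-queue that stays at three runnable tasks for longer than the window converges to ave_nr_running = 3 * 2048 = 6144, which the debug.c hunk below prints as 3.000.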
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/debug.c  |  3
-rw-r--r--  kernel/sched/sched.h  | 41
2 files changed, 44 insertions, 0 deletions
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 75024a673520..ad1b66c36096 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -281,6 +281,9 @@ do { \
 	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
 
 	P(nr_running);
+	SEQ_printf(m, " .%-30s: %d.%03d \n", "ave_nr_running",
+		   rq->ave_nr_running / FIXED_1,
+		   ((rq->ave_nr_running % FIXED_1) * 1000) / FIXED_1);
 	SEQ_printf(m, " .%-30s: %lu\n", "load",
 		   rq->load.weight);
 	P(nr_switches);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ce39224d6155..aa12450076cd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -415,6 +415,10 @@ struct rq {
 #endif
 	int skip_clock_update;
 
+	/* time-based average load */
+	u64 nr_last_stamp;
+	unsigned int ave_nr_running;
+
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
@@ -1073,8 +1077,34 @@ static inline u64 steal_ticks(u64 steal)
 }
 #endif
 
+/* 27 ~= 134217728ns = 134.2ms
+ * 26 ~= 67108864ns = 67.1ms
+ * 25 ~= 33554432ns = 33.5ms
+ * 24 ~= 16777216ns = 16.8ms
+ */
+#define NR_AVE_PERIOD_EXP	27
+#define NR_AVE_SCALE(x)		((x) << FSHIFT)
+#define NR_AVE_PERIOD		(1 << NR_AVE_PERIOD_EXP)
+#define NR_AVE_DIV_PERIOD(x)	((x) >> NR_AVE_PERIOD_EXP)
+
+static inline void do_avg_nr_running(struct rq *rq)
+{
+	s64 nr, deltax;
+
+	deltax = rq->clock_task - rq->nr_last_stamp;
+	rq->nr_last_stamp = rq->clock_task;
+	nr = NR_AVE_SCALE(rq->nr_running);
+
+	if (deltax > NR_AVE_PERIOD)
+		rq->ave_nr_running = nr;
+	else
+		rq->ave_nr_running +=
+			NR_AVE_DIV_PERIOD(deltax * (nr - rq->ave_nr_running));
+}
+
 static inline void inc_nr_running(struct rq *rq)
 {
+	do_avg_nr_running(rq);
 	rq->nr_running++;
 
 #ifdef CONFIG_NO_HZ_FULL
@@ -1090,9 +1120,20 @@ static inline void inc_nr_running(struct rq *rq)
 
 static inline void dec_nr_running(struct rq *rq)
 {
+	do_avg_nr_running(rq);
 	rq->nr_running--;
 }
 
+unsigned long avg_nr_running(void)
+{
+	unsigned long i, sum = 0;
+
+	for_each_online_cpu(i)
+		sum += cpu_rq(i)->ave_nr_running;
+
+	return sum;
+}
+
 static inline void rq_last_tick_reset(struct rq *rq)
 {
 #ifdef CONFIG_NO_HZ_FULL
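The exported avg_nr_running() returns the sum of the per-CPU fixed-point averages, so a caller still divides by FIXED_1 to obtain a task count. A hypothetical consumer, following the same formatting as the debug.c hunk above (the function name and the print statement are illustrative only, not part of this patch), might look like:

extern unsigned long avg_nr_running(void);

/* Hypothetical consumer: report the system-wide time-average number of
 * runnable tasks with three decimal places, as debug.c does per run-queue. */
static void report_avg_nr_running(void)
{
	unsigned long ave = avg_nr_running();	/* FSHIFT fixed point, summed over CPUs */

	printk("ave_nr_running: %lu.%03lu\n",
	       ave / FIXED_1, ((ave % FIXED_1) * 1000) / FIXED_1);
}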