diff options
author | Alex Frid <afrid@nvidia.com> | 2012-05-02 13:43:57 -0700 |
---|---|---|
committer | Ryan Wong <ryanw@nvidia.com> | 2012-06-11 22:03:07 -0700 |
commit | 83af3e7dbab59e593347a7f89123b9f4abef469e (patch) | |
tree | 69b53e98e9ceffb5d3d61a2807e695826c394d7c /arch | |
parent | 77c0e45d065e2361cc3f2ad9a856e9e6c5c09b6c (diff) |
ARM: tegra: power: Use runnable threads average for hotplug
Sample the scheduler's runnable-threads average in the auto-hotplug work
function and use it to determine the auto-hotplug target for the number
of on-line cores. Use the CPU up delay as the sampling period, and enforce
the down delay by checking the time stamp of the last CPU configuration change.
Bug 958978
Change-Id: I922dc9d03c653b3465c2b7bac0498aa59ae71af0
Signed-off-by: Alex Frid <afrid@nvidia.com>
Reviewed-on: http://git-master/r/108073
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Ryan Wong <ryanw@nvidia.com>
Tested-by: Ryan Wong <ryanw@nvidia.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/mach-tegra/cpu-tegra3.c | 47 |
1 file changed, 39 insertions, 8 deletions
diff --git a/arch/arm/mach-tegra/cpu-tegra3.c b/arch/arm/mach-tegra/cpu-tegra3.c index 86a0b1364b3d..386e4571949c 100644 --- a/arch/arm/mach-tegra/cpu-tegra3.c +++ b/arch/arm/mach-tegra/cpu-tegra3.c @@ -73,6 +73,8 @@ static struct clk *cpu_clk; static struct clk *cpu_g_clk; static struct clk *cpu_lp_clk; +static unsigned long last_change_time; + static struct { cputime64_t time_up_total; u64 last_update; @@ -186,6 +188,14 @@ enum { TEGRA_CPU_SPEED_SKEWED, }; +#define NR_FSHIFT 2 +static unsigned int nr_run_thresholds[] = { +/* 1, 2, 3, 4 - on-line cpus target */ + 5, 9, 13, UINT_MAX /* avg run threads * 4 (e.g., 9 = 2.25 threads) */ +}; +static unsigned int nr_run_hysteresis = 2; /* 0.5 thread */ +static unsigned int nr_run_last; + static noinline int tegra_cpu_speed_balance(void) { unsigned long highest_speed = tegra_cpu_highest_speed(); @@ -194,17 +204,36 @@ static noinline int tegra_cpu_speed_balance(void) unsigned int nr_cpus = num_online_cpus(); unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? 
: 4; unsigned int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS); + unsigned int avg_nr_run = avg_nr_running(); + unsigned int nr_run; + + /* Evaluate: + * - distribution of freq targets for already on-lined CPUs + * - average number of runnable threads + * - effective MIPS available within EDP frequency limits, + * and return: + * TEGRA_CPU_SPEED_BALANCED to bring one more CPU core on-line + * TEGRA_CPU_SPEED_BIASED to keep CPU core composition unchanged + * TEGRA_CPU_SPEED_SKEWED to remove CPU core off-line + */ + for (nr_run = 1; nr_run < ARRAY_SIZE(nr_run_thresholds); nr_run++) { + unsigned int nr_threshold = nr_run_thresholds[nr_run - 1]; + if (nr_run_last <= nr_run) + nr_threshold += nr_run_hysteresis; + if (avg_nr_run <= (nr_threshold << (FSHIFT - NR_FSHIFT))) + break; + } + nr_run_last = nr_run; - /* balanced: freq targets for all CPUs are above 50% of highest speed - biased: freq target for at least one CPU is below 50% threshold - skewed: freq targets for at least 2 CPUs are below 25% threshold */ if (((tegra_count_slow_cpus(skewed_speed) >= 2) || + (nr_run < nr_cpus) || tegra_cpu_edp_favor_down(nr_cpus, mp_overhead) || (highest_speed <= idle_bottom_freq) || (nr_cpus > max_cpus)) && (nr_cpus > min_cpus)) return TEGRA_CPU_SPEED_SKEWED; if (((tegra_count_slow_cpus(balanced_speed) >= 1) || + (nr_run <= nr_cpus) || (!tegra_cpu_edp_favor_up(nr_cpus, mp_overhead)) || (highest_speed <= idle_bottom_freq) || (nr_cpus == max_cpus)) && (nr_cpus >= min_cpus)) @@ -218,7 +247,6 @@ static void tegra_auto_hotplug_work_func(struct work_struct *work) bool up = false; unsigned int cpu = nr_cpu_ids; unsigned long now = jiffies; - static unsigned long last_change_time; mutex_lock(tegra3_cpu_lock); @@ -230,7 +258,8 @@ static void tegra_auto_hotplug_work_func(struct work_struct *work) cpu = tegra_get_slowest_cpu_n(); if (cpu < nr_cpu_ids) { up = false; - } else if (!is_lp_cluster() && !no_lp) { + } else if (!is_lp_cluster() && !no_lp && + ((now - last_change_time) >= 
down_delay)) { if(!clk_set_parent(cpu_clk, cpu_lp_clk)) { hp_stats_update(CONFIG_NR_CPUS, true); hp_stats_update(0, false); @@ -240,11 +269,12 @@ static void tegra_auto_hotplug_work_func(struct work_struct *work) } } queue_delayed_work( - hotplug_wq, &hotplug_work, down_delay); + hotplug_wq, &hotplug_work, up2gn_delay); break; case TEGRA_HP_UP: if (is_lp_cluster() && !no_lp) { if(!clk_set_parent(cpu_clk, cpu_g_clk)) { + last_change_time = now; hp_stats_update(CONFIG_NR_CPUS, false); hp_stats_update(0, true); /* catch-up with governor target speed */ @@ -306,6 +336,7 @@ static int min_cpus_notify(struct notifier_block *nb, unsigned long n, void *p) tegra_update_cpu_speed(speed); if (!clk_set_parent(cpu_clk, cpu_g_clk)) { + last_change_time = jiffies; hp_stats_update(CONFIG_NR_CPUS, false); hp_stats_update(0, true); } @@ -371,7 +402,7 @@ void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend) } else if (cpu_freq <= bottom_freq) { hp_state = TEGRA_HP_DOWN; queue_delayed_work( - hotplug_wq, &hotplug_work, down_delay); + hotplug_wq, &hotplug_work, up_delay); } break; case TEGRA_HP_DOWN: @@ -387,7 +418,7 @@ void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend) if (cpu_freq <= bottom_freq) { hp_state = TEGRA_HP_DOWN; queue_delayed_work( - hotplug_wq, &hotplug_work, down_delay); + hotplug_wq, &hotplug_work, up_delay); } else if (cpu_freq <= top_freq) { hp_state = TEGRA_HP_IDLE; } |