/*
 * arch/arm/mach-tegra/cpu-tegra.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * Author:
 *	Colin Cross
 *	Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
 *
 * Copyright (C) 2010-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/cpu.h>
#include <linux/thermal.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_qos.h>

#include <mach/edp.h>

#include "clock.h"
#include "cpu-tegra.h"
#include "dvfs.h"

/* tegra throttling and edp governors require frequencies in the table
   to be in ascending order */
static struct cpufreq_frequency_table *freq_table;

static struct clk *cpu_clk;
static struct clk *emc_clk;

static unsigned long policy_max_speed[CONFIG_NR_CPUS];
static unsigned long target_cpu_speed[CONFIG_NR_CPUS];
static DEFINE_MUTEX(tegra_cpu_lock);
static bool is_suspended;
static int suspend_index;
static unsigned int volt_capped_speed;
static struct pm_qos_request cpufreq_max_req;
static struct pm_qos_request cpufreq_min_req;

static bool force_policy_max;

static int force_policy_max_set(const char *arg, const struct kernel_param *kp)
{
	int ret;
	bool old_policy = force_policy_max;

	mutex_lock(&tegra_cpu_lock);

	ret = param_set_bool(arg, kp);
	if ((ret == 0) && (old_policy != force_policy_max))
		tegra_cpu_set_speed_cap_locked(NULL);

	mutex_unlock(&tegra_cpu_lock);
	return ret;
}

static int force_policy_max_get(char *buffer, const struct kernel_param *kp)
{
	return param_get_bool(buffer, kp);
}

static struct kernel_param_ops policy_ops = {
	.set = force_policy_max_set,
	.get = force_policy_max_get,
};
module_param_cb(force_policy_max, &policy_ops, &force_policy_max, 0644);


static unsigned int cpu_user_cap;

static inline void _cpu_user_cap_set_locked(void)
{
#ifndef CONFIG_TEGRA_CPU_CAP_EXACT_FREQ
	if (cpu_user_cap != 0) {
		int i;
		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
			if (freq_table[i].frequency > cpu_user_cap)
				break;
		}
		i = (i == 0) ? 0 : i - 1;
		cpu_user_cap = freq_table[i].frequency;
	}
#endif
	tegra_cpu_set_speed_cap_locked(NULL);
}
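/*
 * tegra_cpu_user_cap_set - apply a user-requested frequency cap (in kHz).
 * A cap of 0 disables user capping. Unless CONFIG_TEGRA_CPU_CAP_EXACT_FREQ
 * is set, the cap is rounded down to the nearest cpufreq table entry; e.g.
 * (hypothetical table) a request of 1100000 kHz against entries
 * {312000, 1044000, 1200000} is clipped to 1044000 kHz.
 */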
void tegra_cpu_user_cap_set(unsigned int speed_khz)
{
	mutex_lock(&tegra_cpu_lock);

	cpu_user_cap = speed_khz;
	_cpu_user_cap_set_locked();

	mutex_unlock(&tegra_cpu_lock);
}

static int cpu_user_cap_set(const char *arg, const struct kernel_param *kp)
{
	int ret;

	mutex_lock(&tegra_cpu_lock);

	ret = param_set_uint(arg, kp);
	if (ret == 0)
		_cpu_user_cap_set_locked();

	mutex_unlock(&tegra_cpu_lock);
	return ret;
}

static int cpu_user_cap_get(char *buffer, const struct kernel_param *kp)
{
	return param_get_uint(buffer, kp);
}

static struct kernel_param_ops cap_ops = {
	.set = cpu_user_cap_set,
	.get = cpu_user_cap_get,
};
module_param_cb(cpu_user_cap, &cap_ops, &cpu_user_cap, 0644);

static unsigned int user_cap_speed(unsigned int requested_speed)
{
	if ((cpu_user_cap) && (requested_speed > cpu_user_cap))
		return cpu_user_cap;
	return requested_speed;
}

#ifdef CONFIG_TEGRA_THERMAL_THROTTLE

static ssize_t show_throttle(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", tegra_is_throttling(NULL));
}

cpufreq_freq_attr_ro(throttle);

static ssize_t show_throttle_count(struct cpufreq_policy *policy, char *buf)
{
	int count;

	tegra_is_throttling(&count);
	return sprintf(buf, "%u\n", count);
}

static struct freq_attr _attr_throttle_count = {
	.attr = {.name = "throttle_count", .mode = 0444, },
	.show = show_throttle_count,
};

static struct attribute *new_attrs[] = {
	&_attr_throttle_count.attr,
	NULL
};

static struct attribute_group stats_attr_grp = {
	.attrs = new_attrs,
	.name = "stats"
};

#endif /* CONFIG_TEGRA_THERMAL_THROTTLE */

static const struct tegra_edp_limits *cpu_reg_idle_limits;
static unsigned int reg_mode;
static bool reg_mode_force_normal;

#ifdef CONFIG_TEGRA_EDP_LIMITS

static const struct tegra_edp_limits *cpu_edp_limits;
static int cpu_edp_limits_size;

static const unsigned int *system_edp_limits;
static bool system_edp_alarm;

static int edp_thermal_index;
static cpumask_t edp_cpumask;
static unsigned int edp_limit;

static struct pm_qos_request edp_max_cpus;

unsigned int tegra_get_edp_limit(int *get_edp_thermal_index)
{
	if (get_edp_thermal_index)
		*get_edp_thermal_index = edp_thermal_index;
	return edp_limit;
}

static unsigned int edp_predict_limit(unsigned int cpus)
{
	unsigned int limit = 0;

	BUG_ON(cpus == 0);
	if (cpu_edp_limits) {
		BUG_ON(edp_thermal_index >= cpu_edp_limits_size);
		limit = cpu_edp_limits[edp_thermal_index].freq_limits[cpus - 1];
	}
	if (system_edp_limits && system_edp_alarm)
		limit = min(limit, system_edp_limits[cpus - 1]);

	return limit;
}

static unsigned int get_edp_freq_limit(unsigned int nr_cpus)
{
	unsigned int limit = edp_predict_limit(nr_cpus);
#ifndef CONFIG_TEGRA_EDP_EXACT_FREQ
	unsigned int i;
	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (freq_table[i].frequency > limit) {
			break;
		}
	}
	BUG_ON(i == 0);	/* min freq above the limit or table empty */
	limit = freq_table[i-1].frequency;
#endif
	return limit;
}

/*
 * Returns the ideal nr of cpus to have online for maximum performance given
 * edp constraints at the current temperature
 */
static int get_edp_max_cpus(void)
{
	unsigned int i, limit;
	unsigned int max_mips = 0;
	int max_nr_cpus = PM_QOS_DEFAULT_VALUE;

	for (i = 1; i <= num_possible_cpus(); i++) {
		limit = get_edp_freq_limit(i);
		if (limit * i >= max_mips) {
			max_mips = limit * i;
			max_nr_cpus = i;
		}
	}
	return max_nr_cpus;
}
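/*
 * Worked example for get_edp_max_cpus() (hypothetical limits): with
 * per-core EDP caps of {1500000, 1200000, 900000, 700000} kHz for 1..4
 * online cores, the aggregate capacity limit * i is 1500000, 2400000,
 * 2700000, 2800000 - so four online cores maximize throughput and the
 * PM QoS request is set to 4.
 */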
/*
 * Shouldn't be called with tegra_cpu_lock held - will result in a
 * deadlock otherwise.
 */
static void edp_update_max_cpus(void)
{
	unsigned int max_cpus = get_edp_max_cpus();

	if (!pm_qos_request_active(&edp_max_cpus))
		pm_qos_add_request(&edp_max_cpus, PM_QOS_MAX_ONLINE_CPUS,
				   max_cpus);
	else
		pm_qos_update_request(&edp_max_cpus, max_cpus);
}

/* Must be called while holding tegra_cpu_lock */
static void edp_update_limit(void)
{
	BUG_ON(!mutex_is_locked(&tegra_cpu_lock));
	edp_limit = get_edp_freq_limit(cpumask_weight(&edp_cpumask));
}

static unsigned int edp_governor_speed(unsigned int requested_speed)
{
	if ((!edp_limit) || (requested_speed <= edp_limit))
		return requested_speed;
	else
		return edp_limit;
}

int tegra_edp_get_max_state(struct thermal_cooling_device *cdev,
			    unsigned long *max_state)
{
	*max_state = cpu_edp_limits_size - 1;
	return 0;
}

int tegra_edp_get_cur_state(struct thermal_cooling_device *cdev,
			    unsigned long *cur_state)
{
	*cur_state = edp_thermal_index;
	return 0;
}

int tegra_edp_set_cur_state(struct thermal_cooling_device *cdev,
			    unsigned long cur_state)
{
	mutex_lock(&tegra_cpu_lock);

	edp_thermal_index = cur_state;

	/* Update cpu rate if cpufreq (at least on cpu0) is already started;
	   alter cpu dvfs table for this thermal zone if necessary */
	tegra_cpu_dvfs_alter(edp_thermal_index, &edp_cpumask, true, 0);
	if (target_cpu_speed[0]) {
		edp_update_limit();
		tegra_cpu_set_speed_cap_locked(NULL);
	}
	tegra_cpu_dvfs_alter(edp_thermal_index, &edp_cpumask, false, 0);
	mutex_unlock(&tegra_cpu_lock);

	edp_update_max_cpus();

	return 0;
}

static struct thermal_cooling_device_ops tegra_edp_cooling_ops = {
	.get_max_state = tegra_edp_get_max_state,
	.get_cur_state = tegra_edp_get_cur_state,
	.set_cur_state = tegra_edp_set_cur_state,
};

static int __init edp_init(void)
{
	thermal_cooling_device_register("cpu_edp", NULL,
					&tegra_edp_cooling_ops);
	return 0;
}
module_init(edp_init);

int tegra_system_edp_alarm(bool alarm)
{
	int ret = -ENODEV;

	mutex_lock(&tegra_cpu_lock);
	system_edp_alarm = alarm;

	/* Update cpu rate if cpufreq (at least on cpu0) is already started
	   and cancel emergency throttling after either edp limit is applied
	   or alarm is canceled */
	if (target_cpu_speed[0]) {
		edp_update_limit();
		ret = tegra_cpu_set_speed_cap_locked(NULL);
	}
	if (!ret || !alarm)
		tegra_edp_throttle_cpu_now(0);

	mutex_unlock(&tegra_cpu_lock);

	return ret;
}
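/*
 * CPU hotplug notifier: re-evaluate the EDP frequency limit whenever the
 * set of online CPUs changes. On CPU_UP_PREPARE the tighter limit for the
 * larger core count is applied before the new core comes online (and the
 * plug-in is vetoed on failure); on CPU_DEAD the limit is relaxed after
 * the core is gone.
 */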
" failed to " : " ", new_speed); } if (!ret) ret = tegra_cpu_dvfs_alter( edp_thermal_index, &edp_cpumask, false, event); if (ret) { cpu_clear(cpu, edp_cpumask); edp_update_limit(); } mutex_unlock(&tegra_cpu_lock); break; case CPU_DEAD: mutex_lock(&tegra_cpu_lock); cpu_clear(cpu, edp_cpumask); tegra_cpu_dvfs_alter( edp_thermal_index, &edp_cpumask, true, event); edp_update_limit(); tegra_cpu_set_speed_cap_locked(NULL); mutex_unlock(&tegra_cpu_lock); break; } return notifier_from_errno(ret); } static struct notifier_block tegra_cpu_edp_notifier = { .notifier_call = tegra_cpu_edp_notify, }; static void tegra_cpu_edp_init(bool resume) { tegra_get_system_edp_limits(&system_edp_limits); tegra_get_cpu_edp_limits(&cpu_edp_limits, &cpu_edp_limits_size); if (!(cpu_edp_limits || system_edp_limits)) { if (!resume) pr_info("cpu-tegra: no EDP table is provided\n"); return; } /* FIXME: use the highest temperature limits if sensor is not on-line? * If thermal zone is not set yet by the sensor, edp_thermal_index = 0. * Boot frequency allowed SoC to get here, should work till sensor is * initialized. */ edp_cpumask = *cpu_online_mask; edp_update_limit(); if (!resume) { register_hotcpu_notifier(&tegra_cpu_edp_notifier); pr_info("cpu-tegra: init EDP limit: %u MHz\n", edp_limit/1000); } } static void tegra_cpu_edp_exit(void) { if (!(cpu_edp_limits || system_edp_limits)) return; unregister_hotcpu_notifier(&tegra_cpu_edp_notifier); } static unsigned int cpu_reg_mode_predict_idle_limit(void) { unsigned int cpus, limit; if (!cpu_reg_idle_limits) return 0; cpus = cpumask_weight(&edp_cpumask); BUG_ON(cpus == 0); BUG_ON(edp_thermal_index >= cpu_edp_limits_size); limit = cpu_reg_idle_limits[edp_thermal_index].freq_limits[cpus - 1]; return limit ? : 1; /* bump 0 to 1kHz to differentiate from no-table */ } static int cpu_reg_mode_limits_init(void) { int limits_size; tegra_get_cpu_reg_mode_limits(&cpu_reg_idle_limits, &limits_size, REGULATOR_MODE_IDLE); if (!cpu_reg_idle_limits) { reg_mode = -ENOENT; return -ENOENT; } if (!cpu_edp_limits || (limits_size != cpu_edp_limits_size)) { pr_err("cpu-tegra: EDP and regulator mode tables mismatch\n"); cpu_reg_idle_limits = NULL; reg_mode = -EINVAL; return -EINVAL; } reg_mode_force_normal = false; return 0; } int tegra_cpu_reg_mode_force_normal(bool force) { int ret = 0; mutex_lock(&tegra_cpu_lock); if (cpu_reg_idle_limits) { reg_mode_force_normal = force; ret = tegra_cpu_set_speed_cap_locked(NULL); } mutex_unlock(&tegra_cpu_lock); return ret; } int tegra_update_cpu_edp_limits(void) { int ret; mutex_lock(&tegra_cpu_lock); tegra_recalculate_cpu_edp_limits(); cpu_reg_mode_limits_init(); ret = tegra_cpu_set_speed_cap_locked(NULL); mutex_unlock(&tegra_cpu_lock); return ret; } #ifdef CONFIG_DEBUG_FS static int system_edp_alarm_get(void *data, u64 *val) { *val = (u64)system_edp_alarm; return 0; } static int system_edp_alarm_set(void *data, u64 val) { if (val > 1) { /* emulate emergency throttling */ tegra_edp_throttle_cpu_now(val); return 0; } return tegra_system_edp_alarm((bool)val); } DEFINE_SIMPLE_ATTRIBUTE(system_edp_alarm_fops, system_edp_alarm_get, system_edp_alarm_set, "%llu\n"); static int reg_mode_force_normal_get(void *data, u64 *val) { *val = (u64)reg_mode_force_normal; return 0; } static int reg_mode_force_normal_set(void *data, u64 val) { return tegra_cpu_reg_mode_force_normal(val); } DEFINE_SIMPLE_ATTRIBUTE(force_normal_fops, reg_mode_force_normal_get, reg_mode_force_normal_set, "%llu\n"); static int reg_mode_get(void *data, u64 *val) { *val = (u64)reg_mode; return 0; } 
static int __init tegra_edp_debug_init(struct dentry *cpu_tegra_debugfs_root)
{
	if (!debugfs_create_file("edp_alarm", 0644, cpu_tegra_debugfs_root,
				 NULL, &system_edp_alarm_fops))
		return -ENOMEM;

	if (!debugfs_create_file("reg_mode_force_normal", 0644,
				 cpu_tegra_debugfs_root, NULL,
				 &force_normal_fops))
		return -ENOMEM;

	if (!debugfs_create_file("reg_mode", 0444,
				 cpu_tegra_debugfs_root, NULL, &reg_mode_fops))
		return -ENOMEM;

	return 0;
}
#endif

#else	/* CONFIG_TEGRA_EDP_LIMITS */
#define edp_governor_speed(requested_speed) (requested_speed)
#define tegra_cpu_edp_init(resume)
#define tegra_cpu_edp_exit()
#define tegra_edp_debug_init(cpu_tegra_debugfs_root) (0)
#define cpu_reg_mode_predict_idle_limit() (0)
#endif	/* CONFIG_TEGRA_EDP_LIMITS */

#ifdef CONFIG_DEBUG_FS

static struct dentry *cpu_tegra_debugfs_root;

static int __init tegra_cpu_debug_init(void)
{
	cpu_tegra_debugfs_root = debugfs_create_dir("cpu-tegra", NULL);

	if (!cpu_tegra_debugfs_root)
		return -ENOMEM;

	if (tegra_edp_debug_init(cpu_tegra_debugfs_root))
		goto err_out;

	return 0;

err_out:
	debugfs_remove_recursive(cpu_tegra_debugfs_root);
	return -ENOMEM;
}

static void __exit tegra_cpu_debug_exit(void)
{
	debugfs_remove_recursive(cpu_tegra_debugfs_root);
}

late_initcall(tegra_cpu_debug_init);
module_exit(tegra_cpu_debug_exit);
#endif /* CONFIG_DEBUG_FS */

static int tegra_verify_speed(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, freq_table);
}

unsigned int tegra_getspeed(unsigned int cpu)
{
	unsigned long rate;

	if (cpu >= CONFIG_NR_CPUS)
		return 0;

	rate = clk_get_rate(cpu_clk) / 1000;
	return rate;
}

int tegra_update_cpu_speed(unsigned long rate)
{
	int ret = 0;
	struct cpufreq_freqs freqs;
	unsigned int mode, mode_limit = cpu_reg_mode_predict_idle_limit();

	freqs.old = tegra_getspeed(0);
	freqs.new = rate;

	rate = clk_round_rate(cpu_clk, rate * 1000);
	if (!IS_ERR_VALUE(rate))
		freqs.new = rate / 1000;

	mode = REGULATOR_MODE_NORMAL;
	if (mode_limit && (mode_limit < freqs.new || reg_mode_force_normal)) {
		mode_limit = 0;		/* prevent further mode controls */
		if (reg_mode != mode) {
			ret = tegra_dvfs_rail_set_mode(tegra_cpu_rail, mode);
			if (ret)
				return ret;
			reg_mode = mode;
		}
	}

	if (freqs.old == freqs.new)
		goto _out;

	/*
	 * Vote on memory bus frequency based on cpu frequency
	 * This sets the minimum frequency, display or avp may request higher
	 */
	if (freqs.old < freqs.new) {
		ret = tegra_update_mselect_rate(freqs.new);
		if (ret) {
			pr_err("cpu-tegra: Failed to scale mselect for cpu frequency %u kHz\n",
			       freqs.new);
			return ret;
		}
		if (emc_clk) {
			ret = clk_set_rate(emc_clk,
					   tegra_emc_cpu_limit(freqs.new));
			if (ret) {
				pr_err("cpu-tegra: Failed to scale emc for cpu frequency %u kHz\n",
				       freqs.new);
				return ret;
			}
		}
	}

	for_each_online_cpu(freqs.cpu)
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

#ifdef CONFIG_CPU_FREQ_DEBUG
	printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
	       freqs.old, freqs.new);
#endif

	ret = clk_set_rate(cpu_clk, freqs.new * 1000);
	if (ret) {
		pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
		       freqs.new);
		return ret;
	}

	for_each_online_cpu(freqs.cpu)
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	if (freqs.old > freqs.new) {
		if (emc_clk)
			clk_set_rate(emc_clk, tegra_emc_cpu_limit(freqs.new));
		tegra_update_mselect_rate(freqs.new);
	}

_out:
	mode = REGULATOR_MODE_IDLE;
	if ((mode_limit >= freqs.new) && (reg_mode != mode))
		if (!tegra_dvfs_rail_set_mode(tegra_cpu_rail, mode))
			reg_mode = mode;

	return 0;
}
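/*
 * The helpers below feed the auto-hotplug governor. They operate on
 * target_cpu_speed[], i.e. the per-CPU *requested* rates, rather than the
 * actual clock rate: all cores share cpu_clk, which runs at the fastest
 * request (see tegra_cpu_highest_speed()).
 */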
unsigned int tegra_count_slow_cpus(unsigned long speed_limit)
{
	unsigned int cnt = 0;
	int i;

	for_each_online_cpu(i)
		if (target_cpu_speed[i] <= speed_limit)
			cnt++;
	return cnt;
}

unsigned int tegra_get_slowest_cpu_n(void)
{
	unsigned int cpu = nr_cpu_ids;
	unsigned long rate = ULONG_MAX;
	int i;

	for_each_online_cpu(i)
		if ((i > 0) && (rate > target_cpu_speed[i])) {
			cpu = i;
			rate = target_cpu_speed[i];
		}
	return cpu;
}

unsigned long tegra_cpu_lowest_speed(void)
{
	unsigned long rate = ULONG_MAX;
	int i;

	for_each_online_cpu(i)
		rate = min(rate, target_cpu_speed[i]);
	return rate;
}

unsigned long tegra_cpu_highest_speed(void)
{
	unsigned long policy_max = ULONG_MAX;
	unsigned long rate = 0;
	int i;

	for_each_online_cpu(i) {
		if (force_policy_max)
			policy_max = min(policy_max, policy_max_speed[i]);
		rate = max(rate, target_cpu_speed[i]);
	}
	rate = min(rate, policy_max);
	return rate;
}

void tegra_cpu_set_volt_cap(unsigned int cap)
{
	mutex_lock(&tegra_cpu_lock);
	if (cap != volt_capped_speed) {
		volt_capped_speed = cap;
		tegra_cpu_set_speed_cap_locked(NULL);
	}
	mutex_unlock(&tegra_cpu_lock);
	if (cap)
		pr_debug("tegra_cpu: volt limit to %u kHz\n", cap);
	else
		pr_debug("tegra_cpu: volt limit removed\n");
}

static unsigned int volt_cap_speed(unsigned int requested_speed)
{
	if (volt_capped_speed && requested_speed > volt_capped_speed)
		return volt_capped_speed;
	return requested_speed;
}

/* Must be called with tegra_cpu_lock held */
int tegra_cpu_set_speed_cap_locked(unsigned int *speed_cap)
{
	int ret = 0;
	unsigned int new_speed = tegra_cpu_highest_speed();
	BUG_ON(!mutex_is_locked(&tegra_cpu_lock));
#ifdef CONFIG_TEGRA_EDP_LIMITS
	edp_update_limit();
#endif

	if (is_suspended)
		return -EBUSY;

	new_speed = tegra_throttle_governor_speed(new_speed);
	new_speed = edp_governor_speed(new_speed);
	new_speed = user_cap_speed(new_speed);
	new_speed = volt_cap_speed(new_speed);
	if (speed_cap)
		*speed_cap = new_speed;

	ret = tegra_update_cpu_speed(new_speed);
	if (ret == 0)
		tegra_auto_hotplug_governor(new_speed, false);
	return ret;
}

int tegra_cpu_set_speed_cap(unsigned int *speed_cap)
{
	int ret;

	mutex_lock(&tegra_cpu_lock);
	ret = tegra_cpu_set_speed_cap_locked(speed_cap);
	mutex_unlock(&tegra_cpu_lock);
	return ret;
}

int tegra_suspended_target(unsigned int target_freq)
{
	unsigned int new_speed = target_freq;

	if (!is_suspended)
		return -EBUSY;

	/* apply only "hard" caps */
	new_speed = tegra_throttle_governor_speed(new_speed);
	new_speed = edp_governor_speed(new_speed);

	return tegra_update_cpu_speed(new_speed);
}

static int tegra_target(struct cpufreq_policy *policy,
			unsigned int target_freq,
			unsigned int relation)
{
	int idx;
	unsigned int freq;
	unsigned int new_speed;
	int ret = 0;

	mutex_lock(&tegra_cpu_lock);

	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
					     relation, &idx);
	if (ret)
		goto _out;

	freq = freq_table[idx].frequency;

	target_cpu_speed[policy->cpu] = freq;
	ret = tegra_cpu_set_speed_cap_locked(&new_speed);
_out:
	mutex_unlock(&tegra_cpu_lock);

	return ret;
}
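/*
 * Suspend/resume policy: on PM_SUSPEND_PREPARE the frequency is pinned to
 * the table's suspend entry through the PM QoS min/max requests, and
 * is_suspended blocks further cap updates (tegra_suspended_target() is the
 * only scaling path allowed while suspended); on PM_POST_SUSPEND the QoS
 * requests are restored to their defaults and the caps re-evaluated.
 */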
static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
			   void *dummy)
{
	if (event == PM_SUSPEND_PREPARE) {
		pm_qos_update_request(&cpufreq_min_req,
			freq_table[suspend_index].frequency);
		pm_qos_update_request(&cpufreq_max_req,
			freq_table[suspend_index].frequency);

		mutex_lock(&tegra_cpu_lock);
		is_suspended = true;
		if (cpu_reg_idle_limits)
			reg_mode_force_normal = true;
		pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
			freq_table[suspend_index].frequency);
		tegra_update_cpu_speed(freq_table[suspend_index].frequency);
		tegra_auto_hotplug_governor(
			freq_table[suspend_index].frequency, true);
		mutex_unlock(&tegra_cpu_lock);
	} else if (event == PM_POST_SUSPEND) {
		unsigned int freq;

		mutex_lock(&tegra_cpu_lock);
		is_suspended = false;
		reg_mode_force_normal = false;
		tegra_cpu_edp_init(true);
		tegra_cpu_set_speed_cap_locked(&freq);
		pr_info("Tegra cpufreq resume: restoring frequency to %d kHz\n",
			freq);
		mutex_unlock(&tegra_cpu_lock);

		pm_qos_update_request(&cpufreq_max_req,
			PM_QOS_CPU_FREQ_MAX_DEFAULT_VALUE);
		pm_qos_update_request(&cpufreq_min_req,
			PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
	}

	return NOTIFY_OK;
}

static struct notifier_block tegra_cpu_pm_notifier = {
	.notifier_call = tegra_pm_notify,
};

static int tegra_cpu_init(struct cpufreq_policy *policy)
{
	int idx, ret;
	unsigned int freq;

	if (policy->cpu >= CONFIG_NR_CPUS)
		return -EINVAL;

	cpu_clk = clk_get_sys(NULL, "cpu");
	if (IS_ERR(cpu_clk))
		return PTR_ERR(cpu_clk);

	emc_clk = clk_get_sys("tegra-cpu", "cpu_emc");
	if (IS_ERR(emc_clk))
		emc_clk = NULL;

	freq = tegra_getspeed(policy->cpu);
	if (emc_clk) {
		clk_set_rate(emc_clk, tegra_emc_cpu_limit(freq));
		clk_prepare_enable(emc_clk);
	}
	clk_prepare_enable(cpu_clk);

	cpufreq_frequency_table_cpuinfo(policy, freq_table);
	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

	/* clip boot frequency to table entry */
	ret = cpufreq_frequency_table_target(policy, freq_table, freq,
					     CPUFREQ_RELATION_H, &idx);
	if (!ret && (freq != freq_table[idx].frequency)) {
		ret = tegra_update_cpu_speed(freq_table[idx].frequency);
		if (!ret)
			freq = freq_table[idx].frequency;
	}
	policy->cur = freq;
	target_cpu_speed[policy->cpu] = policy->cur;

	/* FIXME: what's the actual transition time? */
	policy->cpuinfo.transition_latency = 300 * 1000;

	cpumask_copy(policy->cpus, cpu_possible_mask);

	return 0;
}

static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_cpuinfo(policy, freq_table);
	if (emc_clk) {
		clk_disable_unprepare(emc_clk);
		clk_put(emc_clk);
	}
	clk_put(cpu_clk);
	return 0;
}
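/*
 * Policy notifier: snapshot each policy's max frequency (clipped down to a
 * table entry via CPUFREQ_RELATION_H) into policy_max_speed[] so that
 * tegra_cpu_highest_speed() can honor it when force_policy_max is set;
 * also merge the throttling "stats" attribute group into the cpufreq
 * sysfs directory the first time around.
 */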
static int tegra_cpufreq_policy_notifier(
	struct notifier_block *nb, unsigned long event, void *data)
{
#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
	static int once = 1;
#endif
	int i, ret;
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_NOTIFY) {
		ret = cpufreq_frequency_table_target(policy, freq_table,
			policy->max, CPUFREQ_RELATION_H, &i);
		policy_max_speed[policy->cpu] =
			ret ? policy->max : freq_table[i].frequency;

#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
		if (once &&
		    sysfs_merge_group(&policy->kobj, &stats_attr_grp) == 0)
			once = 0;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block tegra_cpufreq_policy_nb = {
	.notifier_call = tegra_cpufreq_policy_notifier,
};

static struct freq_attr *tegra_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
	&throttle,
#endif
	NULL,
};

static struct cpufreq_driver tegra_cpufreq_driver = {
	.verify		= tegra_verify_speed,
	.target		= tegra_target,
	.get		= tegra_getspeed,
	.init		= tegra_cpu_init,
	.exit		= tegra_cpu_exit,
	.name		= "tegra",
	.attr		= tegra_cpufreq_attr,
};

static int __init tegra_cpufreq_init(void)
{
	int ret = 0;

	struct tegra_cpufreq_table_data *table_data =
		tegra_cpufreq_table_get();
	if (IS_ERR_OR_NULL(table_data))
		return -EINVAL;

	suspend_index = table_data->suspend_index;

	ret = tegra_throttle_init(&tegra_cpu_lock);
	if (ret)
		return ret;

	ret = tegra_auto_hotplug_init(&tegra_cpu_lock);
	if (ret)
		return ret;

	freq_table = table_data->freq_table;
	mutex_lock(&tegra_cpu_lock);
	tegra_cpu_edp_init(false);
	mutex_unlock(&tegra_cpu_lock);

	pm_qos_add_request(&cpufreq_max_req, PM_QOS_CPU_FREQ_MAX,
			   PM_QOS_CPU_FREQ_MAX_DEFAULT_VALUE);
	pm_qos_add_request(&cpufreq_min_req, PM_QOS_CPU_FREQ_MIN,
			   PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);

	ret = register_pm_notifier(&tegra_cpu_pm_notifier);
	if (ret)
		return ret;

	ret = cpufreq_register_notifier(
		&tegra_cpufreq_policy_nb, CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	return cpufreq_register_driver(&tegra_cpufreq_driver);
}

#ifdef CONFIG_TEGRA_CPU_FREQ_GOVERNOR_KERNEL_START
static int __init tegra_cpufreq_governor_init(void)
{
	/*
	 * At this point, the full range of clocks should be available.
	 * Set the CPU governor to performance for a faster boot-up.
	 */
	unsigned int i;
	struct cpufreq_policy *policy;
	static char *start_scaling_gov = "performance";

	for_each_online_cpu(i) {
		policy = cpufreq_cpu_get(i);
		if (!(policy && policy->governor &&
		      !(strcmp(policy->governor->name, start_scaling_gov) &&
			cpufreq_set_gov(start_scaling_gov, i))))
			pr_info("Failed to set the governor to %s for cpu %u\n",
				start_scaling_gov, i);
		if (policy)
			cpufreq_cpu_put(policy);
	}

	return 0;
}
late_initcall_sync(tegra_cpufreq_governor_init);
#endif

static void __exit tegra_cpufreq_exit(void)
{
	tegra_throttle_exit();
	tegra_cpu_edp_exit();
	tegra_auto_hotplug_exit();
	cpufreq_unregister_driver(&tegra_cpufreq_driver);
	cpufreq_unregister_notifier(
		&tegra_cpufreq_policy_nb, CPUFREQ_POLICY_NOTIFIER);
}


MODULE_AUTHOR("Colin Cross");
MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
MODULE_LICENSE("GPL");
module_init(tegra_cpufreq_init);
module_exit(tegra_cpufreq_exit);