author		Allen Martin <amartin@nvidia.com>	2011-06-28 10:40:30 -0700
committer	Dan Willemsen <dwillemsen@nvidia.com>	2011-11-30 21:38:51 -0800
commit		5cdaac0eca9378c7ce71352dc99ba9b69c7f6342 (patch)
tree		c1a138cb5c7ec979f0632b7ac2a2afeeb3b4eb5c /drivers/cpufreq
parent		35226a84f238c29f4d6f2631bea93fb53cf5a680 (diff)
cpufreq: interactive: remove debug trace code
Remove debug trace code in preparation for upstreaming.

Change-Id: I0905885e75031f5e9d7cb06878fb68c1fd06d4fe
Signed-off-by: Allen Martin <amartin@nvidia.com>
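The code being dropped is a self-contained ring-buffer trace: dbgpr() snprintf-formats a line (plus CPU, jiffies and run-queue length) into a 256-entry circular array, dbgdump() replays it through a /proc read, and the one message still useful after the cleanup becomes a pr_warn_once(). The following is a condensed, userspace-compilable sketch of that pattern, not the kernel code itself: the spinlock, per-CPU fields and /proc plumbing are left out, and a plain sequence counter stands in for the jiffies/cpu/nr_running columns.

/*
 * Sketch of the ring-buffer trace removed by this patch.  dbgpr()
 * formats a line into a fixed circular array and, when the buffer is
 * full, overwrites the oldest entry; dbgdump() prints whatever is
 * buffered, oldest first.
 */
#include <stdarg.h>
#include <stdio.h>

#define BUFSZ	128
#define NDBGLNS	256

struct dbgln {
	unsigned long seq;
	char buf[BUFSZ];
};

static struct dbgln dbgbuf[NDBGLNS];
static int dbgbufs;			/* index of oldest entry */
static int dbgbufe;			/* index of next write slot */
static unsigned long seq;

static void dbgpr(const char *fmt, ...)
{
	va_list args;
	int n = dbgbufe;

	va_start(args, fmt);
	vsnprintf(dbgbuf[n].buf, BUFSZ, fmt, args);
	va_end(args);
	dbgbuf[n].seq = seq++;

	if (++dbgbufe >= NDBGLNS)
		dbgbufe = 0;

	/* Buffer full: drop the oldest line by advancing the start index. */
	if (dbgbufe == dbgbufs && ++dbgbufs >= NDBGLNS)
		dbgbufs = 0;
}

static void dbgdump(void)
{
	int i = dbgbufs;
	int j = dbgbufe;

	while (i != j) {
		printf("%lu %s", dbgbuf[i].seq, dbgbuf[i].buf);
		if (++i == NDBGLNS)
			i = 0;
	}
	dbgbufs = dbgbufe = 0;
}

int main(void)
{
	int k;

	for (k = 0; k < 300; k++)
		dbgpr("sample line %d\n", k);

	dbgdump();	/* prints the newest NDBGLNS - 1 lines, in order */
	return 0;
}

The overwrite-oldest behaviour keeps tracing cheap enough to call from the governor's timer and idle paths, which is why a fixed 256-line buffer and a one-shot dump were sufficient here; equivalent ad-hoc tracing in an upstream tree would typically go through the kernel's ftrace facilities (for example trace_printk()) rather than a private /proc buffer.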
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/cpufreq_interactive.c	164
1 file changed, 6 insertions, 158 deletions
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index bcbb7ac8306c..26a423bb3da0 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -67,93 +67,6 @@ static unsigned long go_maxspeed_load;
#define DEFAULT_MIN_SAMPLE_TIME 80000;
static unsigned long min_sample_time;
-#define DEBUG 0
-#define BUFSZ 128
-
-#if DEBUG
-#include <linux/proc_fs.h>
-
-struct dbgln {
- int cpu;
- unsigned long jiffy;
- unsigned long run;
- char buf[BUFSZ];
-};
-
-#define NDBGLNS 256
-
-static struct dbgln dbgbuf[NDBGLNS];
-static int dbgbufs;
-static int dbgbufe;
-static struct proc_dir_entry *dbg_proc;
-static spinlock_t dbgpr_lock;
-
-static u64 up_request_time;
-static unsigned int up_max_latency;
-
-static void dbgpr(char *fmt, ...)
-{
- va_list args;
- int n;
- unsigned long flags;
-
- spin_lock_irqsave(&dbgpr_lock, flags);
- n = dbgbufe;
- va_start(args, fmt);
- vsnprintf(dbgbuf[n].buf, BUFSZ, fmt, args);
- va_end(args);
- dbgbuf[n].cpu = smp_processor_id();
- dbgbuf[n].run = nr_running();
- dbgbuf[n].jiffy = jiffies;
-
- if (++dbgbufe >= NDBGLNS)
- dbgbufe = 0;
-
- if (dbgbufe == dbgbufs)
- if (++dbgbufs >= NDBGLNS)
- dbgbufs = 0;
-
- spin_unlock_irqrestore(&dbgpr_lock, flags);
-}
-
-static void dbgdump(void)
-{
- int i, j;
- unsigned long flags;
- static struct dbgln prbuf[NDBGLNS];
-
- spin_lock_irqsave(&dbgpr_lock, flags);
- i = dbgbufs;
- j = dbgbufe;
- memcpy(prbuf, dbgbuf, sizeof(dbgbuf));
- dbgbufs = 0;
- dbgbufe = 0;
- spin_unlock_irqrestore(&dbgpr_lock, flags);
-
- while (i != j)
- {
- printk("%lu %d %lu %s",
- prbuf[i].jiffy, prbuf[i].cpu, prbuf[i].run,
- prbuf[i].buf);
- if (++i == NDBGLNS)
- i = 0;
- }
-}
-
-static int dbg_proc_read(char *buffer, char **start, off_t offset,
- int count, int *peof, void *dat)
-{
- printk("max up_task latency=%uus\n", up_max_latency);
- dbgdump();
- *peof = 1;
- return 0;
-}
-
-
-#else
-#define dbgpr(...) do {} while (0)
-#endif
-
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
unsigned int event);
@@ -202,16 +115,8 @@ static void cpufreq_interactive_timer(unsigned long data)
smp_wmb();
/* If we raced with cancelling a timer, skip. */
- if (!idle_exit_time) {
- dbgpr("timer %d: no valid idle exit sample\n", (int) data);
+ if (!idle_exit_time)
goto exit;
- }
-
-#if DEBUG
- if ((int) jiffies - (int) pcpu->cpu_timer.expires >= 10)
- dbgpr("timer %d: late by %d ticks\n",
- (int) data, jiffies - pcpu->cpu_timer.expires);
-#endif
delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
@@ -220,11 +125,8 @@ static void cpufreq_interactive_timer(unsigned long data)
/*
* If timer ran less than 1ms after short-term sample started, retry.
*/
- if (delta_time < 1000) {
- dbgpr("timer %d: time delta %u too short exit=%llu now=%llu\n", (int) data,
- delta_time, idle_exit_time, pcpu->timer_run_time);
+ if (delta_time < 1000)
goto rearm;
- }
if (delta_idle > delta_time)
cpu_load = 0;
@@ -258,17 +160,15 @@ static void cpufreq_interactive_timer(unsigned long data)
if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
new_freq, CPUFREQ_RELATION_H,
&index)) {
- dbgpr("timer %d: cpufreq_frequency_table_target error\n", (int) data);
+ pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
+ (int) data);
goto rearm;
}
new_freq = pcpu->freq_table[index].frequency;
if (pcpu->target_freq == new_freq)
- {
- dbgpr("timer %d: load=%d, already at %d\n", (int) data, cpu_load, new_freq);
goto rearm_if_notmax;
- }
/*
* Do not scale down unless we have been at this frequency for the
@@ -276,14 +176,10 @@ static void cpufreq_interactive_timer(unsigned long data)
*/
if (new_freq < pcpu->target_freq) {
if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) <
- min_sample_time) {
- dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq);
+ min_sample_time)
goto rearm;
- }
}
- dbgpr("timer %d: load=%d cur=%d tgt=%d queue\n", (int) data, cpu_load, pcpu->target_freq, new_freq);
-
if (new_freq < pcpu->target_freq) {
pcpu->target_freq = new_freq;
spin_lock_irqsave(&down_cpumask_lock, flags);
@@ -292,9 +188,6 @@ static void cpufreq_interactive_timer(unsigned long data)
queue_work(down_wq, &freq_scale_down_work);
} else {
pcpu->target_freq = new_freq;
-#if DEBUG
- up_request_time = ktime_to_us(ktime_get());
-#endif
spin_lock_irqsave(&up_cpumask_lock, flags);
cpumask_set_cpu(data, &up_cpumask);
spin_unlock_irqrestore(&up_cpumask_lock, flags);
@@ -319,10 +212,8 @@ rearm:
if (pcpu->target_freq == pcpu->policy->min) {
smp_rmb();
- if (pcpu->idling) {
- dbgpr("timer %d: cpu idle, don't re-arm\n", (int) data);
+ if (pcpu->idling)
goto exit;
- }
pcpu->timer_idlecancel = 1;
}
@@ -330,7 +221,6 @@ rearm:
pcpu->time_in_idle = get_cpu_idle_time_us(
data, &pcpu->idle_exit_time);
mod_timer(&pcpu->cpu_timer, jiffies + 2);
- dbgpr("timer %d: set timer for %lu exit=%llu\n", (int) data, pcpu->cpu_timer.expires, pcpu->idle_exit_time);
}
exit:
@@ -367,9 +257,6 @@ static void cpufreq_interactive_idle(void)
smp_processor_id(), &pcpu->idle_exit_time);
pcpu->timer_idlecancel = 0;
mod_timer(&pcpu->cpu_timer, jiffies + 2);
- dbgpr("idle: enter at %d, set timer for %lu exit=%llu\n",
- pcpu->target_freq, pcpu->cpu_timer.expires,
- pcpu->idle_exit_time);
}
#endif
} else {
@@ -380,7 +267,6 @@ static void cpufreq_interactive_idle(void)
* CPU didn't go busy; we'll recheck things upon idle exit.
*/
if (pending && pcpu->timer_idlecancel) {
- dbgpr("idle: cancel timer for %lu\n", pcpu->cpu_timer.expires);
del_timer(&pcpu->cpu_timer);
/*
* Ensure last timer run time is after current idle
@@ -415,13 +301,6 @@ static void cpufreq_interactive_idle(void)
&pcpu->idle_exit_time);
pcpu->timer_idlecancel = 0;
mod_timer(&pcpu->cpu_timer, jiffies + 2);
- dbgpr("idle: exit, set timer for %lu exit=%llu\n", pcpu->cpu_timer.expires, pcpu->idle_exit_time);
-#if DEBUG
- } else if (timer_pending(&pcpu->cpu_timer) == 0 &&
- pcpu->timer_run_time < pcpu->idle_exit_time) {
- dbgpr("idle: timer not run yet: exit=%llu tmrrun=%llu\n",
- pcpu->idle_exit_time, pcpu->timer_run_time);
-#endif
}
}
@@ -433,12 +312,6 @@ static int cpufreq_interactive_up_task(void *data)
unsigned long flags;
struct cpufreq_interactive_cpuinfo *pcpu;
-#if DEBUG
- u64 now;
- u64 then;
- unsigned int lat;
-#endif
-
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&up_cpumask_lock, flags);
@@ -455,18 +328,6 @@ static int cpufreq_interactive_up_task(void *data)
set_current_state(TASK_RUNNING);
-#if DEBUG
- then = up_request_time;
- now = ktime_to_us(ktime_get());
-
- if (now > then) {
- lat = ktime_to_us(ktime_get()) - then;
-
- if (lat > up_max_latency)
- up_max_latency = lat;
- }
-#endif
-
tmp_mask = up_cpumask;
cpumask_clear(&up_cpumask);
spin_unlock_irqrestore(&up_cpumask_lock, flags);
@@ -474,11 +335,6 @@ static int cpufreq_interactive_up_task(void *data)
for_each_cpu(cpu, &tmp_mask) {
pcpu = &per_cpu(cpuinfo, cpu);
- if (nr_running() == 1) {
- dbgpr("up %d: tgt=%d nothing else running\n", cpu,
- pcpu->target_freq);
- }
-
smp_rmb();
if (!pcpu->governor_enabled)
@@ -490,7 +346,6 @@ static int cpufreq_interactive_up_task(void *data)
pcpu->freq_change_time_in_idle =
get_cpu_idle_time_us(cpu,
&pcpu->freq_change_time);
- dbgpr("up %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur);
}
}
@@ -523,7 +378,6 @@ static void cpufreq_interactive_freq_down(struct work_struct *work)
pcpu->freq_change_time_in_idle =
get_cpu_idle_time_us(cpu,
&pcpu->freq_change_time);
- dbgpr("down %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur);
}
}
@@ -688,12 +542,6 @@ static int __init cpufreq_interactive_init(void)
spin_lock_init(&up_cpumask_lock);
spin_lock_init(&down_cpumask_lock);
-#if DEBUG
- spin_lock_init(&dbgpr_lock);
- dbg_proc = create_proc_entry("igov", S_IWUSR | S_IRUGO, NULL);
- dbg_proc->read_proc = dbg_proc_read;
-#endif
-
return cpufreq_register_governor(&cpufreq_gov_interactive);
err_freeuptask: