diff options
37 files changed, 1598 insertions, 609 deletions
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index ef028cd62734..8269eea7ee77 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig @@ -75,6 +75,10 @@ config TEGRA_FIQ_DEBUGGER endif +config TEGRA_EMC_SCALING_ENABLE + bool "Enable scaling the memory frequency" + default n + config TEGRA_CPU_DVFS bool "Enable voltage scaling on Tegra CPU" default y diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile index 431ecbb659dc..9262acde3f0c 100644 --- a/arch/arm/mach-tegra/Makefile +++ b/arch/arm/mach-tegra/Makefile @@ -28,9 +28,11 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_clocks.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_dvfs.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_fuse.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += suspend-t2.o -obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_save.o -obj-$(CONFIG_CPU_V7) += cortex-a9.o -obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pinmux-t2-tables.o +obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_save.o +obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_emc.o +obj-$(CONFIG_CPU_V7) += cortex-a9.o + +obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pinmux-t2-tables.o obj-$(CONFIG_SMP) += localtimer.o obj-$(CONFIG_SMP) += platsmp.o obj-y += headsmp.o diff --git a/arch/arm/mach-tegra/board-ventana.c b/arch/arm/mach-tegra/board-ventana.c index ff0a81d93fee..2f22bea260d9 100644 --- a/arch/arm/mach-tegra/board-ventana.c +++ b/arch/arm/mach-tegra/board-ventana.c @@ -285,7 +285,7 @@ static void ventana_i2c_init(void) } static struct gpio_keys_button ventana_keys[] = { - [0] = GPIO_KEY(KEY_MENU, PQ0, 0), + [0] = GPIO_KEY(KEY_MENU, PQ3, 0), [1] = GPIO_KEY(KEY_HOME, PQ1, 0), [2] = GPIO_KEY(KEY_BACK, PQ2, 0), [3] = GPIO_KEY(KEY_VOLUMEUP, PQ5, 0), diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h index c47a21f71260..04f1538b1a37 100644 --- a/arch/arm/mach-tegra/board.h +++ b/arch/arm/mach-tegra/board.h @@ -32,6 +32,7 @@ void __init tegra_reserve(unsigned long carveout_size, unsigned long fb_size, void __init 
tegra_protected_aperture_init(unsigned long aperture); void tegra_move_framebuffer(unsigned long to, unsigned long from, unsigned long size); +int tegra_dvfs_rail_disable_by_name(const char *reg_id); extern unsigned long tegra_bootloader_fb_start; extern unsigned long tegra_bootloader_fb_size; diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c index ad5f483af7fc..e3936af38356 100644 --- a/arch/arm/mach-tegra/clock.c +++ b/arch/arm/mach-tegra/clock.c @@ -87,7 +87,7 @@ static inline bool clk_is_auto_dvfs(struct clk *c) static inline bool clk_is_dvfs(struct clk *c) { - return c->is_dvfs; + return (c->dvfs != NULL); } static inline bool clk_cansleep(struct clk *c) @@ -207,22 +207,6 @@ void clk_set_cansleep(struct clk *c) mutex_unlock(&clock_list_lock); } -int tegra_dvfs_set_rate(struct clk *c, unsigned long rate) -{ - unsigned long flags; - int ret; - - if (!clk_is_dvfs(c)) - return -EINVAL; - - clk_lock_save(c, flags); - ret = tegra_dvfs_set_rate_locked(c, rate); - clk_unlock_restore(c, flags); - - return ret; -} -EXPORT_SYMBOL(tegra_dvfs_set_rate); - int clk_reparent(struct clk *c, struct clk *parent) { c->parent = parent; @@ -233,8 +217,6 @@ void clk_init(struct clk *c) { clk_lock_init(c); - INIT_LIST_HEAD(&c->dvfs); - if (c->ops && c->ops->init) c->ops->init(c); @@ -260,7 +242,7 @@ int clk_enable(struct clk *c) clk_lock_save(c, flags); if (clk_is_auto_dvfs(c)) { - ret = tegra_dvfs_set_rate_locked(c, clk_get_rate_locked(c)); + ret = tegra_dvfs_set_rate(c, clk_get_rate_locked(c)); if (ret) goto out; } @@ -313,7 +295,7 @@ void clk_disable(struct clk *c) c->refcnt--; if (clk_is_auto_dvfs(c) && c->refcnt == 0) - tegra_dvfs_set_rate_locked(c, 0); + tegra_dvfs_set_rate(c, 0); clk_unlock_restore(c, flags); } @@ -338,7 +320,7 @@ int clk_set_parent(struct clk *c, struct clk *parent) if (clk_is_auto_dvfs(c) && c->refcnt > 0 && (!c->parent || new_rate > old_rate)) { - ret = tegra_dvfs_set_rate_locked(c, new_rate); + ret = tegra_dvfs_set_rate(c, new_rate); 
if (ret) goto out; } @@ -349,7 +331,7 @@ int clk_set_parent(struct clk *c, struct clk *parent) if (clk_is_auto_dvfs(c) && c->refcnt > 0 && new_rate < old_rate) - ret = tegra_dvfs_set_rate_locked(c, new_rate); + ret = tegra_dvfs_set_rate(c, new_rate); out: clk_unlock_restore(c, flags); @@ -368,6 +350,7 @@ int clk_set_rate(struct clk *c, unsigned long rate) int ret = 0; unsigned long flags; unsigned long old_rate; + long new_rate; clk_lock_save(c, flags); @@ -381,8 +364,19 @@ int clk_set_rate(struct clk *c, unsigned long rate) if (rate > c->max_rate) rate = c->max_rate; + if (c->ops && c->ops->round_rate) { + new_rate = c->ops->round_rate(c, rate); + + if (new_rate < 0) { + ret = new_rate; + goto out; + } + + rate = new_rate; + } + if (clk_is_auto_dvfs(c) && rate > old_rate && c->refcnt > 0) { - ret = tegra_dvfs_set_rate_locked(c, rate); + ret = tegra_dvfs_set_rate(c, rate); if (ret) goto out; } @@ -392,7 +386,7 @@ int clk_set_rate(struct clk *c, unsigned long rate) goto out; if (clk_is_auto_dvfs(c) && rate < old_rate && c->refcnt > 0) - ret = tegra_dvfs_set_rate_locked(c, rate); + ret = tegra_dvfs_set_rate(c, rate); out: clk_unlock_restore(c, flags); @@ -527,36 +521,6 @@ void __init tegra_init_clock(void) } /* - * Iterate through all clocks, setting the dvfs rate to the current clock - * rate on all auto dvfs clocks, and to the saved dvfs rate on all manual - * dvfs clocks. Used to enable dvfs during late init, after the regulators - * are available. 
- */ -void __init tegra_clk_set_dvfs_rates(void) -{ - unsigned long flags; - struct clk *c; - - mutex_lock(&clock_list_lock); - - list_for_each_entry(c, &clocks, node) { - clk_lock_save(c, flags); - if (clk_is_auto_dvfs(c)) { - if (c->refcnt > 0) - tegra_dvfs_set_rate_locked(c, - clk_get_rate_locked(c)); - else - tegra_dvfs_set_rate_locked(c, 0); - } else if (clk_is_dvfs(c)) { - tegra_dvfs_set_rate_locked(c, c->dvfs_rate); - } - clk_unlock_restore(c, flags); - } - - mutex_unlock(&clock_list_lock); -} - -/* * Iterate through all clocks, disabling any for which the refcount is 0 * but the clock init detected the bootloader left the clock on. */ @@ -587,7 +551,6 @@ int __init tegra_late_init_clock(void) { tegra_dvfs_late_init(); tegra_disable_boot_clocks(); - tegra_clk_set_dvfs_rates(); return 0; } late_initcall(tegra_late_init_clock); @@ -711,7 +674,7 @@ static void dvfs_show_one(struct seq_file *s, struct dvfs *d, int level) { seq_printf(s, "%*s %-*s%21s%d mV\n", level * 3 + 1, "", - 30 - level * 3, d->reg_id, + 30 - level * 3, d->dvfs_rail->reg_id, "", d->cur_millivolts); } @@ -719,7 +682,6 @@ static void dvfs_show_one(struct seq_file *s, struct dvfs *d, int level) static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level) { struct clk *child; - struct dvfs *d; const char *state = "uninit"; char div[8] = {0}; @@ -752,8 +714,8 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level) 30 - level * 3, c->name, state, c->refcnt, div, clk_get_rate_all_locked(c)); - list_for_each_entry(d, &c->dvfs, node) - dvfs_show_one(s, d, level + 1); + if (c->dvfs) + dvfs_show_one(s, c->dvfs, level + 1); list_for_each_entry(child, &clocks, node) { if (child->parent != c) diff --git a/arch/arm/mach-tegra/clock.h b/arch/arm/mach-tegra/clock.h index 083815487c17..1d6a9acba412 100644 --- a/arch/arm/mach-tegra/clock.h +++ b/arch/arm/mach-tegra/clock.h @@ -77,7 +77,7 @@ enum clk_state { struct clk { /* node for master clocks list */ struct 
list_head node; /* node for list of all clocks */ - struct list_head dvfs; /* list of dvfs dependencies */ + struct dvfs *dvfs; struct clk_lookup lookup; #ifdef CONFIG_DEBUG_FS @@ -89,7 +89,7 @@ struct clk { unsigned long dvfs_rate; unsigned long rate; unsigned long max_rate; - bool is_dvfs; + unsigned long min_rate; bool auto_dvfs; bool cansleep; u32 flags; @@ -105,6 +105,8 @@ struct clk { u32 reg; u32 reg_shift; + struct list_head shared_bus_list; + union { struct { unsigned int clk_num; @@ -128,10 +130,6 @@ struct clk { struct clk *backup; } cpu; struct { - struct list_head list; - unsigned long min_rate; - } shared_bus; - struct { struct list_head node; bool enabled; unsigned long rate; @@ -162,9 +160,7 @@ struct clk *tegra_get_clock_by_name(const char *name); unsigned long clk_measure_input_freq(void); int clk_reparent(struct clk *c, struct clk *parent); void tegra_clk_init_from_table(struct tegra_clk_init_table *table); -void tegra_clk_set_dvfs_rates(void); void clk_set_cansleep(struct clk *c); unsigned long clk_get_rate_locked(struct clk *c); -int tegra_dvfs_set_rate_locked(struct clk *c, unsigned long rate); #endif diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c index 035e1bc31462..d8c103e8964c 100644 --- a/arch/arm/mach-tegra/cpu-tegra.c +++ b/arch/arm/mach-tegra/cpu-tegra.c @@ -29,6 +29,7 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/suspend.h> +#include <linux/debugfs.h> #include <asm/smp_twd.h> #include <asm/system.h> @@ -36,7 +37,7 @@ #include <mach/hardware.h> #include <mach/clk.h> -/* Frequency table index must be sequential starting at 0 */ +/* Frequency table index must be sequential starting at 0 and frequencies must be ascending*/ static struct cpufreq_frequency_table freq_table[] = { { 0, 216000 }, { 1, 312000 }, @@ -49,14 +50,28 @@ static struct cpufreq_frequency_table freq_table[] = { { 8, CPUFREQ_TABLE_END }, }; +/* CPU frequency is gradually lowered when throttling is enabled */ +#define 
THROTTLE_START_INDEX 2 +#define THROTTLE_END_INDEX 6 +#define THROTTLE_DELAY msecs_to_jiffies(2000) +#define NO_DELAY msecs_to_jiffies(0) + #define NUM_CPUS 2 static struct clk *cpu_clk; +static struct clk *emc_clk; + +static struct workqueue_struct *workqueue; static unsigned long target_cpu_speed[NUM_CPUS]; static DEFINE_MUTEX(tegra_cpu_lock); static bool is_suspended; +static DEFINE_MUTEX(throttling_lock); +static bool is_throttling; +static struct delayed_work throttle_work; + + int tegra_verify_speed(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, freq_table); @@ -101,6 +116,17 @@ static int tegra_update_cpu_speed(unsigned long rate) if (freqs.old == freqs.new) return ret; + /* + * Vote on memory bus frequency based on cpu frequency + * This sets the minimum frequency, display or avp may request higher + */ + if (rate >= 816000) + clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */ + else if (rate >= 456000) + clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */ + else + clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */ + for_each_online_cpu(freqs.cpu) cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); @@ -143,6 +169,8 @@ static int tegra_target(struct cpufreq_policy *policy, { int idx; unsigned int freq; + unsigned int highest_speed; + unsigned int limit_when_throttling; int ret = 0; mutex_lock(&tegra_cpu_lock); @@ -159,13 +187,137 @@ static int tegra_target(struct cpufreq_policy *policy, target_cpu_speed[policy->cpu] = freq; - ret = tegra_update_cpu_speed(tegra_cpu_highest_speed()); + highest_speed = tegra_cpu_highest_speed(); + /* Do not go above this frequency when throttling */ + limit_when_throttling = freq_table[THROTTLE_START_INDEX].frequency; + + if (is_throttling && highest_speed > limit_when_throttling) { + if (tegra_getspeed(0) < limit_when_throttling) { + ret = tegra_update_cpu_speed(limit_when_throttling); + goto out; + } else { + ret = -EBUSY; + goto out; + } + } + + ret = 
tegra_update_cpu_speed(highest_speed); out: mutex_unlock(&tegra_cpu_lock); return ret; } +static bool tegra_throttling_needed(unsigned long *rate) +{ + unsigned int current_freq = tegra_getspeed(0); + int i; + + for (i = THROTTLE_END_INDEX; i >= THROTTLE_START_INDEX; i--) { + if (freq_table[i].frequency < current_freq) { + *rate = freq_table[i].frequency; + return true; + } + } + + return false; +} + +static void tegra_throttle_work_func(struct work_struct *work) +{ + unsigned long rate; + + mutex_lock(&tegra_cpu_lock); + + if (tegra_throttling_needed(&rate) && tegra_update_cpu_speed(rate) == 0) { + queue_delayed_work(workqueue, &throttle_work, THROTTLE_DELAY); + } + + mutex_unlock(&tegra_cpu_lock); +} + +/** + * tegra_throttling_enable + * This function may sleep + */ +void tegra_throttling_enable(void) +{ + mutex_lock(&throttling_lock); + + if (!is_throttling) { + is_throttling = true; + queue_delayed_work(workqueue, &throttle_work, NO_DELAY); + } + + mutex_unlock(&throttling_lock); +} +EXPORT_SYMBOL_GPL(tegra_throttling_enable); + +/** + * tegra_throttling_disable + * This function may sleep + */ +void tegra_throttling_disable(void) +{ + mutex_lock(&throttling_lock); + + if (is_throttling) { + cancel_delayed_work_sync(&throttle_work); + is_throttling = false; + } + + mutex_unlock(&throttling_lock); +} +EXPORT_SYMBOL_GPL(tegra_throttling_disable); + +#ifdef CONFIG_DEBUG_FS +static int throttle_debug_set(void *data, u64 val) +{ + if (val) { + tegra_throttling_enable(); + } else { + tegra_throttling_disable(); + } + + return 0; +} +static int throttle_debug_get(void *data, u64 *val) +{ + *val = (u64) is_throttling; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(throttle_fops, throttle_debug_get, throttle_debug_set, "%llu\n"); + +static struct dentry *cpu_tegra_debugfs_root; + +static int __init tegra_cpu_debug_init(void) +{ + cpu_tegra_debugfs_root = debugfs_create_dir("cpu-tegra", 0); + + if (!cpu_tegra_debugfs_root) + return -ENOMEM; + + if 
(!debugfs_create_file("throttle", 0644, cpu_tegra_debugfs_root, NULL, &throttle_fops)) + goto err_out; + + return 0; + +err_out: + debugfs_remove_recursive(cpu_tegra_debugfs_root); + return -ENOMEM; + +} + +static void __exit tegra_cpu_debug_exit(void) +{ + debugfs_remove_recursive(cpu_tegra_debugfs_root); +} + +late_initcall(tegra_cpu_debug_init); +module_exit(tegra_cpu_debug_exit); +#endif + static int tegra_pm_notify(struct notifier_block *nb, unsigned long event, void *dummy) { @@ -196,6 +348,13 @@ static int tegra_cpu_init(struct cpufreq_policy *policy) if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); + emc_clk = clk_get_sys("cpu", "emc"); + if (IS_ERR(emc_clk)) { + clk_put(cpu_clk); + return PTR_ERR(emc_clk); + } + clk_enable(emc_clk); + cpufreq_frequency_table_cpuinfo(policy, freq_table); cpufreq_frequency_table_get_attr(freq_table, policy->cpu); policy->cur = tegra_getspeed(policy->cpu); @@ -207,8 +366,10 @@ static int tegra_cpu_init(struct cpufreq_policy *policy) policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; cpumask_copy(policy->related_cpus, cpu_possible_mask); - if (policy->cpu == 0) + if (policy->cpu == 0) { + INIT_DELAYED_WORK(&throttle_work, tegra_throttle_work_func); register_pm_notifier(&tegra_cpu_pm_notifier); + } return 0; } @@ -216,6 +377,8 @@ static int tegra_cpu_init(struct cpufreq_policy *policy) static int tegra_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_cpuinfo(policy, freq_table); + clk_disable(emc_clk); + clk_put(emc_clk); clk_put(cpu_clk); return 0; } @@ -237,11 +400,15 @@ static struct cpufreq_driver tegra_cpufreq_driver = { static int __init tegra_cpufreq_init(void) { + workqueue = create_singlethread_workqueue("cpu-tegra"); + if (!workqueue) + return -ENOMEM; return cpufreq_register_driver(&tegra_cpufreq_driver); } static void __exit tegra_cpufreq_exit(void) { + destroy_workqueue(workqueue); cpufreq_unregister_driver(&tegra_cpufreq_driver); } diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c index 
001dbcc14bfc..484a39182d62 100755 --- a/arch/arm/mach-tegra/dma.c +++ b/arch/arm/mach-tegra/dma.c @@ -752,7 +752,7 @@ static void handle_continuous_sngl_dma(struct tegra_dma_channel *ch) */ next_req = list_entry(req->node.next, typeof(*next_req), node); if (next_req->status != TEGRA_DMA_REQ_INFLIGHT) { - pr_warning("%s: interrupt during enqueue\n", __func__); + pr_debug("%s: interrupt during enqueue\n", __func__); tegra_dma_stop(ch); tegra_dma_update_hw(ch, next_req); } else if (!list_is_last(&next_req->node, &ch->list)) { diff --git a/arch/arm/mach-tegra/dvfs.c b/arch/arm/mach-tegra/dvfs.c index ef58fae8afbd..bc1e1a391b5a 100644 --- a/arch/arm/mach-tegra/dvfs.c +++ b/arch/arm/mach-tegra/dvfs.c @@ -18,131 +18,198 @@ #include <linux/kernel.h> #include <linux/clk.h> -#include <linux/list.h> +#include <linux/debugfs.h> #include <linux/init.h> +#include <linux/list.h> #include <linux/list_sort.h> #include <linux/module.h> -#include <linux/debugfs.h> -#include <linux/slab.h> -#include <linux/seq_file.h> #include <linux/regulator/consumer.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/suspend.h> +#include <linux/delay.h> + #include <asm/clkdev.h> + #include <mach/clk.h> #include "board.h" #include "clock.h" #include "dvfs.h" -struct dvfs_reg { - struct list_head node; /* node in dvfs_reg_list */ - struct list_head dvfs; /* list head of attached dvfs clocks */ - const char *reg_id; - struct regulator *reg; - int max_millivolts; - int millivolts; - struct mutex lock; -}; - -static LIST_HEAD(dvfs_debug_list); -static LIST_HEAD(dvfs_reg_list); +static LIST_HEAD(dvfs_rail_list); +static DEFINE_MUTEX(dvfs_lock); -static DEFINE_MUTEX(dvfs_debug_list_lock); -static DEFINE_MUTEX(dvfs_reg_list_lock); +static int dvfs_rail_update(struct dvfs_rail *rail); -static int dvfs_reg_set_voltage(struct dvfs_reg *dvfs_reg) +void tegra_dvfs_add_relationships(struct dvfs_relationship *rels, int n) { - int millivolts = 0; - struct dvfs *d; - int ret = 0; + int i; + 
struct dvfs_relationship *rel; - mutex_lock(&dvfs_reg->lock); + mutex_lock(&dvfs_lock); - list_for_each_entry(d, &dvfs_reg->dvfs, reg_node) - millivolts = max(d->cur_millivolts, millivolts); + for (i = 0; i < n; i++) { + rel = &rels[i]; + list_add_tail(&rel->from_node, &rel->to->relationships_from); + list_add_tail(&rel->to_node, &rel->from->relationships_to); + } + + mutex_unlock(&dvfs_lock); +} - if (millivolts == dvfs_reg->millivolts) - goto out; +int tegra_dvfs_init_rails(struct dvfs_rail *rails[], int n) +{ + int i; + + mutex_lock(&dvfs_lock); - dvfs_reg->millivolts = millivolts; + for (i = 0; i < n; i++) { + INIT_LIST_HEAD(&rails[i]->dvfs); + INIT_LIST_HEAD(&rails[i]->relationships_from); + INIT_LIST_HEAD(&rails[i]->relationships_to); + rails[i]->millivolts = rails[i]->nominal_millivolts; + rails[i]->new_millivolts = rails[i]->nominal_millivolts; + if (!rails[i]->step) + rails[i]->step = rails[i]->max_millivolts; - if (!dvfs_reg->reg) { - pr_warn("dvfs set voltage on %s ignored\n", dvfs_reg->reg_id); - goto out; + list_add_tail(&rails[i]->node, &dvfs_rail_list); } - ret = regulator_set_voltage(dvfs_reg->reg, - millivolts * 1000, dvfs_reg->max_millivolts * 1000); + mutex_unlock(&dvfs_lock); -out: - mutex_unlock(&dvfs_reg->lock); - return ret; + return 0; +}; + +static int dvfs_solve_relationship(struct dvfs_relationship *rel) +{ + return rel->solve(rel->from, rel->to); } -static int dvfs_reg_connect_to_regulator(struct dvfs_reg *dvfs_reg) +/* Sets the voltage on a dvfs rail to a specific value, and updates any + * rails that depend on this rail. */ +static int dvfs_rail_set_voltage(struct dvfs_rail *rail, int millivolts) { - struct regulator *reg; + int ret = 0; + struct dvfs_relationship *rel; + int step = (millivolts > rail->millivolts) ? 
rail->step : -rail->step; + int i; + int steps; - if (!dvfs_reg->reg) { - reg = regulator_get(NULL, dvfs_reg->reg_id); - if (IS_ERR(reg)) + if (!rail->reg) { + if (millivolts == rail->millivolts) + return 0; + else return -EINVAL; } - dvfs_reg->reg = reg; + if (rail->disabled) + return 0; - return 0; + steps = DIV_ROUND_UP(abs(millivolts - rail->millivolts), rail->step); + + for (i = 0; i < steps; i++) { + if (abs(millivolts - rail->millivolts) > rail->step) + rail->new_millivolts = rail->millivolts + step; + else + rail->new_millivolts = millivolts; + + /* Before changing the voltage, tell each rail that depends + * on this rail that the voltage will change. + * This rail will be the "from" rail in the relationship, + * the rail that depends on this rail will be the "to" rail. + * from->millivolts will be the old voltage + * from->new_millivolts will be the new voltage */ + list_for_each_entry(rel, &rail->relationships_to, to_node) { + ret = dvfs_rail_update(rel->to); + if (ret) + return ret; + } + + if (!rail->disabled) { + ret = regulator_set_voltage(rail->reg, + rail->new_millivolts * 1000, + rail->max_millivolts * 1000); + } + if (ret) { + pr_err("Failed to set dvfs regulator %s\n", rail->reg_id); + return ret; + } + + rail->millivolts = rail->new_millivolts; + + /* After changing the voltage, tell each rail that depends + * on this rail that the voltage has changed. + * from->millivolts and from->new_millivolts will be the + * new voltage */ + list_for_each_entry(rel, &rail->relationships_to, to_node) { + ret = dvfs_rail_update(rel->to); + if (ret) + return ret; + } + } + + if (unlikely(rail->millivolts != millivolts)) { + pr_err("%s: rail didn't reach target %d in %d steps (%d)\n", + __func__, millivolts, steps, rail->millivolts); + return -EINVAL; + } + + return ret; } -static struct dvfs_reg *get_dvfs_reg(struct dvfs *d) +/* Determine the minimum valid voltage for a rail, taking into account + * the dvfs clocks and any rails that this rail depends on. 
Calls + * dvfs_rail_set_voltage with the new voltage, which will call + * dvfs_rail_update on any rails that depend on this rail. */ +static int dvfs_rail_update(struct dvfs_rail *rail) { - struct dvfs_reg *dvfs_reg; + int millivolts = 0; + struct dvfs *d; + struct dvfs_relationship *rel; + int ret = 0; - mutex_lock(&dvfs_reg_list_lock); + /* if dvfs is suspended, return and handle it during resume */ + if (rail->suspended) + return 0; - list_for_each_entry(dvfs_reg, &dvfs_reg_list, node) - if (!strcmp(d->reg_id, dvfs_reg->reg_id)) - goto out; + /* if regulators are not connected yet, return and handle it later */ + if (!rail->reg) + return 0; - dvfs_reg = kzalloc(sizeof(struct dvfs_reg), GFP_KERNEL); - if (!dvfs_reg) { - pr_err("%s: Failed to allocate dvfs_reg\n", __func__); - goto out; - } + /* Find the maximum voltage requested by any clock */ + list_for_each_entry(d, &rail->dvfs, reg_node) + millivolts = max(d->cur_millivolts, millivolts); - mutex_init(&dvfs_reg->lock); - INIT_LIST_HEAD(&dvfs_reg->dvfs); - dvfs_reg->reg_id = kstrdup(d->reg_id, GFP_KERNEL); + rail->new_millivolts = millivolts; - list_add_tail(&dvfs_reg->node, &dvfs_reg_list); + /* Check any rails that this rail depends on */ + list_for_each_entry(rel, &rail->relationships_from, from_node) + rail->new_millivolts = dvfs_solve_relationship(rel); -out: - mutex_unlock(&dvfs_reg_list_lock); - return dvfs_reg; + if (rail->new_millivolts != rail->millivolts) + ret = dvfs_rail_set_voltage(rail, rail->new_millivolts); + + return ret; } -static struct dvfs_reg *attach_dvfs_reg(struct dvfs *d) +static int dvfs_rail_connect_to_regulator(struct dvfs_rail *rail) { - struct dvfs_reg *dvfs_reg; - - dvfs_reg = get_dvfs_reg(d); - if (!dvfs_reg) - return NULL; - - mutex_lock(&dvfs_reg->lock); - list_add_tail(&d->reg_node, &dvfs_reg->dvfs); + struct regulator *reg; - d->dvfs_reg = dvfs_reg; - if (d->max_millivolts > d->dvfs_reg->max_millivolts) - d->dvfs_reg->max_millivolts = d->max_millivolts; + if (!rail->reg) { + 
reg = regulator_get(NULL, rail->reg_id); + if (IS_ERR(reg)) + return -EINVAL; + } - d->cur_millivolts = d->max_millivolts; - mutex_unlock(&dvfs_reg->lock); + rail->reg = reg; - return dvfs_reg; + return 0; } static int -__tegra_dvfs_set_rate(struct clk *c, struct dvfs *d, unsigned long rate) +__tegra_dvfs_set_rate(struct dvfs *d, unsigned long rate) { int i = 0; int ret; @@ -152,7 +219,7 @@ __tegra_dvfs_set_rate(struct clk *c, struct dvfs *d, unsigned long rate) if (rate > d->freqs[d->num_freqs - 1]) { pr_warn("tegra_dvfs: rate %lu too high for dvfs on %s\n", rate, - c->name); + d->clk_name); return -EINVAL; } @@ -167,54 +234,39 @@ __tegra_dvfs_set_rate(struct clk *c, struct dvfs *d, unsigned long rate) d->cur_rate = rate; - if (!d->dvfs_reg) - return 0; - - ret = dvfs_reg_set_voltage(d->dvfs_reg); + ret = dvfs_rail_update(d->dvfs_rail); if (ret) pr_err("Failed to set regulator %s for clock %s to %d mV\n", - d->dvfs_reg->reg_id, c->name, d->cur_millivolts); + d->dvfs_rail->reg_id, d->clk_name, d->cur_millivolts); return ret; } -int tegra_dvfs_set_rate_locked(struct clk *c, unsigned long rate) +int tegra_dvfs_set_rate(struct clk *c, unsigned long rate) { - struct dvfs *d; - int ret = 0; - bool freq_up; - - c->dvfs_rate = rate; - - freq_up = (c->refcnt == 0) || (rate > clk_get_rate_locked(c)); + int ret; - list_for_each_entry(d, &c->dvfs, node) { - if (d->higher == freq_up) - ret = __tegra_dvfs_set_rate(c, d, rate); - if (ret) - return ret; - } + if (!c->dvfs) + return -EINVAL; - list_for_each_entry(d, &c->dvfs, node) { - if (d->higher != freq_up) - ret = __tegra_dvfs_set_rate(c, d, rate); - if (ret) - return ret; - } + mutex_lock(&dvfs_lock); + ret = __tegra_dvfs_set_rate(c->dvfs, rate); + mutex_unlock(&dvfs_lock); - return 0; + return ret; } +EXPORT_SYMBOL(tegra_dvfs_set_rate); /* May only be called during clock init, does not take any locks on clock c. 
*/ int __init tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d) { int i; - struct dvfs_reg *dvfs_reg; - dvfs_reg = attach_dvfs_reg(d); - if (!dvfs_reg) { - pr_err("Failed to get regulator %s for clock %s\n", - d->reg_id, c->name); + if (c->dvfs) { + pr_err("Error when enabling dvfs on %s for clock %s:\n", + d->dvfs_rail->reg_id, c->name); + pr_err("DVFS already enabled for %s\n", + c->dvfs->dvfs_rail->reg_id); return -EINVAL; } @@ -235,17 +287,172 @@ int __init tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d) clk_set_cansleep(c); } - c->is_dvfs = true; + c->dvfs = d; - list_add_tail(&d->node, &c->dvfs); - - mutex_lock(&dvfs_debug_list_lock); - list_add_tail(&d->debug_node, &dvfs_debug_list); - mutex_unlock(&dvfs_debug_list_lock); + mutex_lock(&dvfs_lock); + list_add_tail(&d->reg_node, &d->dvfs_rail->dvfs); + mutex_unlock(&dvfs_lock); return 0; } +static bool tegra_dvfs_all_rails_suspended(void) +{ + struct dvfs_rail *rail; + bool all_suspended = true; + + list_for_each_entry(rail, &dvfs_rail_list, node) + if (!rail->suspended && !rail->disabled) + all_suspended = false; + + return all_suspended; +} + +static bool tegra_dvfs_from_rails_suspended(struct dvfs_rail *to) +{ + struct dvfs_relationship *rel; + bool all_suspended = true; + + list_for_each_entry(rel, &to->relationships_from, from_node) + if (!rel->from->suspended && !rel->from->disabled) + all_suspended = false; + + return all_suspended; +} + +static int tegra_dvfs_suspend_one(void) +{ + struct dvfs_rail *rail; + int ret; + + list_for_each_entry(rail, &dvfs_rail_list, node) { + if (!rail->suspended && !rail->disabled && + tegra_dvfs_from_rails_suspended(rail)) { + ret = dvfs_rail_set_voltage(rail, + rail->nominal_millivolts); + if (ret) + return ret; + rail->suspended = true; + return 0; + } + } + + return -EINVAL; +} + +static void tegra_dvfs_resume(void) +{ + struct dvfs_rail *rail; + + mutex_lock(&dvfs_lock); + + list_for_each_entry(rail, &dvfs_rail_list, node) + rail->suspended = false; + + 
list_for_each_entry(rail, &dvfs_rail_list, node) + dvfs_rail_update(rail); + + mutex_unlock(&dvfs_lock); +} + +static int tegra_dvfs_suspend(void) +{ + int ret = 0; + + mutex_lock(&dvfs_lock); + + while (!tegra_dvfs_all_rails_suspended()) { + ret = tegra_dvfs_suspend_one(); + if (ret) + break; + } + + mutex_unlock(&dvfs_lock); + + if (ret) + tegra_dvfs_resume(); + + return ret; +} + +static int tegra_dvfs_pm_notify(struct notifier_block *nb, + unsigned long event, void *data) +{ + switch (event) { + case PM_SUSPEND_PREPARE: + if (tegra_dvfs_suspend()) + return NOTIFY_STOP; + break; + case PM_POST_SUSPEND: + tegra_dvfs_resume(); + break; + } + + return NOTIFY_OK; +}; + +static struct notifier_block tegra_dvfs_nb = { + .notifier_call = tegra_dvfs_pm_notify, +}; + +/* must be called with dvfs lock held */ +static void __tegra_dvfs_rail_disable(struct dvfs_rail *rail) +{ + int ret; + + if (!rail->disabled) { + ret = dvfs_rail_set_voltage(rail, rail->nominal_millivolts); + if (ret) + pr_info("dvfs: failed to set regulator %s to disable " + "voltage %d\n", rail->reg_id, + rail->nominal_millivolts); + rail->disabled = true; + } +} + +/* must be called with dvfs lock held */ +static void __tegra_dvfs_rail_enable(struct dvfs_rail *rail) +{ + if (rail->disabled) { + rail->disabled = false; + dvfs_rail_update(rail); + } +} + +void tegra_dvfs_rail_enable(struct dvfs_rail *rail) +{ + mutex_lock(&dvfs_lock); + __tegra_dvfs_rail_enable(rail); + mutex_unlock(&dvfs_lock); +} + +void tegra_dvfs_rail_disable(struct dvfs_rail *rail) +{ + mutex_lock(&dvfs_lock); + __tegra_dvfs_rail_disable(rail); + mutex_unlock(&dvfs_lock); +} + +int tegra_dvfs_rail_disable_by_name(const char *reg_id) +{ + struct dvfs_rail *rail; + int ret = 0; + + mutex_lock(&dvfs_lock); + list_for_each_entry(rail, &dvfs_rail_list, node) { + if (!strcmp(reg_id, rail->reg_id)) { + __tegra_dvfs_rail_disable(rail); + goto out; + } + } + + ret = -EINVAL; + +out: + mutex_unlock(&dvfs_lock); + return ret; +} + /* * Iterate 
through all the dvfs regulators, finding the regulator exported * by the regulator api for each one. Must be called in late init, after @@ -253,12 +460,19 @@ int __init tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d) */ int __init tegra_dvfs_late_init(void) { - struct dvfs_reg *dvfs_reg; + struct dvfs_rail *rail; + + mutex_lock(&dvfs_lock); + + list_for_each_entry(rail, &dvfs_rail_list, node) + dvfs_rail_connect_to_regulator(rail); - mutex_lock(&dvfs_reg_list_lock); - list_for_each_entry(dvfs_reg, &dvfs_reg_list, node) - dvfs_reg_connect_to_regulator(dvfs_reg); - mutex_unlock(&dvfs_reg_list_lock); + list_for_each_entry(rail, &dvfs_rail_list, node) + dvfs_rail_update(rail); + + mutex_unlock(&dvfs_lock); + + register_pm_notifier(&tegra_dvfs_nb); return 0; } @@ -266,11 +480,11 @@ int __init tegra_dvfs_late_init(void) #ifdef CONFIG_DEBUG_FS static int dvfs_tree_sort_cmp(void *p, struct list_head *a, struct list_head *b) { - struct dvfs *da = list_entry(a, struct dvfs, debug_node); - struct dvfs *db = list_entry(b, struct dvfs, debug_node); + struct dvfs *da = list_entry(a, struct dvfs, reg_node); + struct dvfs *db = list_entry(b, struct dvfs, reg_node); int ret; - ret = strcmp(da->reg_id, db->reg_id); + ret = strcmp(da->dvfs_rail->reg_id, db->dvfs_rail->reg_id); if (ret != 0) return ret; @@ -285,27 +499,33 @@ static int dvfs_tree_sort_cmp(void *p, struct list_head *a, struct list_head *b) static int dvfs_tree_show(struct seq_file *s, void *data) { struct dvfs *d; - const char *last_reg = ""; + struct dvfs_rail *rail; + struct dvfs_relationship *rel; seq_printf(s, " clock rate mV\n"); seq_printf(s, "--------------------------------\n"); - mutex_lock(&dvfs_debug_list_lock); - - list_sort(NULL, &dvfs_debug_list, dvfs_tree_sort_cmp); + mutex_lock(&dvfs_lock); - list_for_each_entry(d, &dvfs_debug_list, debug_node) { - if (strcmp(last_reg, d->dvfs_reg->reg_id) != 0) { - last_reg = d->dvfs_reg->reg_id; - seq_printf(s, "%s %d mV:\n", d->dvfs_reg->reg_id, - 
d->dvfs_reg->millivolts); + list_for_each_entry(rail, &dvfs_rail_list, node) { + seq_printf(s, "%s %d mV%s:\n", rail->reg_id, + rail->millivolts, rail->disabled ? " disabled" : ""); + list_for_each_entry(rel, &rail->relationships_from, from_node) { + seq_printf(s, " %-10s %-7d mV %-4d mV\n", + rel->from->reg_id, + rel->from->millivolts, + dvfs_solve_relationship(rel)); } - seq_printf(s, " %-10s %-10lu %-4d mV\n", d->clk_name, - d->cur_rate, d->cur_millivolts); + list_sort(NULL, &rail->dvfs, dvfs_tree_sort_cmp); + + list_for_each_entry(d, &rail->dvfs, reg_node) { + seq_printf(s, " %-10s %-10lu %-4d mV\n", d->clk_name, + d->cur_rate, d->cur_millivolts); + } } - mutex_unlock(&dvfs_debug_list_lock); + mutex_unlock(&dvfs_lock); return 0; } diff --git a/arch/arm/mach-tegra/dvfs.h b/arch/arm/mach-tegra/dvfs.h index e5eac6cf9cd0..68622b899c59 100644 --- a/arch/arm/mach-tegra/dvfs.h +++ b/arch/arm/mach-tegra/dvfs.h @@ -22,25 +22,57 @@ #define MAX_DVFS_FREQS 16 struct clk; +struct dvfs_rail; + +/* + * dvfs_relationship between two rails, "from" and "to" + * when the rail changes, it will call dvfs_rail_update on the rails + * in the relationship_to list. + * when determining the voltage to set a rail to, it will consider each + * rail in the relationship_from list. 
+ */ +struct dvfs_relationship { + struct dvfs_rail *to; + struct dvfs_rail *from; + int (*solve)(struct dvfs_rail *, struct dvfs_rail *); + + struct list_head to_node; /* node in relationship_to list */ + struct list_head from_node; /* node in relationship_from list */ +}; + +struct dvfs_rail { + const char *reg_id; + int min_millivolts; + int max_millivolts; + int nominal_millivolts; + int step; + bool disabled; + + struct list_head node; /* node in dvfs_rail_list */ + struct list_head dvfs; /* list head of attached dvfs clocks */ + struct list_head relationships_to; + struct list_head relationships_from; + struct regulator *reg; + int millivolts; + int new_millivolts; + bool suspended; +}; struct dvfs { /* Used only by tegra2_clock.c */ const char *clk_name; - int process_id; - bool cpu; + int cpu_process_id; /* Must be initialized before tegra_dvfs_init */ - const char *reg_id; int freqs_mult; unsigned long freqs[MAX_DVFS_FREQS]; - unsigned long millivolts[MAX_DVFS_FREQS]; + const int *millivolts; + struct dvfs_rail *dvfs_rail; bool auto_dvfs; - bool higher; /* Filled in by tegra_dvfs_init */ int max_millivolts; int num_freqs; - struct dvfs_reg *dvfs_reg; int cur_millivolts; unsigned long cur_rate; @@ -53,5 +85,9 @@ void tegra2_init_dvfs(void); int tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d); int dvfs_debugfs_init(struct dentry *clk_debugfs_root); int tegra_dvfs_late_init(void); +int tegra_dvfs_init_rails(struct dvfs_rail *dvfs_rails[], int n); +void tegra_dvfs_add_relationships(struct dvfs_relationship *rels, int n); +void tegra_dvfs_rail_enable(struct dvfs_rail *rail); +void tegra_dvfs_rail_disable(struct dvfs_rail *rail); #endif diff --git a/arch/arm/mach-tegra/suspend.c b/arch/arm/mach-tegra/suspend.c index af3252401256..146b93a014f3 100644 --- a/arch/arm/mach-tegra/suspend.c +++ b/arch/arm/mach-tegra/suspend.c @@ -458,13 +458,17 @@ static void tegra_suspend_dram(bool do_lp0) suspend_cpu_complex(); flush_cache_all(); +#ifdef CONFIG_CACHE_L2X0 
l2x0_shutdown(); +#endif __cortex_a9_save(mode); restore_cpu_complex(); writel(orig, evp_reset); +#ifdef CONFIG_CACHE_L2X0 l2x0_restart(); +#endif if (!do_lp0) { memcpy(iram_code, iram_save, iram_save_size); diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c index c6fca17a28be..eef598e456f1 100644 --- a/arch/arm/mach-tegra/tegra2_clocks.c +++ b/arch/arm/mach-tegra/tegra2_clocks.c @@ -32,6 +32,7 @@ #include "clock.h" #include "fuse.h" +#include "tegra2_emc.h" #define RST_DEVICES 0x004 #define RST_DEVICES_SET 0x300 @@ -302,8 +303,6 @@ static void tegra2_super_clk_init(struct clk *c) } BUG_ON(sel->input == NULL); c->parent = sel->input; - - INIT_LIST_HEAD(&c->u.shared_bus.list); } static int tegra2_super_clk_enable(struct clk *c) @@ -1023,6 +1022,53 @@ static struct clk_ops tegra_periph_clk_ops = { .reset = &tegra2_periph_clk_reset, }; +/* External memory controller clock ops */ +static void tegra2_emc_clk_init(struct clk *c) +{ + tegra2_periph_clk_init(c); + c->max_rate = clk_get_rate_locked(c); +} + +static long tegra2_emc_clk_round_rate(struct clk *c, unsigned long rate) +{ + long new_rate = rate; + + new_rate = tegra_emc_round_rate(new_rate); + if (new_rate < 0) + return c->max_rate; + + BUG_ON(new_rate != tegra2_periph_clk_round_rate(c, new_rate)); + + return new_rate; +} + +static int tegra2_emc_clk_set_rate(struct clk *c, unsigned long rate) +{ + int ret; + /* The Tegra2 memory controller has an interlock with the clock + * block that allows memory shadowed registers to be updated, + * and then transfer them to the main registers at the same + * time as the clock update without glitches. 
*/ + ret = tegra_emc_set_rate(rate); + if (ret < 0) + return ret; + + ret = tegra2_periph_clk_set_rate(c, rate); + udelay(1); + + return ret; +} + +static struct clk_ops tegra_emc_clk_ops = { + .init = &tegra2_emc_clk_init, + .enable = &tegra2_periph_clk_enable, + .disable = &tegra2_periph_clk_disable, + .set_parent = &tegra2_periph_clk_set_parent, + .set_rate = &tegra2_emc_clk_set_rate, + .round_rate = &tegra2_emc_clk_round_rate, + .reset = &tegra2_periph_clk_reset, +}; + /* Clock doubler ops */ static void tegra2_clk_double_init(struct clk *c) { @@ -1151,15 +1197,16 @@ static struct clk_ops tegra_cdev_clk_ops = { static void tegra_clk_shared_bus_update(struct clk *bus) { struct clk *c; - unsigned long rate = bus->u.shared_bus.min_rate; + unsigned long rate = bus->min_rate; - list_for_each_entry(c, &bus->u.shared_bus.list, + list_for_each_entry(c, &bus->shared_bus_list, u.shared_bus_user.node) { if (c->u.shared_bus_user.enabled) rate = max(c->u.shared_bus_user.rate, rate); } - clk_set_rate(bus, rate); + if (rate != clk_get_rate(bus)) + clk_set_rate(bus, rate); }; static void tegra_clk_shared_bus_init(struct clk *c) @@ -1170,7 +1217,7 @@ static void tegra_clk_shared_bus_init(struct clk *c) c->set = true; list_add_tail(&c->u.shared_bus_user.node, - &c->parent->u.shared_bus.list); + &c->parent->shared_bus_list); } static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate) @@ -1716,9 +1763,7 @@ static struct clk tegra_clk_sclk = { .reg = 0x28, .ops = &tegra_super_ops, .max_rate = 240000000, - .u.shared_bus = { - .min_rate = 120000000, - }, + .min_rate = 120000000, }; static struct clk tegra_clk_virtual_cpu = { @@ -1843,6 +1888,18 @@ static struct clk_mux_sel mux_clk_32k[] = { { 0, 0}, }; +static struct clk tegra_clk_emc = { + .name = "emc", + .ops = &tegra_emc_clk_ops, + .reg = 0x19c, + .max_rate = 800000000, + .inputs = mux_pllm_pllc_pllp_clkm, + .flags = MUX | DIV_U71 | PERIPH_EMC_ENB, + .u.periph = { + .clk_num = 57, + }, +}; + #define 
PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _max, _inputs, _flags) \ { \ .name = _name, \ @@ -1931,13 +1988,17 @@ struct clk tegra_list_clks[] = { PERIPH_CLK("usbd", "fsl-tegra-udc", NULL, 22, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ PERIPH_CLK("usb2", "tegra-ehci.1", NULL, 58, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ PERIPH_CLK("usb3", "tegra-ehci.2", NULL, 59, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ - PERIPH_CLK("emc", "emc", NULL, 57, 0x19c, 800000000, mux_pllm_pllc_pllp_clkm, MUX | DIV_U71 | PERIPH_EMC_ENB), PERIPH_CLK("dsi", "dsi", NULL, 48, 0, 500000000, mux_plld, 0), /* scales with voltage */ PERIPH_CLK("csi", "tegra_camera", "csi", 52, 0, 72000000, mux_pllp_out3, 0), PERIPH_CLK("isp", "tegra_camera", "isp", 23, 0, 150000000, mux_clk_m, 0), /* same frequency as VI */ PERIPH_CLK("csus", "tegra_camera", "csus", 92, 0, 150000000, mux_clk_m, PERIPH_NO_RESET), SHARED_CLK("avp.sclk", "tegra-avp", "sclk", &tegra_clk_sclk), + SHARED_CLK("avp.emc", "tegra-avp", "emc", &tegra_clk_emc), + SHARED_CLK("cpu.emc", "cpu", "emc", &tegra_clk_emc), + SHARED_CLK("disp1.emc", "tegradc.0", "emc", &tegra_clk_emc), + SHARED_CLK("disp2.emc", "tegradc.1", "emc", &tegra_clk_emc), + SHARED_CLK("hdmi.emc", "hdmi", "emc", &tegra_clk_emc), }; #define CLK_DUPLICATE(_name, _dev, _con) \ @@ -2012,11 +2073,13 @@ struct clk *tegra_ptr_clks[] = { &tegra_clk_virtual_cpu, &tegra_clk_blink, &tegra_clk_cop, + &tegra_clk_emc, }; static void tegra2_init_one_clock(struct clk *c) { clk_init(c); + INIT_LIST_HEAD(&c->shared_bus_list); if (!c->lookup.dev_id && !c->lookup.con_id) c->lookup.con_id = c->name; c->lookup.clk = c; diff --git a/arch/arm/mach-tegra/tegra2_dvfs.c b/arch/arm/mach-tegra/tegra2_dvfs.c index 265a7b538f7f..b58a7d2ef92d 100644 --- a/arch/arm/mach-tegra/tegra2_dvfs.c +++ b/arch/arm/mach-tegra/tegra2_dvfs.c @@ -20,88 +20,137 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> +#include <linux/module.h> #include 
"clock.h" #include "dvfs.h" #include "fuse.h" -#define CORE_REGULATOR "vdd_core" -#define CPU_REGULATOR "vdd_cpu" +#ifdef CONFIG_TEGRA_CORE_DVFS +static bool tegra_dvfs_core_disabled; +#else +static bool tegra_dvfs_core_disabled = true; +#endif +#ifdef CONFIG_TEGRA_CPU_DVFS +static bool tegra_dvfs_cpu_disabled; +#else +static bool tegra_dvfs_cpu_disabled = true; +#endif static const int core_millivolts[MAX_DVFS_FREQS] = {950, 1000, 1100, 1200, 1275}; static const int cpu_millivolts[MAX_DVFS_FREQS] = {750, 775, 800, 825, 875, 900, 925, 975, 1000, 1050, 1100}; -static int cpu_core_millivolts[MAX_DVFS_FREQS]; - -#define CORE_MAX_MILLIVOLTS 1275 -#define CPU_MAX_MILLIVOLTS 1100 #define KHZ 1000 #define MHZ 1000000 -#ifdef CONFIG_TEGRA_CPU_DVFS -#define CPU_DVFS_CPU(_clk_name, _process_id, _mult, _freqs...) \ +static struct dvfs_rail tegra2_dvfs_rail_vdd_cpu = { + .reg_id = "vdd_cpu", + .max_millivolts = 1100, + .min_millivolts = 750, + .nominal_millivolts = 1100, +}; + +static struct dvfs_rail tegra2_dvfs_rail_vdd_core = { + .reg_id = "vdd_core", + .max_millivolts = 1275, + .min_millivolts = 950, + .nominal_millivolts = 1200, + .step = 150, /* step vdd_core by 150 mV to allow vdd_aon to follow */ +}; + +static struct dvfs_rail tegra2_dvfs_rail_vdd_aon = { + .reg_id = "vdd_aon", + .max_millivolts = 1275, + .min_millivolts = 950, + .nominal_millivolts = 1200, +#ifndef CONFIG_TEGRA_CORE_DVFS + .disabled = true, +#endif +}; + +/* vdd_core and vdd_aon must be 50 mV higher than vdd_cpu */ +static int tegra2_dvfs_rel_vdd_cpu_vdd_core(struct dvfs_rail *vdd_cpu, + struct dvfs_rail *vdd_core) +{ + if (vdd_cpu->new_millivolts > vdd_cpu->millivolts && + vdd_core->new_millivolts < vdd_cpu->new_millivolts + 50) + return vdd_cpu->new_millivolts + 50; + + if (vdd_core->new_millivolts < vdd_cpu->millivolts + 50) + return vdd_cpu->millivolts + 50; + + return vdd_core->new_millivolts; +} + +/* vdd_aon must be within 170 mV of vdd_core */ +static int 
tegra2_dvfs_rel_vdd_core_vdd_aon(struct dvfs_rail *vdd_core, + struct dvfs_rail *vdd_aon) +{ + BUG_ON(abs(vdd_aon->millivolts - vdd_core->millivolts) > + vdd_aon->step); + return vdd_core->millivolts; +} + +static struct dvfs_relationship tegra2_dvfs_relationships[] = { + { + /* vdd_core must be 50 mV higher than vdd_cpu */ + .from = &tegra2_dvfs_rail_vdd_cpu, + .to = &tegra2_dvfs_rail_vdd_core, + .solve = tegra2_dvfs_rel_vdd_cpu_vdd_core, + }, + { + /* vdd_aon must be 50 mV higher than vdd_cpu */ + .from = &tegra2_dvfs_rail_vdd_cpu, + .to = &tegra2_dvfs_rail_vdd_aon, + .solve = tegra2_dvfs_rel_vdd_cpu_vdd_core, + }, + { + /* vdd_aon must be within 170 mV of vdd_core */ + .from = &tegra2_dvfs_rail_vdd_core, + .to = &tegra2_dvfs_rail_vdd_aon, + .solve = tegra2_dvfs_rel_vdd_core_vdd_aon, + }, +}; + +static struct dvfs_rail *tegra2_dvfs_rails[] = { + &tegra2_dvfs_rail_vdd_cpu, + &tegra2_dvfs_rail_vdd_core, + &tegra2_dvfs_rail_vdd_aon, +}; + +#define CPU_DVFS(_clk_name, _process_id, _mult, _freqs...) \ { \ .clk_name = _clk_name, \ - .reg_id = CPU_REGULATOR, \ - .cpu = true, \ - .process_id = _process_id, \ + .cpu_process_id = _process_id, \ .freqs = {_freqs}, \ .freqs_mult = _mult, \ + .millivolts = cpu_millivolts, \ .auto_dvfs = true, \ - .max_millivolts = CPU_MAX_MILLIVOLTS \ - }, + .dvfs_rail = &tegra2_dvfs_rail_vdd_cpu, \ + } -#ifdef CONFIG_TEGRA_CORE_DVFS /* CPU_DVFS && CORE_DVFS */ -#define CPU_DVFS_CORE(_clk_name, _process_id, _mult, _freqs...) \ +#define CORE_DVFS(_clk_name, _auto, _mult, _freqs...) \ { \ .clk_name = _clk_name, \ - .reg_id = CORE_REGULATOR, \ - .cpu = false, \ - .process_id = _process_id, \ + .cpu_process_id = -1, \ .freqs = {_freqs}, \ .freqs_mult = _mult, \ - .auto_dvfs = true, \ - .higher = true, \ - .max_millivolts = CORE_MAX_MILLIVOLTS \ - }, -#else /* CPU_DVFS && !CORE_DVFS */ -#define CPU_DVFS_CORE(_clk_name, _process_id, _mult, _freqs...) -#endif -#else /* !CPU_DVFS */ -#define CPU_DVFS_CPU(_clk_name, _process_id, _mult, _freqs...) 
-#define CPU_DVFS_CORE(_clk_name, _process_id, _mult, _freqs...) -#endif - -#ifdef CONFIG_TEGRA_CORE_DVFS -#define CORE_DVFS(_clk_name, _auto, _mult, _freqs...) \ - { \ - .clk_name = _clk_name, \ - .reg_id = CORE_REGULATOR, \ - .process_id = -1, \ - .freqs = {_freqs}, \ - .freqs_mult = _mult, \ - .auto_dvfs = _auto, \ - .max_millivolts = CORE_MAX_MILLIVOLTS \ - }, -#else -#define CORE_DVFS(_clk_name, _process_id, _mult, _freqs...) -#endif - -#define CPU_DVFS(_clk_name, _process_id, _mult, _freqs...) \ - CPU_DVFS_CORE(_clk_name, _process_id, _mult, _freqs) \ - CPU_DVFS_CPU(_clk_name, _process_id, _mult, _freqs) \ - + .millivolts = core_millivolts, \ + .auto_dvfs = _auto, \ + .dvfs_rail = &tegra2_dvfs_rail_vdd_core, \ + } static struct dvfs dvfs_init[] = { /* Cpu voltages (mV): 750, 775, 800, 825, 875, 900, 925, 975, 1000, 1050, 1100 */ - CPU_DVFS("cpu", 0, MHZ, 314, 314, 314, 456, 456, 608, 608, 760, 817, 912, 1000) - CPU_DVFS("cpu", 1, MHZ, 314, 314, 314, 456, 456, 618, 618, 770, 827, 922, 1000) - CPU_DVFS("cpu", 2, MHZ, 494, 675, 675, 675, 817, 817, 922, 1000) - CPU_DVFS("cpu", 3, MHZ, 730, 760, 845, 845, 1000) + CPU_DVFS("cpu", 0, MHZ, 314, 314, 314, 456, 456, 608, 608, 760, 817, 912, 1000), + CPU_DVFS("cpu", 1, MHZ, 314, 314, 314, 456, 456, 618, 618, 770, 827, 922, 1000), + CPU_DVFS("cpu", 2, MHZ, 494, 675, 675, 675, 817, 817, 922, 1000), + CPU_DVFS("cpu", 3, MHZ, 730, 760, 845, 845, 1000), /* Core voltages (mV): 950, 1000, 1100, 1200, 1275 */ + CORE_DVFS("emc", 1, KHZ, 57000, 333000, 333000, 666000, 666000), #if 0 /* @@ -110,22 +159,22 @@ static struct dvfs dvfs_init[] = { * For now, boards must ensure that the core voltage does not drop * below 1V, or that the sdmmc busses are set to 44 MHz or less. 
*/ - CORE_DVFS("sdmmc1", 1, KHZ, 44000, 52000, 52000, 52000, 52000) - CORE_DVFS("sdmmc2", 1, KHZ, 44000, 52000, 52000, 52000, 52000) - CORE_DVFS("sdmmc3", 1, KHZ, 44000, 52000, 52000, 52000, 52000) - CORE_DVFS("sdmmc4", 1, KHZ, 44000, 52000, 52000, 52000, 52000) + CORE_DVFS("sdmmc1", 1, KHZ, 44000, 52000, 52000, 52000, 52000), + CORE_DVFS("sdmmc2", 1, KHZ, 44000, 52000, 52000, 52000, 52000), + CORE_DVFS("sdmmc3", 1, KHZ, 44000, 52000, 52000, 52000, 52000), + CORE_DVFS("sdmmc4", 1, KHZ, 44000, 52000, 52000, 52000, 52000), #endif - CORE_DVFS("ndflash", 1, KHZ, 130000, 150000, 158000, 164000, 164000) - CORE_DVFS("nor", 1, KHZ, 0, 92000, 92000, 92000, 92000) - CORE_DVFS("ide", 1, KHZ, 0, 0, 100000, 100000, 100000) - CORE_DVFS("mipi", 1, KHZ, 0, 40000, 40000, 40000, 60000) - CORE_DVFS("usbd", 1, KHZ, 0, 0, 480000, 480000, 480000) - CORE_DVFS("usb2", 1, KHZ, 0, 0, 480000, 480000, 480000) - CORE_DVFS("usb3", 1, KHZ, 0, 0, 480000, 480000, 480000) - CORE_DVFS("pcie", 1, KHZ, 0, 0, 0, 250000, 250000) - CORE_DVFS("dsi", 1, KHZ, 100000, 100000, 100000, 500000, 500000) - CORE_DVFS("tvo", 1, KHZ, 0, 0, 0, 250000, 250000) + CORE_DVFS("ndflash", 1, KHZ, 130000, 150000, 158000, 164000, 164000), + CORE_DVFS("nor", 1, KHZ, 0, 92000, 92000, 92000, 92000), + CORE_DVFS("ide", 1, KHZ, 0, 0, 100000, 100000, 100000), + CORE_DVFS("mipi", 1, KHZ, 0, 40000, 40000, 40000, 60000), + CORE_DVFS("usbd", 1, KHZ, 0, 0, 480000, 480000, 480000), + CORE_DVFS("usb2", 1, KHZ, 0, 0, 480000, 480000, 480000), + CORE_DVFS("usb3", 1, KHZ, 0, 0, 480000, 480000, 480000), + CORE_DVFS("pcie", 1, KHZ, 0, 0, 0, 250000, 250000), + CORE_DVFS("dsi", 1, KHZ, 100000, 100000, 100000, 500000, 500000), + CORE_DVFS("tvo", 1, KHZ, 0, 0, 0, 250000, 250000), /* * The clock rate for the display controllers that determines the @@ -133,54 +182,99 @@ static struct dvfs dvfs_init[] = { * to the display block. 
Disable auto-dvfs on the display clocks, * and let the display driver call tegra_dvfs_set_rate manually */ - CORE_DVFS("disp1", 0, KHZ, 158000, 158000, 190000, 190000, 190000) - CORE_DVFS("disp2", 0, KHZ, 158000, 158000, 190000, 190000, 190000) - CORE_DVFS("hdmi", 0, KHZ, 0, 0, 0, 148500, 148500) + CORE_DVFS("disp1", 0, KHZ, 158000, 158000, 190000, 190000, 190000), + CORE_DVFS("disp2", 0, KHZ, 158000, 158000, 190000, 190000, 190000), + CORE_DVFS("hdmi", 0, KHZ, 0, 0, 0, 148500, 148500), /* * These clocks technically depend on the core process id, * but just use the worst case value for now */ - CORE_DVFS("host1x", 1, KHZ, 104500, 133000, 166000, 166000, 166000) - CORE_DVFS("epp", 1, KHZ, 133000, 171000, 247000, 300000, 300000) - CORE_DVFS("2d", 1, KHZ, 133000, 171000, 247000, 300000, 300000) - CORE_DVFS("3d", 1, KHZ, 114000, 161500, 247000, 300000, 300000) - CORE_DVFS("mpe", 1, KHZ, 104500, 152000, 228000, 250000, 250000) - CORE_DVFS("vi", 1, KHZ, 85000, 100000, 150000, 150000, 150000) - CORE_DVFS("sclk", 1, KHZ, 95000, 133000, 190000, 250000, 250000) - CORE_DVFS("vde", 1, KHZ, 95000, 123500, 209000, 250000, 250000) + CORE_DVFS("host1x", 1, KHZ, 104500, 133000, 166000, 166000, 166000), + CORE_DVFS("epp", 1, KHZ, 133000, 171000, 247000, 300000, 300000), + CORE_DVFS("2d", 1, KHZ, 133000, 171000, 247000, 300000, 300000), + CORE_DVFS("3d", 1, KHZ, 114000, 161500, 247000, 300000, 300000), + CORE_DVFS("mpe", 1, KHZ, 104500, 152000, 228000, 250000, 250000), + CORE_DVFS("vi", 1, KHZ, 85000, 100000, 150000, 150000, 150000), + CORE_DVFS("sclk", 1, KHZ, 95000, 133000, 190000, 250000, 250000), + CORE_DVFS("vde", 1, KHZ, 95000, 123500, 209000, 250000, 250000), /* What is this? 
*/ - CORE_DVFS("NVRM_DEVID_CLK_SRC", 1, MHZ, 480, 600, 800, 1067, 1067) + CORE_DVFS("NVRM_DEVID_CLK_SRC", 1, MHZ, 480, 600, 800, 1067, 1067), }; +int tegra_dvfs_disable_core_set(const char *arg, const struct kernel_param *kp) +{ + int ret; + + ret = param_set_bool(arg, kp); + if (ret) + return ret; + + if (tegra_dvfs_core_disabled) + tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_core); + else + tegra_dvfs_rail_enable(&tegra2_dvfs_rail_vdd_core); + + return 0; +} + +int tegra_dvfs_disable_cpu_set(const char *arg, const struct kernel_param *kp) +{ + int ret; + + ret = param_set_bool(arg, kp); + if (ret) + return ret; + + if (tegra_dvfs_cpu_disabled) + tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_cpu); + else + tegra_dvfs_rail_enable(&tegra2_dvfs_rail_vdd_cpu); + + return 0; +} + +int tegra_dvfs_disable_get(char *buffer, const struct kernel_param *kp) +{ + return param_get_bool(buffer, kp); +} + +static struct kernel_param_ops tegra_dvfs_disable_core_ops = { + .set = tegra_dvfs_disable_core_set, + .get = tegra_dvfs_disable_get, +}; + +static struct kernel_param_ops tegra_dvfs_disable_cpu_ops = { + .set = tegra_dvfs_disable_cpu_set, + .get = tegra_dvfs_disable_get, +}; + +module_param_cb(disable_core, &tegra_dvfs_disable_core_ops, + &tegra_dvfs_core_disabled, 0644); +module_param_cb(disable_cpu, &tegra_dvfs_disable_cpu_ops, + &tegra_dvfs_cpu_disabled, 0644); + void __init tegra2_init_dvfs(void) { int i; struct clk *c; struct dvfs *d; - int process_id; int ret; - int cpu_process_id = tegra_cpu_process_id(); - int core_process_id = tegra_core_process_id(); + tegra_dvfs_init_rails(tegra2_dvfs_rails, ARRAY_SIZE(tegra2_dvfs_rails)); + tegra_dvfs_add_relationships(tegra2_dvfs_relationships, + ARRAY_SIZE(tegra2_dvfs_relationships)); /* * VDD_CORE must always be at least 50 mV higher than VDD_CPU * Fill out cpu_core_millivolts based on cpu_millivolts */ - for (i = 0; i < ARRAY_SIZE(cpu_millivolts); i++) - if (cpu_millivolts[i]) - cpu_core_millivolts[i] = cpu_millivolts[i] + 
50; - for (i = 0; i < ARRAY_SIZE(dvfs_init); i++) { d = &dvfs_init[i]; - process_id = d->cpu ? cpu_process_id : core_process_id; - if (d->process_id != -1 && d->process_id != process_id) { - pr_debug("tegra_dvfs: rejected %s %d, process_id %d\n", - d->clk_name, d->process_id, process_id); + if (d->cpu_process_id != -1 && + d->cpu_process_id != cpu_process_id) continue; - } c = tegra_get_clock_by_name(d->clk_name); @@ -190,19 +284,15 @@ void __init tegra2_init_dvfs(void) continue; } - if (d->cpu) - memcpy(d->millivolts, cpu_millivolts, - sizeof(cpu_millivolts)); - else if (!strcmp(d->clk_name, "cpu")) - memcpy(d->millivolts, cpu_core_millivolts, - sizeof(cpu_core_millivolts)); - else - memcpy(d->millivolts, core_millivolts, - sizeof(core_millivolts)); - ret = tegra_enable_dvfs_on_clk(c, d); if (ret) pr_err("tegra_dvfs: failed to enable dvfs on %s\n", c->name); } + + if (tegra_dvfs_core_disabled) + tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_core); + + if (tegra_dvfs_cpu_disabled) + tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_cpu); } diff --git a/arch/arm/mach-tegra/tegra2_emc.c b/arch/arm/mach-tegra/tegra2_emc.c new file mode 100644 index 000000000000..bd4fa27b2086 --- /dev/null +++ b/arch/arm/mach-tegra/tegra2_emc.c @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * Author: + * Colin Cross <ccross@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> + +#include <mach/iomap.h> + +#include "tegra2_emc.h" + +#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE +static bool emc_enable = true; +#else +static bool emc_enable; +#endif +module_param(emc_enable, bool, 0644); + +static void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE); +static const struct tegra_emc_table *tegra_emc_table; +static int tegra_emc_table_size; + +static inline void emc_writel(u32 val, unsigned long addr) +{ + writel(val, emc + addr); +} + +static inline u32 emc_readl(unsigned long addr) +{ + return readl(emc + addr); +} + +static const unsigned long emc_reg_addr[TEGRA_EMC_NUM_REGS] = { + 0x2c, /* RC */ + 0x30, /* RFC */ + 0x34, /* RAS */ + 0x38, /* RP */ + 0x3c, /* R2W */ + 0x40, /* W2R */ + 0x44, /* R2P */ + 0x48, /* W2P */ + 0x4c, /* RD_RCD */ + 0x50, /* WR_RCD */ + 0x54, /* RRD */ + 0x58, /* REXT */ + 0x5c, /* WDV */ + 0x60, /* QUSE */ + 0x64, /* QRST */ + 0x68, /* QSAFE */ + 0x6c, /* RDV */ + 0x70, /* REFRESH */ + 0x74, /* BURST_REFRESH_NUM */ + 0x78, /* PDEX2WR */ + 0x7c, /* PDEX2RD */ + 0x80, /* PCHG2PDEN */ + 0x84, /* ACT2PDEN */ + 0x88, /* AR2PDEN */ + 0x8c, /* RW2PDEN */ + 0x90, /* TXSR */ + 0x94, /* TCKE */ + 0x98, /* TFAW */ + 0x9c, /* TRPAB */ + 0xa0, /* TCLKSTABLE */ + 0xa4, /* TCLKSTOP */ + 0xa8, /* TREFBW */ + 0xac, /* QUSE_EXTRA */ + 0x114, /* FBIO_CFG6 */ + 0xb0, /* ODT_WRITE */ + 0xb4, /* ODT_READ */ + 0x104, /* FBIO_CFG5 */ + 0x2bc, /* CFG_DIG_DLL */ + 0x2c0, /* DLL_XFORM_DQS */ + 0x2c4, /* DLL_XFORM_QUSE */ + 0x2e0, /* ZCAL_REF_CNT */ + 0x2e4, /* ZCAL_WAIT_CNT */ + 0x2a8, /* AUTO_CAL_INTERVAL */ + 0x2d0, /* CFG_CLKTRIM_0 */ + 0x2d4, /* CFG_CLKTRIM_1 */ + 0x2d8, /* CFG_CLKTRIM_2 */ +}; + +/* Select the closest EMC rate that is higher than the requested rate */ +long tegra_emc_round_rate(unsigned long rate) +{ + int i; + int best = -1; + unsigned long distance = ULONG_MAX; + + if (!tegra_emc_table) + 
return -EINVAL; + + if (!emc_enable) + return -EINVAL; + + pr_debug("%s: %lu\n", __func__, rate); + + /* The EMC clock rate is twice the bus rate, and the bus rate is + * measured in kHz */ + rate = rate / 2 / 1000; + + for (i = 0; i < tegra_emc_table_size; i++) { + if (tegra_emc_table[i].rate >= rate && + (tegra_emc_table[i].rate - rate) < distance) { + distance = tegra_emc_table[i].rate - rate; + best = i; + } + } + + if (best < 0) + return -EINVAL; + + pr_debug("%s: using %lu\n", __func__, tegra_emc_table[best].rate); + + return tegra_emc_table[best].rate * 2 * 1000; +} + +/* The EMC registers have shadow registers. When the EMC clock is updated + * in the clock controller, the shadow registers are copied to the active + * registers, allowing glitchless memory bus frequency changes. + * This function updates the shadow registers for a new clock frequency, + * and relies on the clock lock on the emc clock to avoid races between + * multiple frequency changes */ +int tegra_emc_set_rate(unsigned long rate) +{ + int i; + int j; + + if (!tegra_emc_table) + return -EINVAL; + + /* The EMC clock rate is twice the bus rate, and the bus rate is + * measured in kHz */ + rate = rate / 2 / 1000; + + for (i = 0; i < tegra_emc_table_size; i++) + if (tegra_emc_table[i].rate == rate) + break; + + if (i >= tegra_emc_table_size) + return -EINVAL; + + pr_debug("%s: setting to %lu\n", __func__, rate); + + for (j = 0; j < TEGRA_EMC_NUM_REGS; j++) + emc_writel(tegra_emc_table[i].regs[j], emc_reg_addr[j]); + + emc_readl(tegra_emc_table[i].regs[TEGRA_EMC_NUM_REGS - 1]); + + return 0; +} + +void tegra_init_emc(const struct tegra_emc_table *table, int table_size) +{ + tegra_emc_table = table; + tegra_emc_table_size = table_size; +} diff --git a/arch/arm/mach-tegra/tegra2_emc.h b/arch/arm/mach-tegra/tegra2_emc.h new file mode 100644 index 000000000000..3515e57fd0d9 --- /dev/null +++ b/arch/arm/mach-tegra/tegra2_emc.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2010 Google, Inc. 
+ * + * Author: + * Colin Cross <ccross@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define TEGRA_EMC_NUM_REGS 46 + +struct tegra_emc_table { + unsigned long rate; + u32 regs[TEGRA_EMC_NUM_REGS]; +}; + +int tegra_emc_set_rate(unsigned long rate); +long tegra_emc_round_rate(unsigned long rate); +void tegra_init_emc(const struct tegra_emc_table *table, int table_size); diff --git a/arch/arm/mach-tegra/tegra_i2s_audio.c b/arch/arm/mach-tegra/tegra_i2s_audio.c index f3ec7b6fb5f1..f3cd2c42278e 100644 --- a/arch/arm/mach-tegra/tegra_i2s_audio.c +++ b/arch/arm/mach-tegra/tegra_i2s_audio.c @@ -695,12 +695,17 @@ static void request_stop_nosync(struct audio_stream *as) pr_debug("%s\n", __func__); if (!as->stop) { as->stop = true; - wait_till_stopped(as); + if (pending_buffer_requests(as)) + wait_till_stopped(as); for (i = 0; i < as->num_bufs; i++) { init_completion(&as->comp[i]); complete(&as->comp[i]); } } + if (!tegra_dma_is_empty(as->dma_chan)) + pr_err("%s: DMA not empty!\n", __func__); + /* Stop the DMA then dequeue anything that's in progress. 
*/ + tegra_dma_cancel(as->dma_chan); as->active = false; /* applies to recording only */ pr_debug("%s: done\n", __func__); } @@ -826,13 +831,9 @@ static void dma_tx_complete_callback(struct tegra_dma_req *req) complete(&aos->comp[req_num]); - if (stop_playback_if_necessary(aos)) { - pr_debug("%s: done (stopped)\n", __func__); - if (!completion_done(&aos->stop_completion)) { - pr_debug("%s: signalling stop completion\n", __func__); - complete(&aos->stop_completion); - } - return; + if (!pending_buffer_requests(aos)) { + pr_debug("%s: Playback underflow\n", __func__); + complete(&aos->stop_completion); } } @@ -851,6 +852,9 @@ static void dma_rx_complete_callback(struct tegra_dma_req *req) complete(&ais->comp[req_num]); + if (!pending_buffer_requests(ais)) + pr_debug("%s: Capture overflow\n", __func__); + spin_unlock_irqrestore(&ais->dma_req_lock, flags); } @@ -1088,6 +1092,8 @@ static long tegra_audio_out_ioctl(struct file *file, request_stop_nosync(aos); pr_debug("%s: flushed\n", __func__); } + if (stop_playback_if_necessary(aos)) + pr_debug("%s: done (stopped)\n", __func__); aos->stop = false; break; case TEGRA_AUDIO_OUT_SET_NUM_BUFS: { @@ -1111,6 +1117,7 @@ static long tegra_audio_out_ioctl(struct file *file, if (rc < 0) break; aos->num_bufs = num; + sound_ops->setup(ads); } break; case TEGRA_AUDIO_OUT_GET_NUM_BUFS: @@ -1265,10 +1272,11 @@ static long tegra_audio_in_ioctl(struct file *file, if (rc < 0) break; ais->num_bufs = num; + sound_ops->setup(ads); } break; case TEGRA_AUDIO_IN_GET_NUM_BUFS: - if (copy_from_user((void __user *)arg, + if (copy_to_user((void __user *)arg, &ais->num_bufs, sizeof(ais->num_bufs))) rc = -EFAULT; break; @@ -1404,6 +1412,8 @@ static int tegra_audio_out_release(struct inode *inode, struct file *file) mutex_lock(&ads->out.lock); ads->out.opened = 0; request_stop_nosync(&ads->out); + if (stop_playback_if_necessary(&ads->out)) + pr_debug("%s: done (stopped)\n", __func__); allow_suspend(&ads->out); mutex_unlock(&ads->out.lock); 
pr_debug("%s: done\n", __func__); diff --git a/arch/arm/mach-tegra/tegra_spdif_audio.c b/arch/arm/mach-tegra/tegra_spdif_audio.c index 0848b1550dfe..6613d3d5edeb 100644 --- a/arch/arm/mach-tegra/tegra_spdif_audio.c +++ b/arch/arm/mach-tegra/tegra_spdif_audio.c @@ -460,12 +460,17 @@ static void request_stop_nosync(struct audio_stream *as) pr_debug("%s\n", __func__); if (!as->stop) { as->stop = true; - wait_till_stopped(as); + if (pending_buffer_requests(as)) + wait_till_stopped(as); for (i = 0; i < as->num_bufs; i++) { init_completion(&as->comp[i]); complete(&as->comp[i]); } } + if (!tegra_dma_is_empty(as->dma_chan)) + pr_err("%s: DMA not empty!\n", __func__); + /* Stop the DMA then dequeue anything that's in progress. */ + tegra_dma_cancel(as->dma_chan); as->active = false; /* applies to recording only */ pr_debug("%s: done\n", __func__); } @@ -543,13 +548,9 @@ static void dma_tx_complete_callback(struct tegra_dma_req *req) complete(&aos->comp[req_num]); - if (stop_playback_if_necessary(aos)) { - pr_debug("%s: done (stopped)\n", __func__); - if (!completion_done(&aos->stop_completion)) { - pr_debug("%s: signalling stop completion\n", __func__); - complete(&aos->stop_completion); - } - return; + if (!pending_buffer_requests(aos)) { + pr_debug("%s: Playback underflow", __func__); + complete(&aos->stop_completion); } } @@ -729,6 +730,8 @@ static long tegra_spdif_out_ioctl(struct file *file, request_stop_nosync(aos); pr_debug("%s: flushed\n", __func__); } + if (stop_playback_if_necessary(aos)) + pr_debug("%s: done (stopped)\n", __func__); aos->stop = false; break; case TEGRA_AUDIO_OUT_SET_NUM_BUFS: { @@ -752,6 +755,7 @@ static long tegra_spdif_out_ioctl(struct file *file, if (rc < 0) break; aos->num_bufs = num; + sound_ops->setup(ads); } break; case TEGRA_AUDIO_OUT_GET_NUM_BUFS: @@ -806,6 +810,8 @@ static int tegra_spdif_out_release(struct inode *inode, struct file *file) mutex_lock(&ads->out.lock); ads->out.opened = 0; request_stop_nosync(&ads->out); + if 
(stop_playback_if_necessary(&ads->out)) + pr_debug("%s: done (stopped)\n", __func__); allow_suspend(&ads->out); mutex_unlock(&ads->out.lock); pr_debug("%s: done\n", __func__); diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c index 6069ca20a014..4909c7bb741b 100644 --- a/drivers/cpufreq/cpufreq_interactive.c +++ b/drivers/cpufreq/cpufreq_interactive.c @@ -180,6 +180,7 @@ static void cpufreq_interactive_timer(unsigned long data) u64 now_idle; unsigned int new_freq; unsigned int index; + unsigned long flags; /* * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time, @@ -280,18 +281,18 @@ static void cpufreq_interactive_timer(unsigned long data) if (new_freq < pcpu->target_freq) { pcpu->target_freq = new_freq; - spin_lock(&down_cpumask_lock); + spin_lock_irqsave(&down_cpumask_lock, flags); cpumask_set_cpu(data, &down_cpumask); - spin_unlock(&down_cpumask_lock); + spin_unlock_irqrestore(&down_cpumask_lock, flags); queue_work(down_wq, &freq_scale_down_work); } else { pcpu->target_freq = new_freq; #if DEBUG up_request_time = ktime_to_us(ktime_get()); #endif - spin_lock(&up_cpumask_lock); + spin_lock_irqsave(&up_cpumask_lock, flags); cpumask_set_cpu(data, &up_cpumask); - spin_unlock(&up_cpumask_lock); + spin_unlock_irqrestore(&up_cpumask_lock, flags); wake_up_process(up_task); } @@ -423,6 +424,7 @@ static int cpufreq_interactive_up_task(void *data) { unsigned int cpu; cpumask_t tmp_mask; + unsigned long flags; struct cpufreq_interactive_cpuinfo *pcpu; #if DEBUG @@ -433,16 +435,16 @@ static int cpufreq_interactive_up_task(void *data) while (1) { set_current_state(TASK_INTERRUPTIBLE); - spin_lock(&up_cpumask_lock); + spin_lock_irqsave(&up_cpumask_lock, flags); if (cpumask_empty(&up_cpumask)) { - spin_unlock(&up_cpumask_lock); + spin_unlock_irqrestore(&up_cpumask_lock, flags); schedule(); if (kthread_should_stop()) break; - spin_lock(&up_cpumask_lock); + spin_lock_irqsave(&up_cpumask_lock, flags); } 
set_current_state(TASK_RUNNING); @@ -461,7 +463,7 @@ static int cpufreq_interactive_up_task(void *data) tmp_mask = up_cpumask; cpumask_clear(&up_cpumask); - spin_unlock(&up_cpumask_lock); + spin_unlock_irqrestore(&up_cpumask_lock, flags); for_each_cpu(cpu, &tmp_mask) { pcpu = &per_cpu(cpuinfo, cpu); @@ -488,12 +490,13 @@ static void cpufreq_interactive_freq_down(struct work_struct *work) { unsigned int cpu; cpumask_t tmp_mask; + unsigned long flags; struct cpufreq_interactive_cpuinfo *pcpu; - spin_lock(&down_cpumask_lock); + spin_lock_irqsave(&down_cpumask_lock, flags); tmp_mask = down_cpumask; cpumask_clear(&down_cpumask); - spin_unlock(&down_cpumask_lock); + spin_unlock_irqrestore(&down_cpumask_lock, flags); for_each_cpu(cpu, &tmp_mask) { pcpu = &per_cpu(cpuinfo, cpu); diff --git a/drivers/media/video/tegra/avp/avp_svc.c b/drivers/media/video/tegra/avp/avp_svc.c index 57cd8019c305..2eed2891e556 100644 --- a/drivers/media/video/tegra/avp/avp_svc.c +++ b/drivers/media/video/tegra/avp/avp_svc.c @@ -82,6 +82,7 @@ struct avp_svc_info { struct avp_clk clks[NUM_CLK_REQUESTS]; /* used for dvfs */ struct clk *sclk; + struct clk *emcclk; struct mutex clk_lock; @@ -352,6 +353,7 @@ static void do_svc_module_clock(struct avp_svc_info *avp_svc, aclk = &avp_svc->clks[mod->clk_req]; if (msg->enable) { if (aclk->refcnt++ == 0) { + clk_enable(avp_svc->emcclk); clk_enable(avp_svc->sclk); clk_enable(aclk->clk); } @@ -362,6 +364,7 @@ static void do_svc_module_clock(struct avp_svc_info *avp_svc, } else if (--aclk->refcnt == 0) { clk_disable(aclk->clk); clk_disable(avp_svc->sclk); + clk_disable(avp_svc->emcclk); } } mutex_unlock(&avp_svc->clk_lock); @@ -631,8 +634,9 @@ void avp_svc_stop(struct avp_svc_info *avp_svc) pr_info("%s: remote left clock '%s' on\n", __func__, aclk->mod->name); clk_disable(aclk->clk); - /* sclk was enabled once for every clock */ + /* sclk/emcclk was enabled once for every clock */ clk_disable(avp_svc->sclk); + clk_disable(avp_svc->emcclk); } aclk->refcnt = 0; 
} @@ -682,6 +686,21 @@ struct avp_svc_info *avp_svc_init(struct platform_device *pdev, ret = -ENOENT; goto err_get_clks; } + + avp_svc->emcclk = clk_get(&pdev->dev, "emc"); + if (IS_ERR(avp_svc->emcclk)) { + pr_err("avp_svc: Couldn't get emcclk for dvfs\n"); + ret = -ENOENT; + goto err_get_clks; + } + + /* + * The emc is a shared clock, it will be set to the highest + * requested rate from any user. Set the rate to ULONG_MAX to + * always request the max rate whenever this request is enabled + */ + clk_set_rate(avp_svc->emcclk, ULONG_MAX); + avp_svc->rpc_node = rpc_node; mutex_init(&avp_svc->clk_lock); @@ -694,6 +713,8 @@ err_get_clks: clk_put(avp_svc->clks[i].clk); if (!IS_ERR_OR_NULL(avp_svc->sclk)) clk_put(avp_svc->sclk); + if (!IS_ERR_OR_NULL(avp_svc->emcclk)) + clk_put(avp_svc->emcclk); err_alloc: return ERR_PTR(ret); } @@ -705,6 +726,7 @@ void avp_svc_destroy(struct avp_svc_info *avp_svc) for (i = 0; i < NUM_CLK_REQUESTS; i++) clk_put(avp_svc->clks[i].clk); clk_put(avp_svc->sclk); + clk_put(avp_svc->emcclk); kfree(avp_svc); } diff --git a/drivers/net/wireless/bcm4329/dhd_linux.c b/drivers/net/wireless/bcm4329/dhd_linux.c index 472b992751e7..c7ef3edd908f 100644 --- a/drivers/net/wireless/bcm4329/dhd_linux.c +++ b/drivers/net/wireless/bcm4329/dhd_linux.c @@ -106,7 +106,7 @@ int wifi_set_power(int on, unsigned long msec) int wifi_set_reset(int on, unsigned long msec) { - printk("%s = %d\n", __FUNCTION__, on); + DHD_TRACE(("%s = %d\n", __FUNCTION__, on)); if (wifi_control_data && wifi_control_data->set_reset) { wifi_control_data->set_reset(on); } @@ -117,7 +117,7 @@ int wifi_set_reset(int on, unsigned long msec) int wifi_get_mac_addr(unsigned char *buf) { - printk("%s\n", __FUNCTION__); + DHD_TRACE(("%s\n", __FUNCTION__)); if (!buf) return -EINVAL; if (wifi_control_data && wifi_control_data->get_mac_addr) { @@ -1399,22 +1399,24 @@ dhd_watchdog_thread(void *data) /* Run until signal received */ while (1) { if (down_interruptible (&dhd->watchdog_sem) == 0) { - + 
dhd_os_sdlock(&dhd->pub); if (dhd->pub.dongle_reset == FALSE) { + DHD_TIMER(("%s:\n", __FUNCTION__)); /* Call the bus module watchdog */ dhd_bus_watchdog(&dhd->pub); - } - /* Count the tick for reference */ - dhd->pub.tickcnt++; - /* Reschedule the watchdog */ - if (dhd->wd_timer_valid) - mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000); + /* Count the tick for reference */ + dhd->pub.tickcnt++; + /* Reschedule the watchdog */ + if (dhd->wd_timer_valid) + mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000); + } + dhd_os_sdunlock(&dhd->pub); dhd_os_wake_unlock(&dhd->pub); - } - else + } else { break; + } } complete_and_exit(&dhd->watchdog_exited, 0); @@ -1426,11 +1428,17 @@ dhd_watchdog(ulong data) dhd_info_t *dhd = (dhd_info_t *)data; dhd_os_wake_lock(&dhd->pub); + if (dhd->pub.dongle_reset) { + dhd_os_wake_unlock(&dhd->pub); + return; + } + if (dhd->watchdog_pid >= 0) { up(&dhd->watchdog_sem); return; } + dhd_os_sdlock(&dhd->pub); /* Call the bus module watchdog */ dhd_bus_watchdog(&dhd->pub); @@ -1440,6 +1448,7 @@ dhd_watchdog(ulong data) /* Reschedule the watchdog */ if (dhd->wd_timer_valid) mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000); + dhd_os_sdunlock(&dhd->pub); dhd_os_wake_unlock(&dhd->pub); } @@ -2198,8 +2207,8 @@ dhd_bus_start(dhd_pub_t *dhdp) #if defined(OOB_INTR_ONLY) /* Host registration for OOB interrupt */ if (bcmsdh_register_oob_intr(dhdp)) { - del_timer_sync(&dhd->timer); dhd->wd_timer_valid = FALSE; + del_timer_sync(&dhd->timer); DHD_ERROR(("%s Host failed to resgister for OOB\n", __FUNCTION__)); return -ENODEV; } @@ -2210,8 +2219,8 @@ dhd_bus_start(dhd_pub_t *dhdp) /* If bus is not ready, can't come up */ if (dhd->pub.busstate != DHD_BUS_DATA) { - del_timer_sync(&dhd->timer); dhd->wd_timer_valid = FALSE; + del_timer_sync(&dhd->timer); DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__)); return -ENODEV; } @@ -2377,6 +2386,7 @@ dhd_net_attach(dhd_pub_t *dhdp, int ifidx) dhd->pub.mac.octet[3], 
dhd->pub.mac.octet[4], dhd->pub.mac.octet[5]); #if defined(CONFIG_WIRELESS_EXT) +#if defined(CONFIG_FIRST_SCAN) #ifdef SOFTAP if (ifidx == 0) /* Don't call for SOFTAP Interface in SOFTAP MODE */ @@ -2384,6 +2394,7 @@ dhd_net_attach(dhd_pub_t *dhdp, int ifidx) #else wl_iw_iscan_set_scan_broadcast_prep(net, 1); #endif /* SOFTAP */ +#endif /* CONFIG_FIRST_SCAN */ #endif /* CONFIG_WIRELESS_EXT */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) @@ -2420,8 +2431,8 @@ dhd_bus_detach(dhd_pub_t *dhdp) #endif /* defined(OOB_INTR_ONLY) */ /* Clear the watchdog timer */ - del_timer_sync(&dhd->timer); dhd->wd_timer_valid = FALSE; + del_timer_sync(&dhd->timer); } } } @@ -2687,29 +2698,28 @@ void dhd_os_wd_timer(void *bus, uint wdtick) { dhd_pub_t *pub = bus; - static uint save_dhd_watchdog_ms = 0; dhd_info_t *dhd = (dhd_info_t *)pub->info; + unsigned long flags; + int del_timer_flag = FALSE; - /* don't start the wd until fw is loaded */ - if (pub->busstate == DHD_BUS_DOWN) - return; + flags = dhd_os_spin_lock(pub); - /* Totally stop the timer */ - if (!wdtick && dhd->wd_timer_valid == TRUE) { - del_timer_sync(&dhd->timer); - dhd->wd_timer_valid = FALSE; - save_dhd_watchdog_ms = wdtick; - return; + /* don't start the wd until fw is loaded */ + if (pub->busstate != DHD_BUS_DOWN) { + if (wdtick) { + dhd_watchdog_ms = (uint)wdtick; + dhd->wd_timer_valid = TRUE; + /* Re arm the timer, at last watchdog period */ + mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000); + } else if (dhd->wd_timer_valid == TRUE) { + /* Totally stop the timer */ + dhd->wd_timer_valid = FALSE; + del_timer_flag = TRUE; + } } - - if (wdtick) { - dhd_watchdog_ms = (uint)wdtick; - - /* Re arm the timer, at last watchdog period */ - mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000); - - dhd->wd_timer_valid = TRUE; - save_dhd_watchdog_ms = wdtick; + dhd_os_spin_unlock(pub, flags); + if (del_timer_flag) { + del_timer_sync(&dhd->timer); } } @@ -2927,20 +2937,12 @@ dhd_dev_reset(struct 
net_device *dev, uint8 flag) dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - /* Turning off watchdog */ - if (flag) - dhd_os_wd_timer(&dhd->pub, 0); - ret = dhd_bus_devreset(&dhd->pub, flag); if (ret) { DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret)); return ret; } - - /* Turning on watchdog back */ - if (!flag) - dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms); - DHD_ERROR(("%s: WLAN OFF DONE\n", __FUNCTION__)); + DHD_ERROR(("%s: WLAN %s DONE\n", __FUNCTION__, flag ? "OFF" : "ON")); return ret; } diff --git a/drivers/net/wireless/bcm4329/dhd_sdio.c b/drivers/net/wireless/bcm4329/dhd_sdio.c index 7494e3836732..ac46ca91eaeb 100644 --- a/drivers/net/wireless/bcm4329/dhd_sdio.c +++ b/drivers/net/wireless/bcm4329/dhd_sdio.c @@ -438,7 +438,7 @@ static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh, void * regsva, uint16 devid); static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh); static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh); -static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh); +static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, int reset_flag); static uint process_nvram_vars(char *varbuf, uint len); @@ -705,6 +705,7 @@ dhdsdio_sdclk(dhd_bus_t *bus, bool on) static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok) { + int ret = BCME_OK; #ifdef DHD_DEBUG uint oldstate = bus->clkstate; #endif /* DHD_DEBUG */ @@ -717,7 +718,7 @@ dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok) dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); bus->activity = TRUE; } - return BCME_OK; + return ret; } switch (target) { @@ -726,29 +727,32 @@ dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok) if (bus->clkstate == CLK_NONE) dhdsdio_sdclk(bus, TRUE); /* Now request HT Avail on the backplane */ - dhdsdio_htclk(bus, TRUE, pendok); - dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); - bus->activity = TRUE; + ret = dhdsdio_htclk(bus, TRUE, pendok); + if (ret == BCME_OK) { + 
dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + bus->activity = TRUE; + } break; case CLK_SDONLY: /* Remove HT request, or bring up SD clock */ if (bus->clkstate == CLK_NONE) - dhdsdio_sdclk(bus, TRUE); + ret = dhdsdio_sdclk(bus, TRUE); else if (bus->clkstate == CLK_AVAIL) - dhdsdio_htclk(bus, FALSE, FALSE); + ret = dhdsdio_htclk(bus, FALSE, FALSE); else DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n", bus->clkstate, target)); - dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + if (ret == BCME_OK) + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); break; case CLK_NONE: /* Make sure to remove HT request */ if (bus->clkstate == CLK_AVAIL) - dhdsdio_htclk(bus, FALSE, FALSE); + ret = dhdsdio_htclk(bus, FALSE, FALSE); /* Now remove the SD clock */ - dhdsdio_sdclk(bus, FALSE); + ret = dhdsdio_sdclk(bus, FALSE); dhd_os_wd_timer(bus->dhd, 0); break; } @@ -756,7 +760,7 @@ dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok) DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate)); #endif /* DHD_DEBUG */ - return BCME_OK; + return ret; } int @@ -2782,23 +2786,24 @@ dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) dhd_timeout_t tmo; uint retries = 0; uint8 ready, enable; - int err, ret = 0; + int err, ret = BCME_ERROR; uint8 saveclk; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); ASSERT(bus->dhd); if (!bus->dhd) - return 0; + return BCME_OK; if (enforce_mutex) dhd_os_sdlock(bus->dhd); /* Make sure backplane clock is on, needed to generate F2 interrupt */ - dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); - if (bus->clkstate != CLK_AVAIL) + err = dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if ((err != BCME_OK) || (bus->clkstate != CLK_AVAIL)) { + DHD_ERROR(("%s: Failed to set backplane clock: err %d\n", __FUNCTION__, err)); goto exit; - + } /* Force clocks on backplane to be sure F2 interrupt propagates */ saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); @@ -2873,6 +2878,7 @@ dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) if (dhdp->busstate != 
DHD_BUS_DATA) dhdsdio_clkctl(bus, CLK_NONE, FALSE); + ret = BCME_OK; exit: if (enforce_mutex) dhd_os_sdunlock(bus->dhd); @@ -4631,8 +4637,6 @@ dhd_bus_watchdog(dhd_pub_t *dhdp) if (bus->sleeping) return FALSE; - dhd_os_sdlock(bus->dhd); - /* Poll period: check device if appropriate. */ if (bus->poll && (++bus->polltick >= bus->pollrate)) { uint32 intstatus = 0; @@ -4702,8 +4706,6 @@ dhd_bus_watchdog(dhd_pub_t *dhdp) } } - dhd_os_sdunlock(bus->dhd); - return bus->ipend; } @@ -5299,7 +5301,7 @@ dhdsdio_release(dhd_bus_t *bus, osl_t *osh) if (bus->dhd) { - dhdsdio_release_dongle(bus, osh); + dhdsdio_release_dongle(bus, osh, TRUE); dhd_detach(bus->dhd); bus->dhd = NULL; @@ -5343,11 +5345,11 @@ dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh) static void -dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh) +dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, int reset_flag) { DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - if (bus->dhd && bus->dhd->dongle_reset) + if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) return; if (bus->sih) { @@ -5797,17 +5799,19 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) if (flag == TRUE) { if (!bus->dhd->dongle_reset) { + dhd_os_sdlock(dhdp); + /* Turning off watchdog */ + dhd_os_wd_timer(dhdp, 0); #if !defined(IGNORE_ETH0_DOWN) /* Force flow control as protection when stop come before ifconfig_down */ dhd_txflowcontrol(bus->dhd, 0, ON); #endif /* !defined(IGNORE_ETH0_DOWN) */ /* Expect app to have torn down any connection before calling */ /* Stop the bus, disable F2 */ - dhd_os_sdlock(dhdp); dhd_bus_stop(bus, FALSE); /* Clean tx/rx buffer pointers, detach from the dongle */ - dhdsdio_release_dongle(bus, bus->dhd->osh); + dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE); bus->dhd->dongle_reset = TRUE; bus->dhd->up = FALSE; @@ -5838,21 +5842,25 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh)) { /* Re-init bus, enable F2 transfer */ - dhd_bus_init((dhd_pub_t *) bus->dhd, 
FALSE); - + bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE); + if (bcmerror == BCME_OK) { #if defined(OOB_INTR_ONLY) - dhd_enable_oob_intr(bus, TRUE); + dhd_enable_oob_intr(bus, TRUE); #endif /* defined(OOB_INTR_ONLY) */ - - bus->dhd->dongle_reset = FALSE; - bus->dhd->up = TRUE; - + bus->dhd->dongle_reset = FALSE; + bus->dhd->up = TRUE; #if !defined(IGNORE_ETH0_DOWN) - /* Restore flow control */ - dhd_txflowcontrol(bus->dhd, 0, OFF); -#endif + /* Restore flow control */ + dhd_txflowcontrol(bus->dhd, 0, OFF); +#endif + /* Turning on watchdog back */ + dhd_os_wd_timer(dhdp, dhd_watchdog_ms); - DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__)); + DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__)); + } else { + dhd_bus_stop(bus, FALSE); + dhdsdio_release_dongle(bus, bus->dhd->osh, FALSE); + } } else bcmerror = BCME_SDIO_ERROR; } else diff --git a/drivers/net/wireless/bcm4329/wl_iw.c b/drivers/net/wireless/bcm4329/wl_iw.c index e6a8c61aff9d..8adf2d87ed65 100644 --- a/drivers/net/wireless/bcm4329/wl_iw.c +++ b/drivers/net/wireless/bcm4329/wl_iw.c @@ -21,7 +21,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: wl_iw.c,v 1.51.4.9.2.6.4.142.4.58 2010/11/18 02:08:30 Exp $ + * $Id: wl_iw.c,v 1.51.4.9.2.6.4.142.4.61 2010/12/03 22:09:41 Exp $ */ @@ -167,9 +167,11 @@ static wlc_ssid_t g_specific_ssid; static wlc_ssid_t g_ssid; static wl_iw_ss_cache_ctrl_t g_ss_cache_ctrl; +#if defined(CONFIG_FIRST_SCAN) static volatile uint g_first_broadcast_scan; static volatile uint g_first_counter_scans; #define MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN 3 +#endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) @@ -190,7 +192,9 @@ static volatile uint g_first_counter_scans; static void wl_iw_free_ss_cache(void); static int wl_iw_run_ss_cache_timer(int kick_off); #endif +#if defined(CONFIG_FIRST_SCAN) int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag); +#endif static int dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len); #define ISCAN_STATE_IDLE 0 #define ISCAN_STATE_SCANING 1 @@ -1532,13 +1536,13 @@ wl_control_wl_start(struct net_device *dev) ret = dhd_dev_reset(dev, 0); + if (ret == BCME_OK) { #if defined(BCMLXSDMMC) - sdioh_start(NULL, 1); + sdioh_start(NULL, 1); #endif - - dhd_dev_init_ioctl(dev); - - g_onoff = G_WLAN_SET_ON; + dhd_dev_init_ioctl(dev); + g_onoff = G_WLAN_SET_ON; + } } WL_TRACE(("Exited %s \n", __FUNCTION__)); @@ -1591,10 +1595,11 @@ wl_iw_control_wl_off( #endif memset(g_scan, 0, G_SCAN_RESULTS); g_scan_specified_ssid = 0; - +#if defined(CONFIG_FIRST_SCAN) g_first_broadcast_scan = BROADCAST_SCAN_FIRST_IDLE; g_first_counter_scans = 0; #endif +#endif #if defined(BCMLXSDMMC) sdioh_stop(NULL); @@ -1624,10 +1629,10 @@ wl_iw_control_wl_on( WL_TRACE(("Enter %s \n", __FUNCTION__)); - if ((ret = wl_control_wl_start(dev)) == BCME_SDIO_ERROR) { + if ((ret = wl_control_wl_start(dev)) != BCME_OK) { WL_ERROR(("%s failed first attemp\n", __FUNCTION__)); - bcm_mdelay(100); - if ((ret = wl_control_wl_start(dev)) == BCME_SDIO_ERROR) { + dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF); + if ((ret = wl_control_wl_start(dev)) != BCME_OK) 
{ WL_ERROR(("%s failed second attemp\n", __FUNCTION__)); net_os_send_hang_message(dev); return ret; @@ -2552,7 +2557,7 @@ wl_iw_set_wap( join_params.ssid.SSID_len = htod32(g_ssid.SSID_len); memcpy(&join_params.params.bssid, awrq->sa_data, ETHER_ADDR_LEN); - WL_TRACE(("%s target_channel=%d\n", __FUNCTION__, g_wl_iw_params.target_channel)); + WL_ASSOC(("%s target_channel=%d\n", __FUNCTION__, g_wl_iw_params.target_channel)); wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params, &join_params_size); if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size))) { @@ -2561,7 +2566,7 @@ wl_iw_set_wap( } if (g_ssid.SSID_len) { - WL_TRACE(("%s: join SSID=%s BSSID="MACSTR" ch=%d\n", __FUNCTION__, \ + WL_ASSOC(("%s: join SSID=%s BSSID="MACSTR" ch=%d\n", __FUNCTION__, \ g_ssid.SSID, MAC2STR((u8 *)awrq->sa_data), \ g_wl_iw_params.target_channel)); } @@ -2801,9 +2806,10 @@ wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid) params->passive_time = -1; params->home_time = -1; params->channel_num = 0; +#if defined(CONFIG_FIRST_SCAN) if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED) params->passive_time = 30; - +#endif params->nprobes = htod32(params->nprobes); params->active_time = htod32(params->active_time); params->passive_time = htod32(params->passive_time); @@ -2932,7 +2938,7 @@ wl_iw_iscan_get(iscan_info_t *iscan) static void wl_iw_force_specific_scan(iscan_info_t *iscan) { - WL_TRACE(("%s force Specific SCAN for %s\n", __FUNCTION__, g_specific_ssid.SSID)); + WL_SCAN(("%s force Specific SCAN for %s\n", __FUNCTION__, g_specific_ssid.SSID)); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) rtnl_lock(); #endif @@ -2950,9 +2956,11 @@ static void wl_iw_send_scan_complete(iscan_info_t *iscan) memset(&wrqu, 0, sizeof(wrqu)); wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL); +#if defined(CONFIG_FIRST_SCAN) if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED) g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_READY; 
- WL_TRACE(("Send Event ISCAN complete\n")); +#endif + WL_SCAN(("Send Event ISCAN complete\n")); #endif } @@ -3347,14 +3355,16 @@ wl_iw_set_scan( if (wrqu->data.length == sizeof(struct iw_scan_req)) { if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { struct iw_scan_req *req = (struct iw_scan_req *)extra; +#if defined(CONFIG_FIRST_SCAN) if (g_first_broadcast_scan != BROADCAST_SCAN_FIRST_RESULT_CONSUMED) { - WL_TRACE(("%s Ignoring SC %s first BC is not done = %d\n", \ + WL_ERROR(("%s Ignoring SC %s first BC is not done = %d\n", \ __FUNCTION__, req->essid, \ g_first_broadcast_scan)); return -EBUSY; } +#endif if (g_scan_specified_ssid) { - WL_TRACE(("%s Specific SCAN is not done ignore scan for = %s \n", \ + WL_SCAN(("%s Specific SCAN is not done ignore scan for = %s \n", \ __FUNCTION__, req->essid)); return -EBUSY; } @@ -3372,7 +3382,7 @@ wl_iw_set_scan( #endif if ((error = dev_wlc_ioctl(dev, WLC_SCAN, &g_specific_ssid, sizeof(g_specific_ssid)))) { - WL_TRACE(("#### Set SCAN for %s failed with %d\n", g_specific_ssid.SSID, error)); + WL_SCAN(("Set SCAN for %s failed with %d\n", g_specific_ssid.SSID, error)); g_scan_specified_ssid = 0; return -EBUSY; } @@ -3387,14 +3397,16 @@ wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag) wlc_ssid_t ssid; iscan_info_t *iscan = g_iscan; +#if defined(CONFIG_FIRST_SCAN) if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_IDLE) { g_first_broadcast_scan = BROADCAST_SCAN_FIRST_STARTED; - WL_TRACE(("%s: First Brodcast scan was forced\n", __FUNCTION__)); + WL_SCAN(("%s: First Brodcast scan was forced\n", __FUNCTION__)); } else if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED) { - WL_TRACE(("%s: ignore ISCAN request first BS is not done yet\n", __FUNCTION__)); + WL_SCAN(("%s: ignore ISCAN request first BS is not done yet\n", __FUNCTION__)); return 0; } +#endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) if (flag) @@ -3404,7 +3416,7 @@ wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag) 
dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &iscan->scan_flag, sizeof(iscan->scan_flag)); wl_iw_set_event_mask(dev); - WL_TRACE(("+++: Set Broadcast ISCAN\n")); + WL_SCAN(("+++: Set Broadcast ISCAN\n")); memset(&ssid, 0, sizeof(ssid)); @@ -3439,7 +3451,7 @@ wl_iw_iscan_set_scan( iscan_info_t *iscan = g_iscan; int ret = 0; - WL_TRACE(("%s: SIOCSIWSCAN : ISCAN\n", dev->name)); + WL_SCAN(("%s: SIOCSIWSCAN : ISCAN\n", dev->name)); #if defined(CSCAN) WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __FUNCTION__)); @@ -3450,19 +3462,19 @@ wl_iw_iscan_set_scan( #if defined(SOFTAP) if (ap_cfg_running) { - WL_TRACE(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__)); + WL_SCAN(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__)); goto set_scan_end; } #endif if (g_onoff == G_WLAN_SET_OFF) { - WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__)); + WL_SCAN(("%s: driver is not up yet after START\n", __FUNCTION__)); goto set_scan_end; } #ifdef PNO_SUPPORT if (dhd_dev_get_pno_status(dev)) { - WL_ERROR(("%s: Scan called when PNO is active\n", __FUNCTION__)); + WL_SCAN(("%s: Scan called when PNO is active\n", __FUNCTION__)); } #endif @@ -3472,7 +3484,7 @@ wl_iw_iscan_set_scan( } if (g_scan_specified_ssid) { - WL_TRACE(("%s Specific SCAN already running ignoring BC scan\n", \ + WL_SCAN(("%s Specific SCAN already running ignoring BC scan\n", \ __FUNCTION__)); ret = EBUSY; goto set_scan_end; @@ -3498,14 +3510,14 @@ wl_iw_iscan_set_scan( g_scan_specified_ssid = 0; if (iscan->iscan_state == ISCAN_STATE_SCANING) { - WL_TRACE(("%s ISCAN already in progress \n", __FUNCTION__)); + WL_SCAN(("%s ISCAN already in progress \n", __FUNCTION__)); goto set_scan_end; } } } #endif -#if !defined(CSCAN) +#if defined(CONFIG_FIRST_SCAN) && !defined(CSCAN) if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) { if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) { @@ -3809,7 +3821,6 @@ wl_iw_get_scan( return -EINVAL; } - if 
((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) return error; ci.scan_channel = dtoh32(ci.scan_channel); @@ -3979,7 +3990,7 @@ wl_iw_iscan_get_scan( uint buflen_from_user = dwrq->length; #endif - WL_TRACE(("%s %s buflen_from_user %d:\n", dev->name, __FUNCTION__, dwrq->length)); + WL_SCAN(("%s %s buflen_from_user %d:\n", dev->name, __FUNCTION__, dwrq->length)); #if defined(SOFTAP) if (ap_cfg_running) { @@ -3993,11 +4004,13 @@ wl_iw_iscan_get_scan( return -EINVAL; } +#if defined(CONFIG_FIRST_SCAN) if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_READY) { WL_TRACE(("%s %s: first ISCAN results are NOT ready yet \n", \ dev->name, __FUNCTION__)); return -EAGAIN; } +#endif if ((!iscan) || (iscan->sysioc_pid < 0)) { WL_ERROR(("%ssysioc_pid\n", __FUNCTION__)); @@ -4126,7 +4139,9 @@ wl_iw_iscan_get_scan( wl_iw_run_ss_cache_timer(0); wl_iw_run_ss_cache_timer(1); #endif /* CSCAN */ +#if defined(CONFIG_FIRST_SCAN) g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED; +#endif WL_TRACE(("%s return to WE %d bytes APs=%d\n", __FUNCTION__, dwrq->length, counter)); @@ -4849,7 +4864,7 @@ wl_iw_set_encodeext( int error; struct iw_encode_ext *iwe; - WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name)); + WL_WSEC(("%s: SIOCSIWENCODEEXT\n", dev->name)); CHECK_EXTRA_FOR_NULL(extra); @@ -5079,7 +5094,7 @@ wl_iw_get_encodeext( char *extra ) { - WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name)); + WL_WSEC(("%s: SIOCGIWENCODEEXT\n", dev->name)); return 0; } @@ -5097,7 +5112,7 @@ wl_iw_set_wpaauth( int val = 0; wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev); - WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name)); + WL_WSEC(("%s: SIOCSIWAUTH\n", dev->name)); #if defined(SOFTAP) if (ap_cfg_running) { @@ -5109,7 +5124,7 @@ wl_iw_set_wpaauth( paramid = vwrq->flags & IW_AUTH_INDEX; paramval = vwrq->value; - WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n", + WL_WSEC(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n", dev->name, paramid, paramval)); switch (paramid) 
{ @@ -5125,7 +5140,7 @@ wl_iw_set_wpaauth( #endif else if (paramval & IW_AUTH_WAPI_VERSION_1) val = WPA_AUTH_WAPI; - WL_INFORM(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val)); + WL_WSEC(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val)); if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) return error; break; @@ -5155,24 +5170,28 @@ wl_iw_set_wpaauth( WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming " "we're a WPS enrollee\n", dev->name, __FUNCTION__)); if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) { - WL_WSEC(("Failed to set iovar is_WPS_enrollee\n")); + WL_ERROR(("Failed to set iovar is_WPS_enrollee\n")); return error; } } else if (val) { if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) { - WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n")); + WL_ERROR(("Failed to clear iovar is_WPS_enrollee\n")); return error; } } - if ((error = dev_wlc_intvar_set(dev, "wsec", val))) + if ((error = dev_wlc_intvar_set(dev, "wsec", val))) { + WL_ERROR(("Failed to set 'wsec'iovar\n")); return error; + } break; case IW_AUTH_KEY_MGMT: - if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) + if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) { + WL_ERROR(("Failed to get 'wpa_auth'iovar\n")); return error; + } if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) { if (paramval & IW_AUTH_KEY_MGMT_PSK) @@ -5190,18 +5209,22 @@ wl_iw_set_wpaauth( #endif if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT)) val = WPA_AUTH_WAPI; - WL_INFORM(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val)); - if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) + WL_WSEC(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val)); + if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) { + WL_ERROR(("Failed to set 'wpa_auth'iovar\n")); return error; + } break; case IW_AUTH_TKIP_COUNTERMEASURES: - dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char 
*)¶mval, 1); + if ((error = dev_wlc_bufvar_set(dev, "tkip_countermeasures", \ + (char *)¶mval, sizeof(paramval)))) + WL_WSEC(("%s: tkip_countermeasures failed %d\n", __FUNCTION__, error)); break; case IW_AUTH_80211_AUTH_ALG: - WL_INFORM(("Setting the D11auth %d\n", paramval)); + WL_WSEC(("Setting the D11auth %d\n", paramval)); if (paramval == IW_AUTH_ALG_OPEN_SYSTEM) val = 0; else if (paramval == IW_AUTH_ALG_SHARED_KEY) @@ -5218,15 +5241,21 @@ wl_iw_set_wpaauth( if (paramval == 0) { iw->pwsec = 0; iw->gwsec = 0; - if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) + if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) { + WL_ERROR(("Failed to get 'wsec'iovar\n")); return error; + } if (val & (TKIP_ENABLED | AES_ENABLED)) { val &= ~(TKIP_ENABLED | AES_ENABLED); dev_wlc_intvar_set(dev, "wsec", val); } val = 0; - WL_INFORM(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val)); - dev_wlc_intvar_set(dev, "wpa_auth", 0); + + WL_INFORM(("%s: %d: setting wpa_auth to %d\n", + __FUNCTION__, __LINE__, val)); + error = dev_wlc_intvar_set(dev, "wpa_auth", 0); + if (error) + WL_ERROR(("Failed to set 'wpa_auth'iovar\n")); return error; } @@ -5234,11 +5263,17 @@ wl_iw_set_wpaauth( break; case IW_AUTH_DROP_UNENCRYPTED: - dev_wlc_bufvar_set(dev, "wsec_restrict", (char *)¶mval, 1); + error = dev_wlc_bufvar_set(dev, "wsec_restrict", \ + (char *)¶mval, sizeof(paramval)); + if (error) + WL_ERROR(("%s: wsec_restrict %d\n", __FUNCTION__, error)); break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: - dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)¶mval, 1); + error = dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", \ + (char *)¶mval, sizeof(paramval)); + if (error) + WL_WSEC(("%s: rx_unencrypted_eapol %d\n", __FUNCTION__, error)); break; #if WIRELESS_EXT > 17 @@ -5367,15 +5402,24 @@ wl_iw_get_wpaauth( break; case IW_AUTH_TKIP_COUNTERMEASURES: - dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)¶mval, 1); + error = dev_wlc_bufvar_get(dev, "tkip_countermeasures", \ + (char 
*)¶mval, sizeof(paramval)); + if (error) + WL_ERROR(("%s get tkip_countermeasures %d\n", __FUNCTION__, error)); break; case IW_AUTH_DROP_UNENCRYPTED: - dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)¶mval, 1); + error = dev_wlc_bufvar_get(dev, "wsec_restrict", \ + (char *)¶mval, sizeof(paramval)); + if (error) + WL_ERROR(("%s get wsec_restrict %d\n", __FUNCTION__, error)); break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: - dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)¶mval, 1); + error = dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", \ + (char *)¶mval, sizeof(paramval)); + if (error) + WL_ERROR(("%s get rx_unencrypted_eapol %d\n", __FUNCTION__, error)); break; case IW_AUTH_80211_AUTH_ALG: @@ -5604,7 +5648,7 @@ wl_iw_combined_scan_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nss int i; iscan_info_t *iscan = g_iscan; - WL_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, nchan)); + WL_SCAN(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, nchan)); if ((!dev) && (!g_iscan) && (!iscan->iscan_ex_params_p)) { WL_ERROR(("%s error exit\n", __FUNCTION__)); @@ -5975,6 +6019,7 @@ wl_iw_set_cscan( goto exit_proc; } +#if defined(CONFIG_FIRST_SCAN) if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) { if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) { @@ -5989,6 +6034,7 @@ wl_iw_set_cscan( goto exit_proc; } } +#endif res = wl_iw_combined_scan_set(dev, ssids_local, nssid, nchan); @@ -6842,18 +6888,18 @@ set_ap_mac_list(struct net_device *dev, void *buf) int ioc_res = 0; ap_macmode = mac_list_set->mode; - if (mac_mode == MACLIST_MODE_DISABLED) { + bzero(&ap_black_list, sizeof(struct mflist)); - bzero(&ap_black_list, sizeof(struct mflist)); + if (mac_mode == MACLIST_MODE_DISABLED) { ioc_res = dev_wlc_ioctl(dev, WLC_SET_MACMODE, &mac_mode, sizeof(mac_mode)); + check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__); WL_SOFTAP(("%s: MAC filtering disabled\n", __FUNCTION__)); } else { scb_val_t scbval; char mac_buf[256] = 
{0}; struct maclist *assoc_maclist = (struct maclist *) mac_buf; - bool deny_if_matched = (mac_mode == MACLIST_MODE_DENY); bcopy(maclist, &ap_black_list, sizeof(ap_black_list)); @@ -6877,21 +6923,27 @@ set_ap_mac_list(struct net_device *dev, void *buf) check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__); WL_SOFTAP((" Cur assoc clients:%d\n", assoc_maclist->count)); - if (assoc_maclist->count) { - int j; - + if (assoc_maclist->count) for (i = 0; i < assoc_maclist->count; i++) { + int j; + bool assoc_mac_matched = false; - WL_SOFTAP(("\ncheking assoc STA:")); - print_buf(&assoc_maclist->ea[i], 6, 0); - - for (j = 0; j < maclist->count; j++) { + WL_SOFTAP(("\n Cheking assoc STA: ")); + print_buf(&assoc_maclist->ea[i], 6, 7); + WL_SOFTAP(("with the b/w list:")); - if (!bcmp(&assoc_maclist->ea[i], &maclist->ea[j], \ + for (j = 0; j < maclist->count; j++) + if (!bcmp(&assoc_maclist->ea[i], &maclist->ea[j], ETHER_ADDR_LEN)) { - if (deny_if_matched) { - WL_SOFTAP(("black match," + assoc_mac_matched = true; + break; + } + + if (((mac_mode == MACLIST_MODE_ALLOW) && !assoc_mac_matched) || + ((mac_mode == MACLIST_MODE_DENY) && assoc_mac_matched)) { + + WL_SOFTAP(("b-match or w-mismatch," " do deauth/disassoc \n")); scbval.val = htod32(1); bcopy(&assoc_maclist->ea[i], &scbval.ea, \ @@ -6902,13 +6954,10 @@ set_ap_mac_list(struct net_device *dev, void *buf) check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__); - } else { - WL_SOFTAP(("white match, let it be\n")); - } - break; - } + + } else { + WL_SOFTAP((" no b/w list hits, let it be\n")); } - } } else { WL_SOFTAP(("No ASSOC CLIENTS\n")); } @@ -8182,9 +8231,11 @@ int wl_iw_attach(struct net_device *dev, void *dhdp) g_iscan = iscan; iscan->dev = dev; iscan->iscan_state = ISCAN_STATE_IDLE; +#if defined(CONFIG_FIRST_SCAN) g_first_broadcast_scan = BROADCAST_SCAN_FIRST_IDLE; g_first_counter_scans = 0; g_iscan->scan_flag = 0; +#endif iscan->timer_ms = 8000; init_timer(&iscan->timer); diff --git 
a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c index 51237fbb1bbb..6d20b0454a1d 100644 --- a/drivers/regulator/tps6586x-regulator.c +++ b/drivers/regulator/tps6586x-regulator.c @@ -231,8 +231,7 @@ static int tps6586x_dvm_voltages[] = { }; #define TPS6586X_REGULATOR(_id, vdata, _ops, vreg, shift, nbits, \ - ereg0, ebit0, ereg1, ebit1, goreg, gobit) \ -{ \ + ereg0, ebit0, ereg1, ebit1) \ .desc = { \ .name = "REG-" #_id, \ .ops = &tps6586x_regulator_##_ops, \ @@ -248,18 +247,26 @@ static int tps6586x_dvm_voltages[] = { .enable_bit[0] = (ebit0), \ .enable_reg[1] = TPS6586X_SUPPLY##ereg1, \ .enable_bit[1] = (ebit1), \ - .voltages = tps6586x_##vdata##_voltages, \ -} + .voltages = tps6586x_##vdata##_voltages, + +#define TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \ + .go_reg = TPS6586X_##goreg, \ + .go_bit = (gobit), #define TPS6586X_LDO(_id, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1) \ +{ \ TPS6586X_REGULATOR(_id, vdata, ldo_ops, vreg, shift, nbits, \ - ereg0, ebit0, ereg1, ebit1, 0, 0) + ereg0, ebit0, ereg1, ebit1) \ +} #define TPS6586X_DVM(_id, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1, goreg, gobit) \ +{ \ TPS6586X_REGULATOR(_id, vdata, dvm_ops, vreg, shift, nbits, \ - ereg0, ebit0, ereg1, ebit1, goreg, gobit) + ereg0, ebit0, ereg1, ebit1) \ + TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \ +} static struct tps6586x_regulator tps6586x_regulator[] = { TPS6586X_LDO(LDO_0, ldo, SUPPLYV1, 5, 3, ENC, 0, END, 0), @@ -267,11 +274,11 @@ static struct tps6586x_regulator tps6586x_regulator[] = { TPS6586X_LDO(LDO_5, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6), TPS6586X_LDO(LDO_6, ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4), TPS6586X_LDO(LDO_7, ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5), - TPS6586X_LDO(LDO_8, ldo, SUPPLYV1, 5, 3, ENC, 6, END, 6), + TPS6586X_LDO(LDO_8, ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6), TPS6586X_LDO(LDO_9, ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7), - TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, ENE, 7, ENE, 7), + 
TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7), TPS6586X_LDO(LDO_1, dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1), - TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 1, END, 1), + TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), TPS6586X_DVM(LDO_2, dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6), TPS6586X_DVM(LDO_4, ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6), @@ -290,6 +297,10 @@ static inline int tps6586x_regulator_preinit(struct device *parent, uint8_t val1, val2; int ret; + if (ri->enable_reg[0] == ri->enable_reg[1] && + ri->enable_bit[0] == ri->enable_bit[1]) + return 0; + ret = tps6586x_read(parent, ri->enable_reg[0], &val1); if (ret) return ret; @@ -298,14 +309,14 @@ static inline int tps6586x_regulator_preinit(struct device *parent, if (ret) return ret; - if (!(val2 & ri->enable_bit[1])) + if (!(val2 & (1 << ri->enable_bit[1]))) return 0; /* * The regulator is on, but it's enabled with the bit we don't * want to use, so we switch the enable bits */ - if (!(val1 & ri->enable_bit[0])) { + if (!(val1 & (1 << ri->enable_bit[0]))) { ret = tps6586x_set_bits(parent, ri->enable_reg[0], 1 << ri->enable_bit[0]); if (ret) diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c index ed4f5739cb9c..d9d4d2fd5998 100644 --- a/drivers/usb/gadget/android.c +++ b/drivers/usb/gadget/android.c @@ -108,7 +108,7 @@ static struct usb_device_descriptor device_desc = { }; static struct list_head _functions = LIST_HEAD_INIT(_functions); -static int _registered_function_count = 0; +static bool _are_functions_bound; static struct android_usb_function *get_function(const char *name) { @@ -120,6 +120,50 @@ static struct android_usb_function *get_function(const char *name) return 0; } +static bool are_functions_registered(struct android_dev *dev) +{ + char **functions = dev->functions; + int i; + + /* Look only for functions required by the board config */ + for (i = 0; i < dev->num_functions; i++) { + char *name = *functions++; + bool is_match = false; + /* Could 
reuse get_function() here, but a reverse search + * should yield less comparisons overall */ + struct android_usb_function *f; + list_for_each_entry_reverse(f, &_functions, list) { + if (!strcmp(name, f->name)) { + is_match = true; + break; + } + } + if (is_match) + continue; + else + return false; + } + + return true; +} + +static bool should_bind_functions(struct android_dev *dev) +{ + /* Don't waste time if the main driver hasn't bound */ + if (!dev->config) + return false; + + /* Don't waste time if we've already bound the functions */ + if (_are_functions_bound) + return false; + + /* This call is the most costly, so call it last */ + if (!are_functions_registered(dev)) + return false; + + return true; +} + static void bind_functions(struct android_dev *dev) { struct android_usb_function *f; @@ -134,6 +178,8 @@ static void bind_functions(struct android_dev *dev) else printk(KERN_ERR "function %s not found in bind_functions\n", name); } + + _are_functions_bound = true; } static int android_bind_config(struct usb_configuration *c) @@ -143,8 +189,7 @@ static int android_bind_config(struct usb_configuration *c) printk(KERN_DEBUG "android_bind_config\n"); dev->config = c; - /* bind our functions if they have all registered */ - if (_registered_function_count == dev->num_functions) + if (should_bind_functions(dev)) bind_functions(dev); return 0; @@ -188,7 +233,13 @@ static int product_has_function(struct android_usb_product *p, int i; for (i = 0; i < count; i++) { - if (!strcmp(name, *functions++)) + /* For functions with multiple instances, usb_function.name + * will have an index appended to the core name (ex: acm0), + * while android_usb_product.functions[i] will only have the + * core name (ex: acm). So, only compare up to the length of + * android_usb_product.functions[i]. 
+ */ + if (!strncmp(name, functions[i], strlen(functions[i]))) return 1; } return 0; @@ -295,12 +346,8 @@ void android_register_function(struct android_usb_function *f) printk(KERN_INFO "android_register_function %s\n", f->name); list_add_tail(&f->list, &_functions); - _registered_function_count++; - /* bind our functions if they have all registered - * and the main driver has bound. - */ - if (dev && dev->config && _registered_function_count == dev->num_functions) + if (dev && should_bind_functions(dev)) bind_functions(dev); } diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c index e3e5410d922f..2d7fdcce310d 100644 --- a/drivers/usb/gadget/f_acm.c +++ b/drivers/usb/gadget/f_acm.c @@ -698,6 +698,7 @@ acm_unbind(struct usb_configuration *c, struct usb_function *f) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); gs_free_req(acm->notify, acm->notify_req); + kfree(acm->port.func.name); kfree(acm); } @@ -769,7 +770,11 @@ int acm_bind_config(struct usb_configuration *c, u8 port_num) acm->port.disconnect = acm_disconnect; acm->port.send_break = acm_send_break; - acm->port.func.name = "acm"; + acm->port.func.name = kasprintf(GFP_KERNEL, "acm%u", port_num); + if (!acm->port.func.name) { + kfree(acm); + return -ENOMEM; + } acm->port.func.strings = acm_strings; /* descriptors are per-instance copies */ acm->port.func.bind = acm_bind; @@ -785,12 +790,38 @@ int acm_bind_config(struct usb_configuration *c, u8 port_num) } #ifdef CONFIG_USB_ANDROID_ACM +#include <linux/platform_device.h> + +static struct acm_platform_data *acm_pdata; + +static int acm_probe(struct platform_device *pdev) +{ + acm_pdata = pdev->dev.platform_data; + return 0; +} + +static struct platform_driver acm_platform_driver = { + .driver = { .name = "acm", }, + .probe = acm_probe, +}; int acm_function_bind_config(struct usb_configuration *c) { - int ret = acm_bind_config(c, 0); - if (ret == 0) - gserial_setup(c->cdev->gadget, 1); + int i; + u8 num_inst = 
acm_pdata ? acm_pdata->num_inst : 1; + int ret = gserial_setup(c->cdev->gadget, num_inst); + + if (ret) + return ret; + + for (i = 0; i < num_inst; i++) { + ret = acm_bind_config(c, i); + if (ret) { + pr_err("Could not bind acm%u config\n", i); + break; + } + } + return ret; } @@ -802,6 +833,7 @@ static struct android_usb_function acm_function = { static int __init init(void) { printk(KERN_INFO "f_acm init\n"); + platform_driver_register(&acm_platform_driver); android_register_function(&acm_function); return 0; } diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c index 8964e58de2ba..6d522e0d529b 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c @@ -850,9 +850,11 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) { struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep); struct fsl_req *req = container_of(_req, struct fsl_req, req); - struct fsl_udc *udc; + struct fsl_udc *udc = ep->udc; unsigned long flags; + enum dma_data_direction dir; int is_iso = 0; + int status; /* catch various bogus parameters */ if (!_req || !req->req.complete || !req->req.buf @@ -860,17 +862,27 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) VDBG("%s, bad params", __func__); return -EINVAL; } - if (unlikely(!_ep || !ep->desc)) { + + spin_lock_irqsave(&udc->lock, flags); + + if (unlikely(!ep->desc)) { VDBG("%s, bad ep", __func__); + spin_unlock_irqrestore(&udc->lock, flags); return -EINVAL; } + if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { - if (req->req.length > ep->ep.maxpacket) + if (req->req.length > ep->ep.maxpacket) { + spin_unlock_irqrestore(&udc->lock, flags); return -EMSGSIZE; + } is_iso = 1; } - udc = ep->udc; + dir = ep_is_in(ep) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; + + spin_unlock_irqrestore(&udc->lock, flags); + if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; @@ -878,18 +890,12 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) /* map virtual address to hardware */ if (req->req.dma == DMA_ADDR_INVALID) { - req->req.dma = dma_map_single(ep->udc->gadget.dev.parent, - req->req.buf, - req->req.length, ep_is_in(ep) - ? DMA_TO_DEVICE - : DMA_FROM_DEVICE); + req->req.dma = dma_map_single(udc->gadget.dev.parent, + req->req.buf, req->req.length, dir); req->mapped = 1; } else { - dma_sync_single_for_device(ep->udc->gadget.dev.parent, - req->req.dma, req->req.length, - ep_is_in(ep) - ? DMA_TO_DEVICE - : DMA_FROM_DEVICE); + dma_sync_single_for_device(udc->gadget.dev.parent, + req->req.dma, req->req.length, dir); req->mapped = 0; } @@ -899,10 +905,19 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) /* build dtds and push them to device queue */ - if (fsl_req_to_dtd(req, gfp_flags)) - return -ENOMEM; + status = fsl_req_to_dtd(req, gfp_flags); + if (status) + goto err_unmap; spin_lock_irqsave(&udc->lock, flags); + + /* re-check if the ep has not been disabled */ + if (unlikely(!ep->desc)) { + spin_unlock_irqrestore(&udc->lock, flags); + status = -EINVAL; + goto err_unmap; + } + fsl_queue_td(ep, req); /* Update ep0 state */ @@ -915,6 +930,15 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) spin_unlock_irqrestore(&udc->lock, flags); return 0; + +err_unmap: + if (req->mapped) { + dma_unmap_single(udc->gadget.dev.parent, + req->req.dma, req->req.length, dir); + req->req.dma = DMA_ADDR_INVALID; + req->mapped = 0; + } + return status; } /* dequeues (cancels, unlinks) an I/O request from an endpoint */ diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c index 01e5354a4c20..fdc934523c0f 100644 --- a/drivers/usb/gadget/u_serial.c +++ b/drivers/usb/gadget/u_serial.c @@ -118,7 
+118,7 @@ struct gs_port { }; /* increase N_PORTS if you need more */ -#define N_PORTS 4 +#define N_PORTS 8 static struct portmaster { struct mutex lock; /* protect open/close */ struct gs_port *port; diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c index a7754412202c..a86b4110565d 100644 --- a/drivers/usb/host/ehci-tegra.c +++ b/drivers/usb/host/ehci-tegra.c @@ -286,6 +286,18 @@ static int tegra_usb_resume(struct usb_hcd *hcd) writel(val, &hw->port_status[0]); udelay(10); + /* Program the field PTC in PORTSC based on the saved speed mode */ + val = readl(&hw->port_status[0]); + val &= ~(TEGRA_USB_PORTSC1_PTC(~0)); + if (context->port_speed == TEGRA_USB_PHY_PORT_HIGH) + val |= TEGRA_USB_PORTSC1_PTC(5); + else if (context->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL) + val |= TEGRA_USB_PORTSC1_PTC(6); + else if (context->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW) + val |= TEGRA_USB_PORTSC1_PTC(7); + writel(val, &hw->port_status[0]); + udelay(10); + /* Disable test mode by setting PTC field to NORMAL_OP */ val = readl(&hw->port_status[0]); val &= ~(TEGRA_USB_PORTSC1_PTC(~0)); diff --git a/drivers/video/tegra/dc/dc.c b/drivers/video/tegra/dc/dc.c index 2427f6098ae2..3c3a4754b7dc 100644 --- a/drivers/video/tegra/dc/dc.c +++ b/drivers/video/tegra/dc/dc.c @@ -490,7 +490,7 @@ int tegra_dc_update_windows(struct tegra_dc_win *windows[], int n) if (no_vsync) tegra_dc_writel(dc, WRITE_MUX_ACTIVE | READ_MUX_ACTIVE, DC_CMD_STATE_ACCESS); else - tegra_dc_writel(dc, WRITE_MUX_ASSEMBLY | WRITE_MUX_ASSEMBLY, DC_CMD_STATE_ACCESS); + tegra_dc_writel(dc, WRITE_MUX_ASSEMBLY | READ_MUX_ASSEMBLY, DC_CMD_STATE_ACCESS); for (i = 0; i < n; i++) { struct tegra_dc_win *win = windows[i]; @@ -823,7 +823,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *ptr) val = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); for (i = 0; i < DC_N_WINDOWS; i++) { - if (!(val & (WIN_A_ACT_REQ << i))) { + if (!(val & (WIN_A_UPDATE << i))) { dc->windows[i].dirty = 0; completed = 1; } else { 
@@ -967,6 +967,7 @@ static bool _tegra_dc_enable(struct tegra_dc *dc) tegra_dc_setup_clk(dc, dc->clk); clk_enable(dc->clk); + clk_enable(dc->emc_clk); enable_irq(dc->irq); tegra_dc_init(dc); @@ -997,6 +998,7 @@ static void _tegra_dc_disable(struct tegra_dc *dc) if (dc->out_ops && dc->out_ops->disable) dc->out_ops->disable(dc); + clk_disable(dc->emc_clk); clk_disable(dc->clk); tegra_dvfs_set_rate(dc->clk, 0); @@ -1029,6 +1031,7 @@ static int tegra_dc_probe(struct nvhost_device *ndev) { struct tegra_dc *dc; struct clk *clk; + struct clk *emc_clk; struct resource *res; struct resource *base_res; struct resource *fb_mem = NULL; @@ -1085,7 +1088,22 @@ static int tegra_dc_probe(struct nvhost_device *ndev) goto err_iounmap_reg; } + emc_clk = clk_get(&ndev->dev, "emc"); + if (IS_ERR_OR_NULL(emc_clk)) { + dev_err(&ndev->dev, "can't get emc clock\n"); + ret = -ENOENT; + goto err_put_clk; + } + + /* + * The emc is a shared clock, it will be set to the highest + * requested rate from any user. Set the rate to ULONG_MAX to + * always request the max rate whenever this request is enabled + */ + clk_set_rate(emc_clk, ULONG_MAX); + dc->clk = clk; + dc->emc_clk = emc_clk; dc->base_res = base_res; dc->base = base; dc->irq = irq; @@ -1108,7 +1126,7 @@ static int tegra_dc_probe(struct nvhost_device *ndev) dev_name(&ndev->dev), dc)) { dev_err(&ndev->dev, "request_irq %d failed\n", irq); ret = -EBUSY; - goto err_put_clk; + goto err_put_emc_clk; } /* hack to ballence enable_irq calls in _tegra_dc_enable() */ @@ -1158,6 +1176,8 @@ static int tegra_dc_probe(struct nvhost_device *ndev) err_free_irq: free_irq(irq, dc); +err_put_emc_clk: + clk_put(emc_clk); err_put_clk: clk_put(clk); err_iounmap_reg: @@ -1187,6 +1207,7 @@ static int tegra_dc_remove(struct nvhost_device *ndev) _tegra_dc_disable(dc); free_irq(dc->irq, dc); + clk_put(dc->emc_clk); clk_put(dc->clk); iounmap(dc->base); if (dc->fb_mem) diff --git a/drivers/video/tegra/dc/dc_priv.h b/drivers/video/tegra/dc/dc_priv.h index 
7d0e340a6ee2..253d03f057d7 100644 --- a/drivers/video/tegra/dc/dc_priv.h +++ b/drivers/video/tegra/dc/dc_priv.h @@ -60,6 +60,7 @@ struct tegra_dc { int irq; struct clk *clk; + struct clk *emc_clk; bool enabled; diff --git a/drivers/video/tegra/dc/dc_reg.h b/drivers/video/tegra/dc/dc_reg.h index 5ae3cc4c1dec..f643ec9ec742 100644 --- a/drivers/video/tegra/dc/dc_reg.h +++ b/drivers/video/tegra/dc/dc_reg.h @@ -89,6 +89,10 @@ #define WIN_A_ACT_REQ (1 << 1) #define WIN_B_ACT_REQ (1 << 2) #define WIN_C_ACT_REQ (1 << 3) +#define GENERAL_UPDATE (1 << 8) +#define WIN_A_UPDATE (1 << 9) +#define WIN_B_UPDATE (1 << 10) +#define WIN_C_UPDATE (1 << 11) #define DC_CMD_DISPLAY_WINDOW_HEADER 0x042 #define WINDOW_A_SELECT (1 << 4) diff --git a/drivers/video/tegra/host/nvhost_channel.c b/drivers/video/tegra/host/nvhost_channel.c index e7bbcc50043c..949e67ffb653 100644 --- a/drivers/video/tegra/host/nvhost_channel.c +++ b/drivers/video/tegra/host/nvhost_channel.c @@ -231,12 +231,12 @@ static void power_3d(struct nvhost_module *mod, enum nvhost_power_action action) NVSYNCPT_3D, syncval); nvhost_intr_add_action(&ch->dev->intr, NVSYNCPT_3D, - syncval, - NVHOST_INTR_ACTION_WAKEUP, - &wq, &ref); + syncval, + NVHOST_INTR_ACTION_WAKEUP, + &wq, &ref); wait_event(wq, - nvhost_syncpt_min_cmp(&ch->dev->syncpt, - NVSYNCPT_3D, syncval)); + nvhost_syncpt_min_cmp(&ch->dev->syncpt, + NVSYNCPT_3D, syncval)); nvhost_intr_put_ref(&ch->dev->intr, ref); nvhost_cdma_update(&ch->cdma); } diff --git a/drivers/video/tegra/host/nvhost_intr.c b/drivers/video/tegra/host/nvhost_intr.c index c08f33634aee..977b8567cdfd 100644 --- a/drivers/video/tegra/host/nvhost_intr.c +++ b/drivers/video/tegra/host/nvhost_intr.c @@ -429,7 +429,7 @@ void nvhost_intr_deinit(struct nvhost_intr *intr) } } - if (!list_empty(&syncpt->wait_head)) { /* output diagnostics */ + if(!list_empty(&syncpt->wait_head)) { // output diagnostics printk("%s id=%d\n",__func__,id); BUG_ON(1); } diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c 
b/drivers/video/tegra/nvmap/nvmap_dev.c index a899bb4ef1b9..1961c714efe5 100644 --- a/drivers/video/tegra/nvmap/nvmap_dev.c +++ b/drivers/video/tegra/nvmap/nvmap_dev.c @@ -50,7 +50,7 @@ struct nvmap_carveout_node { struct nvmap_heap *carveout; int index; struct list_head clients; - struct mutex clients_mutex; + spinlock_t clients_lock; }; struct nvmap_device { @@ -277,8 +277,10 @@ void nvmap_carveout_commit_add(struct nvmap_client *client, struct nvmap_carveout_node *node, size_t len) { - mutex_lock(&node->clients_mutex); + unsigned long flags; + nvmap_ref_lock(client); + spin_lock_irqsave(&node->clients_lock, flags); BUG_ON(list_empty(&client->carveout_commit[node->index].list) && client->carveout_commit[node->index].commit != 0); @@ -289,23 +291,26 @@ void nvmap_carveout_commit_add(struct nvmap_client *client, list_add(&client->carveout_commit[node->index].list, &node->clients); } - mutex_unlock(&node->clients_mutex); + spin_unlock_irqrestore(&node->clients_lock, flags); + nvmap_ref_unlock(client); } void nvmap_carveout_commit_subtract(struct nvmap_client *client, struct nvmap_carveout_node *node, size_t len) { + unsigned long flags; + if (!client) return; - mutex_lock(&node->clients_mutex); + spin_lock_irqsave(&node->clients_lock, flags); client->carveout_commit[node->index].commit -= len; BUG_ON(client->carveout_commit[node->index].commit < 0); /* if no more allocation in this carveout for this node, delete it */ if (!client->carveout_commit[node->index].commit) list_del_init(&client->carveout_commit[node->index].list); - mutex_unlock(&node->clients_mutex); + spin_unlock_irqrestore(&node->clients_lock, flags); } static struct nvmap_client* get_client_from_carveout_commit( @@ -316,7 +321,6 @@ static struct nvmap_client* get_client_from_carveout_commit( carveout_commit); } - struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client, size_t len, size_t align, unsigned long usage, @@ -341,10 +345,8 @@ struct nvmap_heap_block 
*nvmap_carveout_alloc(struct nvmap_client *client, if (nvmap_flush_heap_block(client, block, len)) { nvmap_heap_free(block); return NULL; - } else { - nvmap_carveout_commit_add(client, co_heap, len); + } else return block; - } } } @@ -764,32 +766,36 @@ static void client_stringify(struct nvmap_client *client, struct seq_file *s) static void allocations_stringify(struct nvmap_client *client, struct seq_file *s) { - struct rb_node *n = client->handle_refs.rb_node; + struct rb_node *n = rb_first(&client->handle_refs); + unsigned long long total = 0; for (; n != NULL; n = rb_next(n)) { struct nvmap_handle_ref *ref = rb_entry(n, struct nvmap_handle_ref, node); struct nvmap_handle *handle = ref->handle; - if (!handle->heap_pgalloc) + if (handle->alloc && !handle->heap_pgalloc) { seq_printf(s, " %8u@%8lx ", handle->size, handle->carveout->base); + total += handle->size; + } } - seq_printf(s, "\n"); + seq_printf(s, " total: %llu\n", total); } static int nvmap_debug_allocations_show(struct seq_file *s, void *unused) { struct nvmap_carveout_node *node = s->private; struct nvmap_carveout_commit *commit; + unsigned long flags; - mutex_lock(&node->clients_mutex); + spin_lock_irqsave(&node->clients_lock, flags); list_for_each_entry(commit, &node->clients, list) { struct nvmap_client *client = get_client_from_carveout_commit(node, commit); client_stringify(client, s); allocations_stringify(client, s); } - mutex_unlock(&node->clients_mutex); + spin_unlock_irqrestore(&node->clients_lock, flags); return 0; } @@ -811,15 +817,16 @@ static int nvmap_debug_clients_show(struct seq_file *s, void *unused) { struct nvmap_carveout_node *node = s->private; struct nvmap_carveout_commit *commit; + unsigned long flags; - mutex_lock(&node->clients_mutex); + spin_lock_irqsave(&node->clients_lock, flags); list_for_each_entry(commit, &node->clients, list) { struct nvmap_client *client = get_client_from_carveout_commit(node, commit); client_stringify(client, s); seq_printf(s, " %8u\n", 
commit->commit); } - mutex_unlock(&node->clients_mutex); + spin_unlock_irqrestore(&node->clients_lock, flags); return 0; } @@ -965,7 +972,7 @@ static int nvmap_probe(struct platform_device *pdev) goto fail_heaps; } dev->nr_carveouts++; - mutex_init(&node->clients_mutex); + spin_lock_init(&node->clients_lock); node->index = i; INIT_LIST_HEAD(&node->clients); node->heap_bit = co->usage_mask; diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c index 09502bff4883..44f55b3f59ba 100644 --- a/drivers/video/tegra/nvmap/nvmap_handle.c +++ b/drivers/video/tegra/nvmap/nvmap_handle.c @@ -81,11 +81,6 @@ void _nvmap_handle_free(struct nvmap_handle *h) goto out; if (!h->heap_pgalloc) { - mutex_lock(&h->lock); - nvmap_carveout_commit_subtract(h->owner, - nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)), - h->size); - mutex_unlock(&h->lock); nvmap_heap_free(h->carveout); goto out; } @@ -213,6 +208,9 @@ static void alloc_handle(struct nvmap_client *client, size_t align, h->carveout = b; h->heap_pgalloc = false; h->alloc = true; + nvmap_carveout_commit_add(client, + nvmap_heap_to_arg(nvmap_block_to_heap(b)), + h->size); } } else if (type & NVMAP_HEAP_IOVMM) { size_t reserved = PAGE_ALIGN(h->size); @@ -367,6 +365,11 @@ void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id) if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig) atomic_sub(h->size, &client->iovm_commit); + if (h->alloc && !h->heap_pgalloc) + nvmap_carveout_commit_subtract(client, + nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)), + h->size); + nvmap_ref_unlock(client); if (pins) @@ -376,6 +379,9 @@ void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id) while (pins--) nvmap_unpin_handles(client, &ref->handle, 1); + if (h->owner == client) + h->owner = NULL; + kfree(ref); out: @@ -499,6 +505,11 @@ struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client, return ERR_PTR(-ENOMEM); } + if (!h->heap_pgalloc) + 
nvmap_carveout_commit_add(client, + nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)), + h->size); + atomic_set(&ref->dupes, 1); ref->handle = h; atomic_set(&ref->pin, 0); diff --git a/include/linux/usb/android_composite.h b/include/linux/usb/android_composite.h index ac09dcb71775..62e72e3bd2b6 100644 --- a/include/linux/usb/android_composite.h +++ b/include/linux/usb/android_composite.h @@ -88,6 +88,11 @@ struct usb_ether_platform_data { const char *vendorDescr; }; +/* Platform data for ACM driver. */ +struct acm_platform_data { + u8 num_inst; +}; + extern void android_register_function(struct android_usb_function *f); extern void android_enable_function(struct usb_function *f, int enable); diff --git a/kernel/futex.c b/kernel/futex.c index 257db2460ab8..6a3a5fa1526d 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -220,7 +220,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) struct mm_struct *mm = current->mm; struct page *page; int err; - struct vm_area_struct *vma; /* * The futex address must be "naturally" aligned. @@ -246,37 +245,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) return 0; } - /* - * The futex is hashed differently depending on whether - * it's in a shared or private mapping. So check vma first. - */ - vma = find_extend_vma(mm, address); - if (unlikely(!vma)) - return -EFAULT; - - /* - * Permissions. - */ - if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ)) - return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES; - - /* - * Private mappings are handled in a simple way. - * - * NOTE: When userspace waits on a MAP_SHARED mapping, even if - * it's a read-only handle, it's expected that futexes attach to - * the object not the particular process. Therefore we use - * VM_MAYSHARE here, not VM_SHARED which is restricted to shared - * mappings of _writable_ handles. 
- */ - if (likely(!(vma->vm_flags & VM_MAYSHARE))) { - key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */ - key->private.mm = mm; - key->private.address = address; - get_futex_key_refs(key); - return 0; - } - again: err = get_user_pages_fast(address, 1, 1, &page); if (err < 0) |