summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--arch/arm/configs/tegra_defconfig6
-rw-r--r--arch/arm/mach-tegra/Kconfig9
-rw-r--r--arch/arm/mach-tegra/Makefile1
-rw-r--r--arch/arm/mach-tegra/clock.c608
-rw-r--r--arch/arm/mach-tegra/clock.h17
-rw-r--r--arch/arm/mach-tegra/common.c8
-rw-r--r--arch/arm/mach-tegra/cpuidle.c8
-rw-r--r--arch/arm/mach-tegra/devices.c19
-rw-r--r--arch/arm/mach-tegra/devices.h2
-rw-r--r--arch/arm/mach-tegra/dvfs.c103
-rw-r--r--arch/arm/mach-tegra/dvfs.h4
-rw-r--r--arch/arm/mach-tegra/include/mach/audio.h5
-rw-r--r--arch/arm/mach-tegra/include/mach/clk.h1
-rw-r--r--arch/arm/mach-tegra/include/mach/fb.h11
-rw-r--r--arch/arm/mach-tegra/include/mach/nvmap.h3
-rw-r--r--arch/arm/mach-tegra/include/mach/spdif.h392
-rw-r--r--arch/arm/mach-tegra/suspend.c5
-rw-r--r--arch/arm/mach-tegra/tegra2_clocks.c114
-rw-r--r--arch/arm/mach-tegra/tegra2_dvfs.c141
-rw-r--r--arch/arm/mach-tegra/tegra_i2s_audio.c142
-rw-r--r--arch/arm/mach-tegra/tegra_spdif_audio.c1422
-rw-r--r--drivers/media/video/tegra/Kconfig10
-rw-r--r--drivers/media/video/tegra/Makefile3
-rw-r--r--drivers/media/video/tegra/avp/Kconfig25
-rw-r--r--drivers/media/video/tegra/avp/Makefile6
-rw-r--r--drivers/media/video/tegra/avp/avp.c1683
-rw-r--r--drivers/media/video/tegra/avp/avp.h47
-rw-r--r--drivers/media/video/tegra/avp/avp_msg.h342
-rw-r--r--drivers/media/video/tegra/avp/avp_svc.c701
-rw-r--r--drivers/media/video/tegra/avp/headavp.S66
-rw-r--r--drivers/media/video/tegra/avp/headavp.h41
-rw-r--r--drivers/media/video/tegra/avp/tegra_rpc.c738
-rw-r--r--drivers/media/video/tegra/avp/trpc.h77
-rw-r--r--drivers/media/video/tegra/avp/trpc_local.c333
-rw-r--r--drivers/media/video/tegra/avp/trpc_sema.c220
-rw-r--r--drivers/media/video/tegra/avp/trpc_sema.h28
-rw-r--r--drivers/media/video/tegra/tegra_camera.c2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c28
-rw-r--r--drivers/net/wireless/bcm4329/dhd.h9
-rw-r--r--drivers/net/wireless/bcm4329/dhd_common.c74
-rw-r--r--drivers/net/wireless/bcm4329/dhd_linux.c109
-rw-r--r--drivers/net/wireless/bcm4329/include/epivers.h10
-rw-r--r--drivers/net/wireless/bcm4329/wl_iw.c84
-rw-r--r--drivers/net/wireless/bcm4329/wl_iw.h2
-rw-r--r--drivers/serial/tegra_hsuart.c41
-rw-r--r--drivers/video/fbmon.c37
-rw-r--r--drivers/video/modedb.c453
-rw-r--r--drivers/video/tegra/dc/dc.c4
-rw-r--r--drivers/video/tegra/fb.c33
-rw-r--r--drivers/video/tegra/host/dev.c2
-rw-r--r--drivers/video/tegra/nvmap/nvmap.h1
-rw-r--r--drivers/video/tegra/nvmap/nvmap_dev.c124
-rw-r--r--fs/ext4/super.c7
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/percpu_counter.h10
-rw-r--r--include/linux/tegra_avp.h41
-rw-r--r--include/linux/tegra_rpc.h47
-rw-r--r--include/linux/tegra_sema.h34
-rw-r--r--include/linux/tegra_spdif.h56
-rw-r--r--kernel/pm_qos_params.c4
60 files changed, 7939 insertions, 616 deletions
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 322547afac43..65b4eb5646ed 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -1432,9 +1432,9 @@ CONFIG_MFD_CORE=y
CONFIG_MFD_TPS6586X=y
CONFIG_REGULATOR=y
# CONFIG_REGULATOR_DEBUG is not set
-# CONFIG_REGULATOR_DUMMY is not set
-# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
-# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+CONFIG_REGULATOR_DUMMY=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
# CONFIG_REGULATOR_BQ24022 is not set
# CONFIG_REGULATOR_MAX1586 is not set
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 4aa190156666..ebf17b4e8791 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -77,6 +77,15 @@ source "arch/arm/mach-tegra/nv/Kconfig"
endif
+config TEGRA_CPU_DVFS
+ bool "Enable voltage scaling on Tegra CPU"
+ default y
+
+config TEGRA_CORE_DVFS
+ bool "Enable voltage scaling on Tegra core"
+ depends on TEGRA_CPU_DVFS
+ default y
+
config TEGRA_IOVMM_GART
bool "Enable I/O virtual memory manager for GART"
depends on ARCH_TEGRA_2x_SOC
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 2292bd9f7435..7433bf7138b1 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -17,6 +17,7 @@ obj-y += i2s_api.o
else
obj-y += tegra_i2s_audio.o
endif
+obj-y += tegra_spdif_audio.o
obj-y += mc.o
obj-$(CONFIG_USB_SUPPORT) += usb_phy.o
obj-$(CONFIG_FIQ) += fiq.o
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index d34f0efbdd37..ad5f483af7fc 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -18,119 +18,221 @@
#include <linux/kernel.h>
#include <linux/clk.h>
-#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/list.h>
#include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
+
#include <asm/clkdev.h>
+
#include <mach/clk.h>
#include "board.h"
#include "clock.h"
#include "dvfs.h"
-static LIST_HEAD(clocks);
-
/*
- * clock_lock must be held when:
- * Accessing any clock register non-atomically
- * or
- * Relying on any state of a clk struct not to change, unless clk_is_dvfs
- * returns true on that clk struct, and dvfs_lock is held instead.
+ * Locking:
+ *
+ * Each struct clk has a lock. Depending on the cansleep flag, that lock
+ * may be a spinlock or a mutex. For most clocks, the spinlock is sufficient,
+ * and using the spinlock allows the clock to be manipulated from an interrupt
+ * or while holding a spinlock. Some clocks may need to adjust a regulator
+ * in order to maintain the required voltage for a new frequency. Those
+ * clocks set the cansleep flag, and take a mutex so that the regulator api
+ * can be used while holding the lock.
+ *
+ * To avoid AB-BA locking problems, locks must always be traversed from child
+ * clock to parent clock. For example, when enabling a clock, the clock's lock
+ * is taken, and then clk_enable is called on the parent, which take's the
+ * parent clock's lock. There are two exceptions to this ordering:
+ * 1. When setting a clock as cansleep, in which case the entire list of clocks
+ * is traversed to set the children as cansleep as well. This must occur
+ * during init, before any calls to clk_get, so no other clock locks can
+ * get taken.
+ * 2. When dumping the clock tree through debugfs. In this case, clk_lock_all
+ * is called, which attemps to iterate through the entire list of clocks
+ * and take every clock lock. If any call to clk_trylock fails, a locked
+ * clocks are unlocked, and the process is retried. When all the locks
+ * are held, the only clock operation that can be called is
+ * clk_get_rate_all_locked.
*
- * Any function that changes the state of a clk struct must hold
- * the dvfs_lock if clk_is_auto_dvfs(clk) is true, and the clock_lock.
+ * Within a single clock, no clock operation can call another clock operation
+ * on itself, except for clk_get_rate_locked. Any clock operation can call
+ * any other clock operation on any of it's possible parents.
*
- * When taking dvfs_lock and clock_lock, dvfs_lock must be taken first.
+ * clk_set_cansleep is used to mark a clock as sleeping. It is called during
+ * dvfs (Dynamic Voltage and Frequency Scaling) init on any clock that has a
+ * dvfs requirement. It can only be called on clocks that are the sole parent
+ * of all of their child clocks, meaning the child clock can not be reparented
+ * onto a different, possibly non-sleeping, clock. This is inherently true
+ * of all leaf clocks in the clock tree
+ *
+ * An additional lock, clock_list_lock, is used to protect the list of all
+ * clocks.
+ *
+ * The clock operations must lock internally to protect against
+ * read-modify-write on registers that are shared by multiple clocks
*/
-static DEFINE_SPINLOCK(clock_lock);
+static DEFINE_MUTEX(clock_list_lock);
+static LIST_HEAD(clocks);
static inline bool clk_is_auto_dvfs(struct clk *c)
{
- smp_rmb();
return c->auto_dvfs;
-};
+}
static inline bool clk_is_dvfs(struct clk *c)
{
- smp_rmb();
return c->is_dvfs;
-};
+}
+
+static inline bool clk_cansleep(struct clk *c)
+{
+ return c->cansleep;
+}
+
+#define clk_lock_save(c, flags) \
+ do { \
+ if (clk_cansleep(c)) { \
+ flags = 0; \
+ mutex_lock(&c->mutex); \
+ } else { \
+ spin_lock_irqsave(&c->spinlock, flags); \
+ } \
+ } while (0)
+
+#define clk_unlock_restore(c, flags) \
+ do { \
+ if (clk_cansleep(c)) \
+ mutex_unlock(&c->mutex); \
+ else \
+ spin_unlock_irqrestore(&c->spinlock, flags); \
+ } while (0)
+
+static inline void clk_lock_init(struct clk *c)
+{
+ mutex_init(&c->mutex);
+ spin_lock_init(&c->spinlock);
+}
struct clk *tegra_get_clock_by_name(const char *name)
{
struct clk *c;
struct clk *ret = NULL;
- unsigned long flags;
- spin_lock_irqsave(&clock_lock, flags);
+ mutex_lock(&clock_list_lock);
list_for_each_entry(c, &clocks, node) {
if (strcmp(c->name, name) == 0) {
ret = c;
break;
}
}
- spin_unlock_irqrestore(&clock_lock, flags);
+ mutex_unlock(&clock_list_lock);
return ret;
}
+/* Must be called with clk_lock(c) held */
static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
{
u64 rate;
- rate = p->rate;
+ rate = clk_get_rate(p);
if (c->mul != 0 && c->div != 0) {
- rate = rate * c->mul;
+ rate *= c->mul;
do_div(rate, c->div);
}
return rate;
}
-static void clk_recalculate_rate(struct clk *c)
+/* Must be called with clk_lock(c) held */
+unsigned long clk_get_rate_locked(struct clk *c)
{
unsigned long rate;
- if (!c->parent)
- return;
+ if (c->parent)
+ rate = clk_predict_rate_from_parent(c, c->parent);
+ else
+ rate = c->rate;
- rate = clk_predict_rate_from_parent(c, c->parent);
+ return rate;
+}
- if (rate > c->max_rate)
- pr_warn("clocks: Set clock %s to rate %lu, max is %lu\n",
- c->name, rate, c->max_rate);
+unsigned long clk_get_rate(struct clk *c)
+{
+ unsigned long flags;
+ unsigned long rate;
+
+ clk_lock_save(c, flags);
+
+ rate = clk_get_rate_locked(c);
+
+ clk_unlock_restore(c, flags);
- c->rate = rate;
+ return rate;
}
+EXPORT_SYMBOL(clk_get_rate);
-int clk_reparent(struct clk *c, struct clk *parent)
+static void __clk_set_cansleep(struct clk *c)
{
- c->parent = parent;
- list_del(&c->sibling);
- list_add_tail(&c->sibling, &parent->children);
- return 0;
+ struct clk *child;
+ BUG_ON(mutex_is_locked(&c->mutex));
+ BUG_ON(spin_is_locked(&c->spinlock));
+
+ list_for_each_entry(child, &clocks, node) {
+ if (child->parent != c)
+ continue;
+
+ WARN(child->ops && child->ops->set_parent,
+ "can't make child clock %s of %s "
+ "sleepable if it's parent could change",
+ child->name, c->name);
+
+ __clk_set_cansleep(child);
+ }
+
+ c->cansleep = true;
}
-static void propagate_rate(struct clk *c)
+/* Must be called before any clk_get calls */
+void clk_set_cansleep(struct clk *c)
{
- struct clk *clkp;
- list_for_each_entry(clkp, &c->children, sibling) {
- clk_recalculate_rate(clkp);
- propagate_rate(clkp);
- }
+ mutex_lock(&clock_list_lock);
+ __clk_set_cansleep(c);
+ mutex_unlock(&clock_list_lock);
}
-void clk_init(struct clk *c)
+int tegra_dvfs_set_rate(struct clk *c, unsigned long rate)
{
unsigned long flags;
+ int ret;
- spin_lock_irqsave(&clock_lock, flags);
+ if (!clk_is_dvfs(c))
+ return -EINVAL;
+
+ clk_lock_save(c, flags);
+ ret = tegra_dvfs_set_rate_locked(c, rate);
+ clk_unlock_restore(c, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(tegra_dvfs_set_rate);
+
+int clk_reparent(struct clk *c, struct clk *parent)
+{
+ c->parent = parent;
+ return 0;
+}
+
+void clk_init(struct clk *c)
+{
+ clk_lock_init(c);
- INIT_LIST_HEAD(&c->children);
- INIT_LIST_HEAD(&c->sibling);
INIT_LIST_HEAD(&c->dvfs);
if (c->ops && c->ops->init)
@@ -145,71 +247,58 @@ void clk_init(struct clk *c)
c->state = ON;
}
- clk_recalculate_rate(c);
-
+ mutex_lock(&clock_list_lock);
list_add(&c->node, &clocks);
-
- if (c->parent)
- list_add_tail(&c->sibling, &c->parent->children);
-
- spin_unlock_irqrestore(&clock_lock, flags);
+ mutex_unlock(&clock_list_lock);
}
-int clk_enable_locked(struct clk *c)
+int clk_enable(struct clk *c)
{
- int ret;
+ int ret = 0;
+ unsigned long flags;
+
+ clk_lock_save(c, flags);
+
+ if (clk_is_auto_dvfs(c)) {
+ ret = tegra_dvfs_set_rate_locked(c, clk_get_rate_locked(c));
+ if (ret)
+ goto out;
+ }
if (c->refcnt == 0) {
if (c->parent) {
- ret = clk_enable_locked(c->parent);
+ ret = clk_enable(c->parent);
if (ret)
- return ret;
+ goto out;
}
if (c->ops && c->ops->enable) {
ret = c->ops->enable(c);
if (ret) {
if (c->parent)
- clk_disable_locked(c->parent);
- return ret;
+ clk_disable(c->parent);
+ goto out;
}
c->state = ON;
c->set = true;
}
}
c->refcnt++;
-
- return 0;
-}
-
-int clk_enable(struct clk *c)
-{
- int ret;
- unsigned long flags;
-
- if (clk_is_auto_dvfs(c)) {
- lock_dvfs();
- ret = tegra_dvfs_set_rate(c, c->rate);
- if (ret)
- goto out;
- }
-
- spin_lock_irqsave(&clock_lock, flags);
- ret = clk_enable_locked(c);
- spin_unlock_irqrestore(&clock_lock, flags);
-
out:
- if (clk_is_auto_dvfs(c))
- unlock_dvfs();
-
+ clk_unlock_restore(c, flags);
return ret;
}
EXPORT_SYMBOL(clk_enable);
-void clk_disable_locked(struct clk *c)
+void clk_disable(struct clk *c)
{
+ unsigned long flags;
+
+ clk_lock_save(c, flags);
+
if (c->refcnt == 0) {
WARN(1, "Attempting to disable clock %s with refcnt 0", c->name);
+ clk_unlock_restore(c, flags);
return;
}
if (c->refcnt == 1) {
@@ -217,79 +306,53 @@ void clk_disable_locked(struct clk *c)
c->ops->disable(c);
if (c->parent)
- clk_disable_locked(c->parent);
+ clk_disable(c->parent);
c->state = OFF;
}
c->refcnt--;
-}
-void clk_disable(struct clk *c)
-{
- unsigned long flags;
-
- if (clk_is_auto_dvfs(c))
- lock_dvfs();
+ if (clk_is_auto_dvfs(c) && c->refcnt == 0)
+ tegra_dvfs_set_rate_locked(c, 0);
- spin_lock_irqsave(&clock_lock, flags);
- clk_disable_locked(c);
- spin_unlock_irqrestore(&clock_lock, flags);
-
- if (clk_is_auto_dvfs(c)) {
- if (c->refcnt == 0)
- tegra_dvfs_set_rate(c, 0);
- unlock_dvfs();
- }
+ clk_unlock_restore(c, flags);
}
EXPORT_SYMBOL(clk_disable);
-int clk_set_parent_locked(struct clk *c, struct clk *parent)
-{
- int ret;
-
- if (!c->ops || !c->ops->set_parent)
- return -ENOSYS;
-
- ret = c->ops->set_parent(c, parent);
-
- if (ret)
- return ret;
-
- clk_recalculate_rate(c);
-
- propagate_rate(c);
-
- return 0;
-}
-
int clk_set_parent(struct clk *c, struct clk *parent)
{
int ret = 0;
unsigned long flags;
- unsigned long new_rate = clk_predict_rate_from_parent(c, parent);
+ unsigned long new_rate;
+ unsigned long old_rate;
+ clk_lock_save(c, flags);
- if (clk_is_auto_dvfs(c)) {
- lock_dvfs();
- if (c->refcnt > 0 && (!c->parent || new_rate > c->rate))
- ret = tegra_dvfs_set_rate(c, new_rate);
- if (!ret)
+ if (!c->ops || !c->ops->set_parent) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
+ new_rate = clk_predict_rate_from_parent(c, parent);
+ old_rate = clk_get_rate_locked(c);
+
+ if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
+ (!c->parent || new_rate > old_rate)) {
+ ret = tegra_dvfs_set_rate_locked(c, new_rate);
+ if (ret)
goto out;
}
- spin_lock_irqsave(&clock_lock, flags);
- ret = clk_set_parent_locked(c, parent);
- spin_unlock_irqrestore(&clock_lock, flags);
- if (!ret)
+ ret = c->ops->set_parent(c, parent);
+ if (ret)
goto out;
- if (clk_is_auto_dvfs(c) && c->refcnt > 0)
- ret = tegra_dvfs_set_rate(c, new_rate);
+ if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
+ new_rate < old_rate)
+ ret = tegra_dvfs_set_rate_locked(c, new_rate);
out:
- if (clk_is_auto_dvfs(c))
- unlock_dvfs();
-
+ clk_unlock_restore(c, flags);
return ret;
}
EXPORT_SYMBOL(clk_set_parent);
@@ -300,86 +363,87 @@ struct clk *clk_get_parent(struct clk *c)
}
EXPORT_SYMBOL(clk_get_parent);
-int clk_set_rate_locked(struct clk *c, unsigned long rate)
+int clk_set_rate(struct clk *c, unsigned long rate)
{
- int ret;
-
- if (rate == c->requested_rate)
- return 0;
-
- if (rate > c->max_rate)
- rate = c->max_rate;
-
- if (!c->ops || !c->ops->set_rate)
- return -ENOSYS;
-
- c->requested_rate = rate;
-
- ret = c->ops->set_rate(c, rate);
-
- if (ret)
- return ret;
+ int ret = 0;
+ unsigned long flags;
+ unsigned long old_rate;
- clk_recalculate_rate(c);
+ clk_lock_save(c, flags);
- propagate_rate(c);
+ if (!c->ops || !c->ops->set_rate) {
+ ret = -ENOSYS;
+ goto out;
+ }
- return 0;
-}
+ old_rate = clk_get_rate_locked(c);
-int clk_set_rate(struct clk *c, unsigned long rate)
-{
- int ret = 0;
- unsigned long flags;
+ if (rate > c->max_rate)
+ rate = c->max_rate;
- if (clk_is_auto_dvfs(c)) {
- lock_dvfs();
- if (rate > c->rate && c->refcnt > 0)
- ret = tegra_dvfs_set_rate(c, rate);
+ if (clk_is_auto_dvfs(c) && rate > old_rate && c->refcnt > 0) {
+ ret = tegra_dvfs_set_rate_locked(c, rate);
if (ret)
goto out;
}
- spin_lock_irqsave(&clock_lock, flags);
- ret = clk_set_rate_locked(c, rate);
- spin_unlock_irqrestore(&clock_lock, flags);
-
+ ret = c->ops->set_rate(c, rate);
if (ret)
goto out;
- if (clk_is_auto_dvfs(c) && c->refcnt > 0)
- ret = tegra_dvfs_set_rate(c, rate);
+ if (clk_is_auto_dvfs(c) && rate < old_rate && c->refcnt > 0)
+ ret = tegra_dvfs_set_rate_locked(c, rate);
out:
- if (clk_is_auto_dvfs(c))
- unlock_dvfs();
+ clk_unlock_restore(c, flags);
return ret;
}
EXPORT_SYMBOL(clk_set_rate);
-unsigned long clk_get_rate(struct clk *c)
+/* Must be called with clocks lock and all individual clock locks held */
+unsigned long clk_get_rate_all_locked(struct clk *c)
{
- unsigned long flags;
- unsigned long ret;
-
- spin_lock_irqsave(&clock_lock, flags);
+ u64 rate;
+ int mul = 1;
+ int div = 1;
+ struct clk *p = c;
+
+ while (p) {
+ c = p;
+ if (c->mul != 0 && c->div != 0) {
+ mul *= c->mul;
+ div *= c->div;
+ }
+ p = c->parent;
+ }
- ret = c->rate;
+ rate = c->rate;
+ rate *= mul;
+ do_div(rate, div);
- spin_unlock_irqrestore(&clock_lock, flags);
- return ret;
+ return rate;
}
-EXPORT_SYMBOL(clk_get_rate);
long clk_round_rate(struct clk *c, unsigned long rate)
{
- if (!c->ops || !c->ops->round_rate)
- return -ENOSYS;
+ unsigned long flags;
+ long ret;
+
+ clk_lock_save(c, flags);
+
+ if (!c->ops || !c->ops->round_rate) {
+ ret = -ENOSYS;
+ goto out;
+ }
if (rate > c->max_rate)
rate = c->max_rate;
- return c->ops->round_rate(c, rate);
+ ret = c->ops->round_rate(c, rate);
+
+out:
+ clk_unlock_restore(c, flags);
+ return ret;
}
EXPORT_SYMBOL(clk_round_rate);
@@ -459,32 +523,52 @@ EXPORT_SYMBOL(tegra_periph_reset_assert);
void __init tegra_init_clock(void)
{
tegra2_init_clocks();
+ tegra2_init_dvfs();
}
+/*
+ * Iterate through all clocks, setting the dvfs rate to the current clock
+ * rate on all auto dvfs clocks, and to the saved dvfs rate on all manual
+ * dvfs clocks. Used to enable dvfs during late init, after the regulators
+ * are available.
+ */
void __init tegra_clk_set_dvfs_rates(void)
{
+ unsigned long flags;
struct clk *c;
+
+ mutex_lock(&clock_list_lock);
+
list_for_each_entry(c, &clocks, node) {
+ clk_lock_save(c, flags);
if (clk_is_auto_dvfs(c)) {
if (c->refcnt > 0)
- tegra_dvfs_set_rate(c, c->rate);
+ tegra_dvfs_set_rate_locked(c,
+ clk_get_rate_locked(c));
else
- tegra_dvfs_set_rate(c, 0);
+ tegra_dvfs_set_rate_locked(c, 0);
} else if (clk_is_dvfs(c)) {
- tegra_dvfs_set_rate(c, c->dvfs_rate);
+ tegra_dvfs_set_rate_locked(c, c->dvfs_rate);
}
+ clk_unlock_restore(c, flags);
}
+
+ mutex_unlock(&clock_list_lock);
}
+/*
+ * Iterate through all clocks, disabling any for which the refcount is 0
+ * but the clock init detected the bootloader left the clock on.
+ */
int __init tegra_disable_boot_clocks(void)
{
unsigned long flags;
struct clk *c;
- lock_dvfs();
- spin_lock_irqsave(&clock_lock, flags);
+ mutex_lock(&clock_list_lock);
list_for_each_entry(c, &clocks, node) {
+ clk_lock_save(c, flags);
if (c->refcnt == 0 && c->state == ON &&
c->ops && c->ops->disable) {
pr_warning("Disabling clock %s left on by bootloader\n",
@@ -492,15 +576,135 @@ int __init tegra_disable_boot_clocks(void)
c->ops->disable(c);
c->state = OFF;
}
+ clk_unlock_restore(c, flags);
}
- spin_unlock_irqrestore(&clock_lock, flags);
- unlock_dvfs();
+ mutex_unlock(&clock_list_lock);
+ return 0;
+}
+
+int __init tegra_late_init_clock(void)
+{
+ tegra_dvfs_late_init();
+ tegra_disable_boot_clocks();
+ tegra_clk_set_dvfs_rates();
return 0;
}
-late_initcall(tegra_disable_boot_clocks);
+late_initcall(tegra_late_init_clock);
#ifdef CONFIG_DEBUG_FS
+
+/*
+ * Attempt to lock all the clocks that are marked cansleep
+ * Must be called with irqs enabled
+ */
+static int __clk_lock_all_mutexes(void)
+{
+ struct clk *c;
+
+ might_sleep();
+
+ list_for_each_entry(c, &clocks, node)
+ if (clk_cansleep(c))
+ if (!mutex_trylock(&c->mutex))
+ goto unlock_mutexes;
+
+ return 0;
+
+unlock_mutexes:
+ list_for_each_entry_continue_reverse(c, &clocks, node)
+ if (clk_cansleep(c))
+ mutex_unlock(&c->mutex);
+
+ return -EAGAIN;
+}
+
+/*
+ * Attempt to lock all the clocks that are not marked cansleep
+ * Must be called with irqs disabled
+ */
+static int __clk_lock_all_spinlocks(void)
+{
+ struct clk *c;
+
+ list_for_each_entry(c, &clocks, node)
+ if (!clk_cansleep(c))
+ if (!spin_trylock(&c->spinlock))
+ goto unlock_spinlocks;
+
+ return 0;
+
+unlock_spinlocks:
+ list_for_each_entry_continue_reverse(c, &clocks, node)
+ if (!clk_cansleep(c))
+ spin_unlock(&c->spinlock);
+
+ return -EAGAIN;
+}
+
+static void __clk_unlock_all_mutexes(void)
+{
+ struct clk *c;
+
+ list_for_each_entry_reverse(c, &clocks, node)
+ if (clk_cansleep(c))
+ mutex_unlock(&c->mutex);
+}
+
+static void __clk_unlock_all_spinlocks(void)
+{
+ struct clk *c;
+
+ list_for_each_entry_reverse(c, &clocks, node)
+ if (!clk_cansleep(c))
+ spin_unlock(&c->spinlock);
+}
+
+/*
+ * This function retries until it can take all locks, and may take
+ * an arbitrarily long time to complete.
+ * Must be called with irqs enabled, returns with irqs disabled
+ * Must be called with clock_list_lock held
+ */
+static void clk_lock_all(void)
+{
+ int ret;
+retry:
+ ret = __clk_lock_all_mutexes();
+ if (ret)
+ goto failed_mutexes;
+
+ local_irq_disable();
+
+ ret = __clk_lock_all_spinlocks();
+ if (ret)
+ goto failed_spinlocks;
+
+ /* All locks taken successfully, return */
+ return;
+
+failed_spinlocks:
+ local_irq_enable();
+ __clk_unlock_all_mutexes();
+failed_mutexes:
+ msleep(1);
+ goto retry;
+}
+
+/*
+ * Unlocks all clocks after a clk_lock_all
+ * Must be called with irqs disabled, returns with irqs enabled
+ * Must be called with clock_list_lock held
+ */
+static void clk_unlock_all(void)
+{
+ __clk_unlock_all_spinlocks();
+
+ local_irq_enable();
+
+ __clk_unlock_all_mutexes();
+}
+
static struct dentry *clk_debugfs_root;
static void dvfs_show_one(struct seq_file *s, struct dvfs *d, int level)
@@ -515,7 +719,6 @@ static void dvfs_show_one(struct seq_file *s, struct dvfs *d, int level)
static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
{
struct clk *child;
- struct clk *safe;
struct dvfs *d;
const char *state = "uninit";
char div[8] = {0};
@@ -547,12 +750,15 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
c->rate > c->max_rate ? '!' : ' ',
!c->set ? '*' : ' ',
30 - level * 3, c->name,
- state, c->refcnt, div, c->rate);
+ state, c->refcnt, div, clk_get_rate_all_locked(c));
list_for_each_entry(d, &c->dvfs, node)
dvfs_show_one(s, d, level + 1);
- list_for_each_entry_safe(child, safe, &c->children, sibling) {
+ list_for_each_entry(child, &clocks, node) {
+ if (child->parent != c)
+ continue;
+
clock_tree_show_one(s, child, level + 1);
}
}
@@ -560,14 +766,20 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
static int clock_tree_show(struct seq_file *s, void *data)
{
struct clk *c;
- unsigned long flags;
seq_printf(s, " clock state ref div rate\n");
seq_printf(s, "--------------------------------------------------------------\n");
- spin_lock_irqsave(&clock_lock, flags);
+
+ mutex_lock(&clock_list_lock);
+
+ clk_lock_all();
+
list_for_each_entry(c, &clocks, node)
if (c->parent == NULL)
clock_tree_show_one(s, c, 0);
- spin_unlock_irqrestore(&clock_lock, flags);
+
+ clk_unlock_all();
+
+ mutex_unlock(&clock_list_lock);
return 0;
}
diff --git a/arch/arm/mach-tegra/clock.h b/arch/arm/mach-tegra/clock.h
index b13aab47083e..083815487c17 100644
--- a/arch/arm/mach-tegra/clock.h
+++ b/arch/arm/mach-tegra/clock.h
@@ -21,6 +21,8 @@
#define __MACH_TEGRA_CLOCK_H
#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <asm/clkdev.h>
#define DIV_BUS (1 << 0)
@@ -75,8 +77,6 @@ enum clk_state {
struct clk {
/* node for master clocks list */
struct list_head node; /* node for list of all clocks */
- struct list_head children; /* list of children */
- struct list_head sibling; /* node for children */
struct list_head dvfs; /* list of dvfs dependencies */
struct clk_lookup lookup;
@@ -91,11 +91,11 @@ struct clk {
unsigned long max_rate;
bool is_dvfs;
bool auto_dvfs;
+ bool cansleep;
u32 flags;
const char *name;
u32 refcnt;
- unsigned long requested_rate;
enum clk_state state;
struct clk *parent;
u32 div;
@@ -137,8 +137,10 @@ struct clk {
unsigned long rate;
} shared_bus_user;
} u;
-};
+ struct mutex mutex;
+ spinlock_t spinlock;
+};
struct clk_duplicate {
const char *name;
@@ -158,12 +160,11 @@ void tegra2_periph_reset_assert(struct clk *c);
void clk_init(struct clk *clk);
struct clk *tegra_get_clock_by_name(const char *name);
unsigned long clk_measure_input_freq(void);
-void clk_disable_locked(struct clk *c);
-int clk_enable_locked(struct clk *c);
-int clk_set_parent_locked(struct clk *c, struct clk *parent);
-int clk_set_rate_locked(struct clk *c, unsigned long rate);
int clk_reparent(struct clk *c, struct clk *parent);
void tegra_clk_init_from_table(struct tegra_clk_init_table *table);
void tegra_clk_set_dvfs_rates(void);
+void clk_set_cansleep(struct clk *c);
+unsigned long clk_get_rate_locked(struct clk *c);
+int tegra_dvfs_set_rate_locked(struct clk *c, unsigned long rate);
#endif
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 82bca0ccb223..2fdb99b6fdc5 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -59,10 +59,10 @@ static __initdata struct tegra_clk_init_table common_clk_init_table[] = {
{ "pll_p_out1", "pll_p", 28800000, true },
{ "pll_p_out2", "pll_p", 48000000, true },
{ "pll_p_out3", "pll_p", 72000000, true },
- { "pll_m_out1", "pll_m", 240000000, true },
- { "sclk", "pll_m_out1", 240000000, true },
- { "hclk", "sclk", 240000000, true },
- { "pclk", "hclk", 120000000, true },
+ { "pll_m_out1", "pll_m", 120000000, true },
+ { "sclk", "pll_m_out1", 120000000, true },
+ { "hclk", "sclk", 120000000, true },
+ { "pclk", "hclk", 60000000, true },
{ "pll_x", NULL, 0, true },
{ "cpu", NULL, 0, true },
{ "emc", NULL, 0, true },
diff --git a/arch/arm/mach-tegra/cpuidle.c b/arch/arm/mach-tegra/cpuidle.c
index 765e368401aa..a063c34ecf60 100644
--- a/arch/arm/mach-tegra/cpuidle.c
+++ b/arch/arm/mach-tegra/cpuidle.c
@@ -131,12 +131,14 @@ static inline void tegra_flow_wfi(struct cpuidle_device *dev)
flow_ctrl = flow_ctrl + FLOW_CTRL_HALT_CPUx_EVENTS(dev->cpu);
+ stop_critical_timings();
dsb();
__raw_writel(reg, flow_ctrl);
reg = __raw_readl(flow_ctrl);
__asm__ volatile ("wfi");
__raw_writel(0, flow_ctrl);
reg = __raw_readl(flow_ctrl);
+ start_critical_timings();
}
#ifdef CONFIG_SMP
@@ -377,6 +379,7 @@ static void tegra_idle_enter_lp2_cpu1(struct cpuidle_device *dev,
/* Prepare CPU1 for LP2 by putting it in reset */
+ stop_critical_timings();
gic_cpu_exit(0);
barrier();
twd_ctrl = readl(twd_base + 0x8);
@@ -398,6 +401,7 @@ static void tegra_idle_enter_lp2_cpu1(struct cpuidle_device *dev,
tegra_legacy_force_irq_clr(TEGRA_CPUIDLE_BOTH_IDLE);
writel(smp_processor_id(), EVP_CPU_RESET_VECTOR);
+ start_critical_timings();
/*
* TODO: is it worth going back to wfi if no interrupt is pending
@@ -474,7 +478,7 @@ static int tegra_idle_enter_lp2(struct cpuidle_device *dev,
return (int)us;
}
-static int tegra_idle_enter(unsigned int cpu)
+static int tegra_cpuidle_register_device(unsigned int cpu)
{
struct cpuidle_device *dev;
struct cpuidle_state *state;
@@ -589,7 +593,7 @@ static int __init tegra_cpuidle_init(void)
return ret;
for_each_possible_cpu(cpu) {
- if (tegra_idle_enter(cpu))
+ if (tegra_cpuidle_register_device(cpu))
pr_err("CPU%u: error initializing idle loop\n", cpu);
}
diff --git a/arch/arm/mach-tegra/devices.c b/arch/arm/mach-tegra/devices.c
index 67937e3b10b6..d976fc49ec52 100644
--- a/arch/arm/mach-tegra/devices.c
+++ b/arch/arm/mach-tegra/devices.c
@@ -787,3 +787,22 @@ struct platform_device tegra_grhost_device = {
.resource = tegra_grhost_resources,
.num_resources = ARRAY_SIZE(tegra_grhost_resources),
};
+
+static struct resource tegra_avp_resources[] = {
+ [0] = {
+ .start = INT_SHR_SEM_INBOX_IBF,
+ .end = INT_SHR_SEM_INBOX_IBF,
+ .flags = IORESOURCE_IRQ,
+ .name = "mbox_from_avp_pending",
+ },
+};
+
+struct platform_device tegra_avp_device = {
+ .name = "tegra-avp",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(tegra_avp_resources),
+ .resource = tegra_avp_resources,
+ .dev = {
+ .coherent_dma_mask = 0xffffffffULL,
+ },
+};
diff --git a/arch/arm/mach-tegra/devices.h b/arch/arm/mach-tegra/devices.h
index b0ee0c2916a6..d309313bf7dc 100644
--- a/arch/arm/mach-tegra/devices.h
+++ b/arch/arm/mach-tegra/devices.h
@@ -57,5 +57,7 @@ extern struct platform_device tegra_uart3_device;
extern struct platform_device tegra_uart4_device;
extern struct platform_device tegra_spdif_device;
extern struct platform_device tegra_grhost_device;
+extern struct platform_device tegra_spdif_device;
+extern struct platform_device tegra_avp_device;
#endif
diff --git a/arch/arm/mach-tegra/dvfs.c b/arch/arm/mach-tegra/dvfs.c
index 0a2135e3b784..ef58fae8afbd 100644
--- a/arch/arm/mach-tegra/dvfs.c
+++ b/arch/arm/mach-tegra/dvfs.c
@@ -40,77 +40,83 @@ struct dvfs_reg {
struct regulator *reg;
int max_millivolts;
int millivolts;
+ struct mutex lock;
};
-static LIST_HEAD(dvfs_list);
static LIST_HEAD(dvfs_debug_list);
static LIST_HEAD(dvfs_reg_list);
-static DEFINE_MUTEX(dvfs_lock);
-
-void lock_dvfs(void)
-{
- mutex_lock(&dvfs_lock);
-}
-
-void unlock_dvfs(void)
-{
- mutex_unlock(&dvfs_lock);
-}
+static DEFINE_MUTEX(dvfs_debug_list_lock);
+static DEFINE_MUTEX(dvfs_reg_list_lock);
static int dvfs_reg_set_voltage(struct dvfs_reg *dvfs_reg)
{
int millivolts = 0;
struct dvfs *d;
+ int ret = 0;
+
+ mutex_lock(&dvfs_reg->lock);
list_for_each_entry(d, &dvfs_reg->dvfs, reg_node)
millivolts = max(d->cur_millivolts, millivolts);
if (millivolts == dvfs_reg->millivolts)
- return 0;
+ goto out;
dvfs_reg->millivolts = millivolts;
- return regulator_set_voltage(dvfs_reg->reg,
+ if (!dvfs_reg->reg) {
+ pr_warn("dvfs set voltage on %s ignored\n", dvfs_reg->reg_id);
+ goto out;
+ }
+
+ ret = regulator_set_voltage(dvfs_reg->reg,
millivolts * 1000, dvfs_reg->max_millivolts * 1000);
+
+out:
+ mutex_unlock(&dvfs_reg->lock);
+ return ret;
}
-static int dvfs_reg_get_voltage(struct dvfs_reg *dvfs_reg)
+static int dvfs_reg_connect_to_regulator(struct dvfs_reg *dvfs_reg)
{
- int ret = regulator_get_voltage(dvfs_reg->reg);
+ struct regulator *reg;
- if (ret > 0)
- return ret / 1000;
+ if (!dvfs_reg->reg) {
+ reg = regulator_get(NULL, dvfs_reg->reg_id);
+ if (IS_ERR(reg))
+ return -EINVAL;
+ }
- return ret;
+ dvfs_reg->reg = reg;
+
+ return 0;
}
static struct dvfs_reg *get_dvfs_reg(struct dvfs *d)
{
struct dvfs_reg *dvfs_reg;
- struct regulator *reg;
+
+ mutex_lock(&dvfs_reg_list_lock);
list_for_each_entry(dvfs_reg, &dvfs_reg_list, node)
if (!strcmp(d->reg_id, dvfs_reg->reg_id))
- return dvfs_reg;
-
- reg = regulator_get(NULL, d->reg_id);
- if (IS_ERR(reg))
- return NULL;
+ goto out;
dvfs_reg = kzalloc(sizeof(struct dvfs_reg), GFP_KERNEL);
if (!dvfs_reg) {
pr_err("%s: Failed to allocate dvfs_reg\n", __func__);
- regulator_put(reg);
- return NULL;
+ goto out;
}
+ mutex_init(&dvfs_reg->lock);
INIT_LIST_HEAD(&dvfs_reg->dvfs);
- dvfs_reg->reg = reg;
dvfs_reg->reg_id = kstrdup(d->reg_id, GFP_KERNEL);
list_add_tail(&dvfs_reg->node, &dvfs_reg_list);
+out:
+ mutex_unlock(&dvfs_reg_list_lock);
return dvfs_reg;
}
@@ -122,12 +128,15 @@ static struct dvfs_reg *attach_dvfs_reg(struct dvfs *d)
if (!dvfs_reg)
return NULL;
+ mutex_lock(&dvfs_reg->lock);
list_add_tail(&d->reg_node, &dvfs_reg->dvfs);
+
d->dvfs_reg = dvfs_reg;
if (d->max_millivolts > d->dvfs_reg->max_millivolts)
d->dvfs_reg->max_millivolts = d->max_millivolts;
- d->cur_millivolts = dvfs_reg_get_voltage(d->dvfs_reg);
+ d->cur_millivolts = d->max_millivolts;
+ mutex_unlock(&dvfs_reg->lock);
return dvfs_reg;
}
@@ -169,7 +178,7 @@ __tegra_dvfs_set_rate(struct clk *c, struct dvfs *d, unsigned long rate)
return ret;
}
-int tegra_dvfs_set_rate(struct clk *c, unsigned long rate)
+int tegra_dvfs_set_rate_locked(struct clk *c, unsigned long rate)
{
struct dvfs *d;
int ret = 0;
@@ -177,7 +186,7 @@ int tegra_dvfs_set_rate(struct clk *c, unsigned long rate)
c->dvfs_rate = rate;
- freq_up = (c->refcnt == 0) || (rate > c->rate);
+ freq_up = (c->refcnt == 0) || (rate > clk_get_rate_locked(c));
list_for_each_entry(d, &c->dvfs, node) {
if (d->higher == freq_up)
@@ -195,9 +204,9 @@ int tegra_dvfs_set_rate(struct clk *c, unsigned long rate)
return 0;
}
-EXPORT_SYMBOL(tegra_dvfs_set_rate);
-int tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d)
+/* May only be called during clock init, does not take any locks on clock c. */
+int __init tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d)
{
int i;
struct dvfs_reg *dvfs_reg;
@@ -221,30 +230,38 @@ int tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d)
}
d->num_freqs = i;
- if (d->auto_dvfs)
+ if (d->auto_dvfs) {
c->auto_dvfs = true;
+ clk_set_cansleep(c);
+ }
c->is_dvfs = true;
- smp_wmb();
list_add_tail(&d->node, &c->dvfs);
+ mutex_lock(&dvfs_debug_list_lock);
list_add_tail(&d->debug_node, &dvfs_debug_list);
+ mutex_unlock(&dvfs_debug_list_lock);
return 0;
}
-int __init tegra_init_dvfs(void)
+/*
+ * Iterate through all the dvfs regulators, finding the regulator exported
+ * by the regulator api for each one. Must be called in late init, after
+ * all the regulator api's regulators are initialized.
+ */
+int __init tegra_dvfs_late_init(void)
{
- lock_dvfs();
- tegra2_init_dvfs();
+ struct dvfs_reg *dvfs_reg;
- tegra_clk_set_dvfs_rates();
- unlock_dvfs();
+ mutex_lock(&dvfs_reg_list_lock);
+ list_for_each_entry(dvfs_reg, &dvfs_reg_list, node)
+ dvfs_reg_connect_to_regulator(dvfs_reg);
+ mutex_unlock(&dvfs_reg_list_lock);
return 0;
}
-late_initcall(tegra_init_dvfs);
#ifdef CONFIG_DEBUG_FS
static int dvfs_tree_sort_cmp(void *p, struct list_head *a, struct list_head *b)
@@ -273,7 +290,7 @@ static int dvfs_tree_show(struct seq_file *s, void *data)
seq_printf(s, " clock rate mV\n");
seq_printf(s, "--------------------------------\n");
- lock_dvfs();
+ mutex_lock(&dvfs_debug_list_lock);
list_sort(NULL, &dvfs_debug_list, dvfs_tree_sort_cmp);
@@ -288,7 +305,7 @@ static int dvfs_tree_show(struct seq_file *s, void *data)
d->cur_rate, d->cur_millivolts);
}
- unlock_dvfs();
+ mutex_unlock(&dvfs_debug_list_lock);
return 0;
}
diff --git a/arch/arm/mach-tegra/dvfs.h b/arch/arm/mach-tegra/dvfs.h
index df6a3866d31b..e5eac6cf9cd0 100644
--- a/arch/arm/mach-tegra/dvfs.h
+++ b/arch/arm/mach-tegra/dvfs.h
@@ -49,11 +49,9 @@ struct dvfs {
struct list_head reg_node;
};
-void lock_dvfs(void);
-void unlock_dvfs(void);
-
void tegra2_init_dvfs(void);
int tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d);
int dvfs_debugfs_init(struct dentry *clk_debugfs_root);
+int tegra_dvfs_late_init(void);
#endif
diff --git a/arch/arm/mach-tegra/include/mach/audio.h b/arch/arm/mach-tegra/include/mach/audio.h
index 80f8b2c2d8cd..5950ececae00 100644
--- a/arch/arm/mach-tegra/include/mach/audio.h
+++ b/arch/arm/mach-tegra/include/mach/audio.h
@@ -35,7 +35,10 @@
#define TEGRA_AUDIO_ENABLE_RX 2
struct tegra_audio_platform_data {
- bool master;
+ bool i2s_master;
+ bool dsp_master;
+ int i2s_master_clk; /* When I2S mode and master, the framesync rate. */
+ int dsp_master_clk; /* When DSP mode and master, the framesync rate. */
bool dma_on;
unsigned long i2s_clk_rate;
const char *dap_clk;
diff --git a/arch/arm/mach-tegra/include/mach/clk.h b/arch/arm/mach-tegra/include/mach/clk.h
index 04ff7b672ad8..f96f8c7c53ee 100644
--- a/arch/arm/mach-tegra/include/mach/clk.h
+++ b/arch/arm/mach-tegra/include/mach/clk.h
@@ -26,5 +26,6 @@ void tegra_periph_reset_deassert(struct clk *c);
void tegra_periph_reset_assert(struct clk *c);
int tegra_dvfs_set_rate(struct clk *c, unsigned long rate);
+unsigned long clk_get_rate_all_locked(struct clk *c);
#endif
diff --git a/arch/arm/mach-tegra/include/mach/fb.h b/arch/arm/mach-tegra/include/mach/fb.h
index 0c9577208a2c..8130da0ed8e7 100644
--- a/arch/arm/mach-tegra/include/mach/fb.h
+++ b/arch/arm/mach-tegra/include/mach/fb.h
@@ -37,6 +37,8 @@ void tegra_fb_unregister(struct tegra_fb_info *fb_info);
void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
struct fb_monspecs *specs,
bool (*mode_filter)(struct fb_videomode *mode));
+/* called by display controller on suspend */
+void tegra_fb_suspend(struct tegra_fb_info *tegra_fb);
#else
static inline struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev,
struct tegra_dc *dc,
@@ -50,9 +52,12 @@ static inline void tegra_fb_unregister(struct tegra_fb_info *fb_info)
{
}
-void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
- struct fb_monspecs *specs,
- bool (*mode_filter)(struct fb_videomode *mode))
+static inline void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
+ struct fb_monspecs *specs,
+ bool (*mode_filter)(struct fb_videomode *mode))
+{
+}
+static inline void tegra_fb_suspend(struct tegra_fb_info *tegra_fb)
{
}
#endif
diff --git a/arch/arm/mach-tegra/include/mach/nvmap.h b/arch/arm/mach-tegra/include/mach/nvmap.h
index 41f06f532c39..7422d1a44d8a 100644
--- a/arch/arm/mach-tegra/include/mach/nvmap.h
+++ b/arch/arm/mach-tegra/include/mach/nvmap.h
@@ -66,7 +66,8 @@ struct nvmap_pinarray_elem {
__u32 pin_offset;
};
-struct nvmap_client *nvmap_create_client(struct nvmap_device *dev);
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
+ const char *name);
struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
size_t align, unsigned int flags);
diff --git a/arch/arm/mach-tegra/include/mach/spdif.h b/arch/arm/mach-tegra/include/mach/spdif.h
new file mode 100644
index 000000000000..96103fae91b1
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/spdif.h
@@ -0,0 +1,392 @@
+/*
+ * arch/arm/mach-tegra/include/mach/spdif.h
+ *
+ *
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+
+#ifndef __ARCH_ARM_MACH_TEGRA_SPDIF_H
+#define __ARCH_ARM_MACH_TEGRA_SPDIF_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+
+/* Offsets from TEGRA_SPDIF_BASE */
+
+#define SPDIF_CTRL_0 0x0
+#define SPDIF_STATUS_0 0x4
+#define SPDIF_STROBE_CTRL_0 0x8
+#define SPDIF_DATA_FIFO_CSR_0 0x0C
+#define SPDIF_DATA_OUT_0 0x40
+#define SPDIF_DATA_IN_0 0x80
+#define SPDIF_CH_STA_RX_A_0 0x100
+#define SPDIF_CH_STA_RX_B_0 0x104
+#define SPDIF_CH_STA_RX_C_0 0x108
+#define SPDIF_CH_STA_RX_D_0 0x10C
+#define SPDIF_CH_STA_RX_E_0 0x110
+#define SPDIF_CH_STA_RX_F_0 0x114
+#define SPDIF_CH_STA_TX_A_0 0x140
+#define SPDIF_CH_STA_TX_B_0 0x144
+#define SPDIF_CH_STA_TX_C_0 0x148
+#define SPDIF_CH_STA_TX_D_0 0x14C
+#define SPDIF_CH_STA_TX_E_0 0x150
+#define SPDIF_CH_STA_TX_F_0 0x154
+#define SPDIF_USR_STA_RX_A_0 0x180
+#define SPDIF_USR_DAT_TX_A_0 0x1C0
+
+/*
+ * Register SPDIF_CTRL_0
+ */
+
+/*
+ * 1=start capturing from left channel,0=start
+ * capturing from right channel.
+ */
+#define SPDIF_CTRL_0_CAP_LC (1<<30)
+
+/* SPDIF receiver(RX): 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_RX_EN (1<<29)
+
+/* SPDIF Transmitter(TX): 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_TX_EN (1<<28)
+
+/* Transmit Channel status: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_TC_EN (1<<27)
+
+/* Transmit user Data: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_TU_EN (1<<26)
+
+/* Interrupt on transmit error: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_IE_TXE (1<<25)
+
+/* Interrupt on receive error: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_IE_RXE (1<<24)
+
+/* Interrupt on invalid preamble: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_IE_P (1<<23)
+
+/* Interrupt on "B" preamble: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_IE_B (1<<22)
+
+/*
+ * Interrupt when block of channel status received:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_IE_C (1<<21)
+
+/*
+ * Interrupt when a valid information unit (IU) is received:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_IE_U (1<<20)
+
+/*
+ * Interrupt when RX user FIFO attn. level is reached:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_QE_RU (1<<19)
+
+/*
+ * Interrupt when TX user FIFO attn. level is reached:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_QE_TU (1<<18)
+
+/*
+ * Interrupt when RX data FIFO attn. level is reached:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_QE_RX (1<<17)
+
+/*
+ * Interrupt when TX data FIFO attn. level is reached:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_QE_TX (1<<16)
+
+/* Loopback test mode: 1=enable internal loopback, 0=Normal mode. */
+#define SPDIF_CTRL_0_LBK_EN (1<<15)
+
+/*
+ * Pack data mode:
+ * 1=Packeted left/right channel data into a single word,
+ * 0=Single data (16 bit needs to be padded to match the
+ * interface data bit size)
+ */
+#define SPDIF_CTRL_0_PACK (1<<14)
+
+/*
+ * 00=16bit data
+ * 01=20bit data
+ * 10=24bit data
+ * 11=raw data
+ */
+#define SPDIF_BIT_MODE_MODE16BIT (0)
+#define SPDIF_BIT_MODE_MODE20BIT (1)
+#define SPDIF_BIT_MODE_MODE24BIT (2)
+#define SPDIF_BIT_MODE_MODERAW (3)
+#define SPDIF_CTRL_0_BIT_MODE_SHIFT (12)
+
+#define SPDIF_CTRL_0_BIT_MODE_MASK \
+ ((0x3) << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+#define SPDIF_CTRL_0_BIT_MODE_MODE16BIT \
+ (SPDIF_BIT_MODE_MODE16BIT << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+#define SPDIF_CTRL_0_BIT_MODE_MODE20BIT \
+ (SPDIF_BIT_MODE_MODE20BIT << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+#define SPDIF_CTRL_0_BIT_MODE_MODE24BIT \
+ (SPDIF_BIT_MODE_MODE24BIT << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+#define SPDIF_CTRL_0_BIT_MODE_MODERAW \
+ (SPDIF_BIT_MODE_MODERAW << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+
+
+/*
+ * SPDIF Status Register
+ * -------------------------
+ * Note: IS_P, IS_B, IS_C, and IS_U are sticky bits.
+ * Software must write a 1 to the corresponding bit location
+ * to clear the status.
+ */
+
+/* Register SPDIF_STATUS_0 */
+
+/*
+ * Receiver(RX) shifter is busy receiving data. 1=busy, 0=not busy.
+ * This bit is asserted when the receiver first locked onto the
+ * preamble of the data stream after RX_EN is asserted. This bit is
+ * deasserted when either,
+ * (a) the end of a frame is reached after RX_EN is deasserted, or
+ * (b) the SPDIF data stream becomes inactive.
+ */
+#define SPDIF_STATUS_0_RX_BSY (1<<29)
+
+
+/*
+ * Transmitter(TX) shifter is busy transmitting data.
+ * 1=busy, 0=not busy.
+ * This bit is asserted when TX_EN is asserted.
+ * This bit is deasserted when the end of a frame is reached after
+ * TX_EN is deasserted.
+ */
+#define SPDIF_STATUS_0_TX_BSY (1<<28)
+
+/*
+ * TX is busy shifting out channel status. 1=busy, 0=not busy.
+ * This bit is asserted when both TX_EN and TC_EN are asserted and
+ * data from CH_STA_TX_A register is loaded into the internal shifter.
+ * This bit is deasserted when either,
+ * (a) the end of a frame is reached after TX_EN is deasserted, or
+ * (b) CH_STA_TX_F register is loaded into the internal shifter.
+ */
+#define SPDIF_STATUS_0_TC_BSY (1<<27)
+
+/*
+ * TX User data FIFO busy. 1=busy, 0=not busy.
+ * This bit is asserted when TX_EN and TXU_EN are asserted and
+ * there's data in the TX user FIFO. This bit is deassert when either,
+ * (a) the end of a frame is reached after TX_EN is deasserted, or
+ * (b) there's no data left in the TX user FIFO.
+ */
+#define SPDIF_STATUS_0_TU_BSY (1<<26)
+
+/* Tx FIFO Underrun error status: 1=error, 0=no error */
+#define SPDIF_STATUS_0_TX_ERR (1<<25)
+
+/* Rx FIFO Overrun error status: 1=error, 0=no error */
+#define SPDIF_STATUS_0_RX_ERR (1<<24)
+
+/* Preamble status: 1=bad/missing preamble, 0=Preamble ok */
+#define SPDIF_STATUS_0_IS_P (1<<23)
+
+/* B-preamble detection status: 0=not detected, 1=B-preamble detected */
+#define SPDIF_STATUS_0_IS_B (1<<22)
+
+/*
+ * RX channel block data receive status:
+ * 1=received entire block of channel status,
+ * 0=entire block not received yet.
+ */
+#define SPDIF_STATUS_0_IS_C (1<<21)
+
+/* RX User Data Valid flag: 1=valid IU detected, 0 = no IU detected. */
+#define SPDIF_STATUS_0_IS_U (1<<20)
+
+/*
+ * RX User FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define SPDIF_STATUS_0_QS_RU (1<<19)
+
+/*
+ * TX User FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define SPDIF_STATUS_0_QS_TU (1<<18)
+
+/*
+ * RX Data FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define SPDIF_STATUS_0_QS_RX (1<<17)
+
+/*
+ * TX Data FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define SPDIF_STATUS_0_QS_TX (1<<16)
+
+
+/* SPDIF FIFO Configuration and Status Register */
+
+/* Register SPDIF_DATA_FIFO_CSR_0 */
+
+#define SPDIF_FIFO_ATN_LVL_ONE_SLOT 0
+#define SPDIF_FIFO_ATN_LVL_FOUR_SLOTS 1
+#define SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS 2
+#define SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS 3
+
+
+/* Clear Receiver User FIFO (RX USR.FIFO) */
+#define SPDIF_DATA_FIFO_CSR_0_RU_CLR (1<<31)
+
+/*
+ * RX USR.FIFO Attention Level:
+ * 00=1-slot-full, 01=2-slots-full, 10=3-slots-full, 11=4-slots-full.
+ */
+
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU1 (0)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU2 (1)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU3 (2)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU4 (3)
+
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT (29)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_MASK \
+ (0x3 << SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+/* Note: shift macro spelled _SHIFT (not _SHIF) in all four expansions
+ * below, otherwise the *_WORD_FULL macros do not compile when used. */
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU1_WORD_FULL \
+ (SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU1 << \
+ SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU2_WORD_FULL \
+ (SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU2 << \
+ SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU3_WORD_FULL \
+ (SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU3 << \
+ SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU4_WORD_FULL \
+ (SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU4 << \
+ SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+
+/* Number of RX USR.FIFO levels with valid data. */
+#define SPDIF_DATA_FIFO_CSR_0_FULL_COUNT_SHIFT (24)
+#define SPDIF_DATA_FIFO_CSR_0_FULL_COUNT_MASK \
+ (0x1f << SPDIF_DATA_FIFO_CSR_0_FULL_COUNT_SHIFT)
+
+/* Clear Transmitter User FIFO (TX USR.FIFO) */
+#define SPDIF_DATA_FIFO_CSR_0_TU_CLR (1<<23)
+
+/*
+ * TxUSR.FIFO Attention Level:
+ * 11=4-slots-empty, 10=3-slots-empty, 01=2-slots-empty, 00=1-slot-empty.
+ */
+
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU1 (0)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU2 (1)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU3 (2)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU4 (3)
+
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT (21)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_MASK \
+ (0x3 << SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU1_WORD_EMPTY \
+ (SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU1 << \
+ SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU2_WORD_EMPTY \
+ (SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU2 << \
+ SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU3_WORD_EMPTY \
+ (SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU3 << \
+ SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU4_WORD_EMPTY \
+ (SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU4 << \
+ SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+
+/* Number of Tx USR.FIFO levels that could be filled. */
+#define SPDIF_DATA_FIFO_CSR_0_TU_EMPTY_COUNT_SHIFT (16)
+#define SPDIF_DATA_FIFO_CSR_0_TU_EMPTY_COUNT_FIELD \
+ ((0x1f) << SPDIF_DATA_FIFO_CSR_0_TU_EMPTY_COUNT_SHIFT)
+
+/* Clear Receiver Data FIFO (RX DATA.FIFO). */
+#define SPDIF_DATA_FIFO_CSR_0_RX_CLR (1<<15)
+
+/*
+ * Rx FIFO Attention Level:
+ * 11=12-slots-full, 10=8-slots-full, 01=4-slots-full, 00=1-slot-full.
+ */
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT (13)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_MASK \
+ (0x3 << SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_RX1_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_ONE_SLOT << \
+ SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_RX4_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_FOUR_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_RX8_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_RX12_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+
+
+/* Number of RX DATA.FIFO levels with valid data */
+#define SPDIF_DATA_FIFO_CSR_0_RX_DATA_FIFO_FULL_COUNT_SHIFT (8)
+#define SPDIF_DATA_FIFO_CSR_0_RX_DATA_FIFO_FULL_COUNT_FIELD \
+ ((0x1f) << SPDIF_DATA_FIFO_CSR_0_RX_DATA_FIFO_FULL_COUNT_SHIFT)
+
+/* Clear Transmitter Data FIFO (TX DATA.FIFO) */
+#define SPDIF_DATA_FIFO_CSR_0_TX_CLR (1<<7)
+
+/*
+ * Tx FIFO Attention Level:
+ * 11=12-slots-empty, 10=8-slots-empty, 01=4-slots-empty, 00=1-slot-empty
+ */
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT (5)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_MASK \
+ (0x3 << SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_TX1_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_ONE_SLOT << \
+ SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_TX4_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_FOUR_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_TX8_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_TX12_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+
+
+/* Number of Tx DATA.FIFO levels that could be filled. */
+#define SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_SHIFT (0)
+#define SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_MASK \
+ ((0x1f) << SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_SHIFT)
+
+
+#endif /* __ARCH_ARM_MACH_TEGRA_SPDIF_H */
diff --git a/arch/arm/mach-tegra/suspend.c b/arch/arm/mach-tegra/suspend.c
index 0d9898de0c4b..f691e906eb0e 100644
--- a/arch/arm/mach-tegra/suspend.c
+++ b/arch/arm/mach-tegra/suspend.c
@@ -46,6 +46,7 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#include <mach/clk.h>
#include <mach/iomap.h>
#include <mach/iovmm.h>
#include <mach/irqs.h>
@@ -358,12 +359,13 @@ unsigned int tegra_suspend_lp2(unsigned int us)
writel(virt_to_phys(tegra_lp2_startup), evp_reset);
set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer,
- clk_get_rate(tegra_pclk));
+ clk_get_rate_all_locked(tegra_pclk));
if (us)
tegra_lp2_set_trigger(us);
suspend_cpu_complex();
+ stop_critical_timings();
flush_cache_all();
/* structure is written by reset code, so the L2 lines
* must be invalidated */
@@ -374,6 +376,7 @@ unsigned int tegra_suspend_lp2(unsigned int us)
/* return from __cortex_a9_restore */
barrier();
restore_cpu_complex();
+ start_critical_timings();
remain = tegra_lp2_timer_remain();
if (us)
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index 253de093a8ef..dec27b76eee2 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -23,7 +23,7 @@
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/hrtimer.h>
+#include <linux/clk.h>
#include <asm/clkdev.h>
@@ -337,12 +337,12 @@ static int tegra2_super_clk_set_parent(struct clk *c, struct clk *p)
val |= sel->value << shift;
if (c->refcnt)
- clk_enable_locked(p);
+ clk_enable(p);
clk_writel(val, c->reg);
if (c->refcnt && c->parent)
- clk_disable_locked(c->parent);
+ clk_disable(c->parent);
clk_reparent(c, p);
return 0;
@@ -351,11 +351,24 @@ static int tegra2_super_clk_set_parent(struct clk *c, struct clk *p)
return -EINVAL;
}
+/*
+ * Super clocks have "clock skippers" instead of dividers. Dividing using
+ * a clock skipper does not allow the voltage to be scaled down, so instead
+ * adjust the rate of the parent clock. This requires that the parent of a
+ * super clock have no other children, otherwise the rate will change
+ * underneath the other children.
+ */
+static int tegra2_super_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ return clk_set_rate(c->parent, rate);
+}
+
static struct clk_ops tegra_super_ops = {
.init = tegra2_super_clk_init,
.enable = tegra2_super_clk_enable,
.disable = tegra2_super_clk_disable,
.set_parent = tegra2_super_clk_set_parent,
+ .set_rate = tegra2_super_clk_set_rate,
};
/* virtual cpu clock functions */
@@ -389,31 +402,31 @@ static int tegra2_cpu_clk_set_rate(struct clk *c, unsigned long rate)
* Take an extra reference to the main pll so it doesn't turn
* off when we move the cpu off of it
*/
- clk_enable_locked(c->u.cpu.main);
+ clk_enable(c->u.cpu.main);
- ret = clk_set_parent_locked(c->parent, c->u.cpu.backup);
+ ret = clk_set_parent(c->parent, c->u.cpu.backup);
if (ret) {
pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.backup->name);
goto out;
}
- if (rate == c->u.cpu.backup->rate)
+ if (rate == clk_get_rate(c->u.cpu.backup))
goto out;
- ret = clk_set_rate_locked(c->u.cpu.main, rate);
+ ret = clk_set_rate(c->u.cpu.main, rate);
if (ret) {
pr_err("Failed to change cpu pll to %lu\n", rate);
goto out;
}
- ret = clk_set_parent_locked(c->parent, c->u.cpu.main);
+ ret = clk_set_parent(c->parent, c->u.cpu.main);
if (ret) {
pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.main->name);
goto out;
}
out:
- clk_disable_locked(c->u.cpu.main);
+ clk_disable(c->u.cpu.main);
return ret;
}
@@ -465,7 +478,7 @@ static void tegra2_bus_clk_disable(struct clk *c)
static int tegra2_bus_clk_set_rate(struct clk *c, unsigned long rate)
{
u32 val = clk_readl(c->reg);
- unsigned long parent_rate = c->parent->rate;
+ unsigned long parent_rate = clk_get_rate(c->parent);
int i;
for (i = 1; i <= 4; i++) {
if (rate == parent_rate / i) {
@@ -539,14 +552,15 @@ static void tegra2_blink_clk_disable(struct clk *c)
static int tegra2_blink_clk_set_rate(struct clk *c, unsigned long rate)
{
- if (rate >= c->parent->rate) {
+ unsigned long parent_rate = clk_get_rate(c->parent);
+ if (rate >= parent_rate) {
c->div = 1;
pmc_writel(0, c->reg);
} else {
unsigned int on_off;
u32 val;
- on_off = DIV_ROUND_UP(c->parent->rate / 8, rate);
+ on_off = DIV_ROUND_UP(parent_rate / 8, rate);
c->div = on_off * 8;
val = (on_off & PMC_BLINK_TIMER_DATA_ON_MASK) <<
@@ -632,7 +646,7 @@ static int tegra2_pll_clk_set_rate(struct clk *c, unsigned long rate)
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
- input_rate = c->parent->rate;
+ input_rate = clk_get_rate(c->parent);
for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) {
if (sel->input_rate == input_rate && sel->output_rate == rate) {
c->mul = sel->n;
@@ -772,9 +786,11 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate)
u32 val;
u32 new_val;
int divider_u71;
+ unsigned long parent_rate = clk_get_rate(c->parent);
+
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
if (c->flags & DIV_U71) {
- divider_u71 = clk_div71_get_divider(c->parent->rate, rate);
+ divider_u71 = clk_div71_get_divider(parent_rate, rate);
if (divider_u71 >= 0) {
val = clk_readl(c->reg);
new_val = val >> c->reg_shift;
@@ -792,7 +808,7 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate)
return 0;
}
} else if (c->flags & DIV_2) {
- if (c->parent->rate == rate * 2)
+ if (parent_rate == rate * 2)
return 0;
}
return -EINVAL;
@@ -801,15 +817,16 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate)
static long tegra2_pll_div_clk_round_rate(struct clk *c, unsigned long rate)
{
int divider;
+ unsigned long parent_rate = clk_get_rate(c->parent);
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
if (c->flags & DIV_U71) {
- divider = clk_div71_get_divider(c->parent->rate, rate);
+ divider = clk_div71_get_divider(parent_rate, rate);
if (divider < 0)
return divider;
- return c->parent->rate * 2 / (divider + 2);
+ return parent_rate * 2 / (divider + 2);
} else if (c->flags & DIV_2) {
- return c->parent->rate / 2;
+ return parent_rate / 2;
}
return -EINVAL;
}
@@ -923,12 +940,12 @@ static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p)
val |= (sel->value) << PERIPH_CLK_SOURCE_SHIFT;
if (c->refcnt)
- clk_enable_locked(p);
+ clk_enable(p);
clk_writel(val, c->reg);
if (c->refcnt && c->parent)
- clk_disable_locked(c->parent);
+ clk_disable(c->parent);
clk_reparent(c, p);
return 0;
@@ -942,9 +959,10 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate)
{
u32 val;
int divider;
- pr_debug("%s: %lu\n", __func__, rate);
+ unsigned long parent_rate = clk_get_rate(c->parent);
+
if (c->flags & DIV_U71) {
- divider = clk_div71_get_divider(c->parent->rate, rate);
+ divider = clk_div71_get_divider(parent_rate, rate);
if (divider >= 0) {
val = clk_readl(c->reg);
val &= ~PERIPH_CLK_SOURCE_DIVU71_MASK;
@@ -955,7 +973,7 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate)
return 0;
}
} else if (c->flags & DIV_U16) {
- divider = clk_div16_get_divider(c->parent->rate, rate);
+ divider = clk_div16_get_divider(parent_rate, rate);
if (divider >= 0) {
val = clk_readl(c->reg);
val &= ~PERIPH_CLK_SOURCE_DIVU16_MASK;
@@ -965,7 +983,7 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate)
c->mul = 1;
return 0;
}
- } else if (c->parent->rate <= rate) {
+ } else if (parent_rate <= rate) {
c->div = 1;
c->mul = 1;
return 0;
@@ -977,19 +995,20 @@ static long tegra2_periph_clk_round_rate(struct clk *c,
unsigned long rate)
{
int divider;
+ unsigned long parent_rate = clk_get_rate(c->parent);
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
if (c->flags & DIV_U71) {
- divider = clk_div71_get_divider(c->parent->rate, rate);
+ divider = clk_div71_get_divider(parent_rate, rate);
if (divider < 0)
return divider;
- return c->parent->rate * 2 / (divider + 2);
+ return parent_rate * 2 / (divider + 2);
} else if (c->flags & DIV_U16) {
- divider = clk_div16_get_divider(c->parent->rate, rate);
+ divider = clk_div16_get_divider(parent_rate, rate);
if (divider < 0)
return divider;
- return c->parent->rate / (divider + 1);
+ return parent_rate / (divider + 1);
}
return -EINVAL;
}
@@ -1017,7 +1036,7 @@ static void tegra2_clk_double_init(struct clk *c)
static int tegra2_clk_double_set_rate(struct clk *c, unsigned long rate)
{
- if (rate != 2 * c->parent->rate)
+ if (rate != 2 * clk_get_rate(c->parent))
return -EINVAL;
c->mul = 2;
c->div = 1;
@@ -1068,12 +1087,12 @@ static int tegra2_audio_sync_clk_set_parent(struct clk *c, struct clk *p)
val |= sel->value;
if (c->refcnt)
- clk_enable_locked(p);
+ clk_enable(p);
clk_writel(val, c->reg);
if (c->refcnt && c->parent)
- clk_disable_locked(c->parent);
+ clk_disable(c->parent);
clk_reparent(c, p);
return 0;
@@ -1083,30 +1102,10 @@ static int tegra2_audio_sync_clk_set_parent(struct clk *c, struct clk *p)
return -EINVAL;
}
-static int tegra2_audio_sync_clk_set_rate(struct clk *c, unsigned long rate)
-{
- unsigned long parent_rate;
- if (!c->parent) {
- pr_err("%s: clock has no parent\n", __func__);
- return -EINVAL;
- }
- parent_rate = c->parent->rate;
- if (rate != parent_rate) {
- pr_err("%s: %s/%ld differs from parent %s/%ld\n",
- __func__,
- c->name, rate,
- c->parent->name, parent_rate);
- return -EINVAL;
- }
- c->rate = parent_rate;
- return 0;
-}
-
static struct clk_ops tegra_audio_sync_clk_ops = {
.init = tegra2_audio_sync_clk_init,
.enable = tegra2_audio_sync_clk_enable,
.disable = tegra2_audio_sync_clk_disable,
- .set_rate = tegra2_audio_sync_clk_set_rate,
.set_parent = tegra2_audio_sync_clk_set_parent,
};
@@ -1154,12 +1153,13 @@ static void tegra_clk_shared_bus_update(struct clk *bus)
struct clk *c;
unsigned long rate = bus->u.shared_bus.min_rate;
- list_for_each_entry(c, &bus->u.shared_bus.list, u.shared_bus_user.node)
+ list_for_each_entry(c, &bus->u.shared_bus.list,
+ u.shared_bus_user.node) {
if (c->u.shared_bus_user.enabled)
rate = max(c->u.shared_bus_user.rate, rate);
+ }
- if (rate != bus->rate)
- clk_set_rate_locked(bus, rate);
+ clk_set_rate(bus, rate);
};
static void tegra_clk_shared_bus_init(struct clk *c)
@@ -1895,9 +1895,9 @@ struct clk tegra_list_clks[] = {
PERIPH_CLK("sdmmc2", "sdhci-tegra.1", NULL, 9, 0x154, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
PERIPH_CLK("sdmmc3", "sdhci-tegra.2", NULL, 69, 0x1bc, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x164, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
- PERIPH_CLK("vcp", "vcp", NULL, 29, 0, 250000000, mux_clk_m, 0),
- PERIPH_CLK("bsea", "bsea", NULL, 62, 0, 250000000, mux_clk_m, 0),
- PERIPH_CLK("vde", "vde", NULL, 61, 0x1c8, 250000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("vcp", "tegra-avp", "vcp", 29, 0, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("bsea", "tegra-avp", "bsea", 62, 0, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("vde", "tegra-avp", "vde", 61, 0x1c8, 250000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
PERIPH_CLK("csite", "csite", NULL, 73, 0x1d4, 144000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* max rate ??? */
/* FIXME: what is la? */
PERIPH_CLK("la", "la", NULL, 76, 0x1f8, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
diff --git a/arch/arm/mach-tegra/tegra2_dvfs.c b/arch/arm/mach-tegra/tegra2_dvfs.c
index 4f56fb88fc17..265a7b538f7f 100644
--- a/arch/arm/mach-tegra/tegra2_dvfs.c
+++ b/arch/arm/mach-tegra/tegra2_dvfs.c
@@ -18,6 +18,7 @@
*/
#include <linux/kernel.h>
+#include <linux/init.h>
#include <linux/string.h>
#include "clock.h"
@@ -39,29 +40,41 @@ static int cpu_core_millivolts[MAX_DVFS_FREQS];
#define KHZ 1000
#define MHZ 1000000
-#define CPU_DVFS(_clk_name, _process_id, _mult, _freqs...) \
- { \
- .clk_name = _clk_name, \
- .reg_id = CORE_REGULATOR, \
- .cpu = false, \
- .process_id = _process_id, \
- .freqs = {_freqs}, \
- .freqs_mult = _mult, \
- .auto_dvfs = true, \
- .higher = true, \
- .max_millivolts = CORE_MAX_MILLIVOLTS \
- }, \
- { \
- .clk_name = _clk_name, \
- .reg_id = CPU_REGULATOR, \
- .cpu = true, \
- .process_id = _process_id, \
- .freqs = {_freqs}, \
- .freqs_mult = _mult, \
- .auto_dvfs = true, \
- .max_millivolts = CPU_MAX_MILLIVOLTS \
- }
-
+#ifdef CONFIG_TEGRA_CPU_DVFS
+#define CPU_DVFS_CPU(_clk_name, _process_id, _mult, _freqs...) \
+ { \
+ .clk_name = _clk_name, \
+ .reg_id = CPU_REGULATOR, \
+ .cpu = true, \
+ .process_id = _process_id, \
+ .freqs = {_freqs}, \
+ .freqs_mult = _mult, \
+ .auto_dvfs = true, \
+ .max_millivolts = CPU_MAX_MILLIVOLTS \
+ },
+
+#ifdef CONFIG_TEGRA_CORE_DVFS /* CPU_DVFS && CORE_DVFS */
+#define CPU_DVFS_CORE(_clk_name, _process_id, _mult, _freqs...) \
+ { \
+ .clk_name = _clk_name, \
+ .reg_id = CORE_REGULATOR, \
+ .cpu = false, \
+ .process_id = _process_id, \
+ .freqs = {_freqs}, \
+ .freqs_mult = _mult, \
+ .auto_dvfs = true, \
+ .higher = true, \
+ .max_millivolts = CORE_MAX_MILLIVOLTS \
+ },
+#else /* CPU_DVFS && !CORE_DVFS */
+#define CPU_DVFS_CORE(_clk_name, _process_id, _mult, _freqs...)
+#endif
+#else /* !CPU_DVFS */
+#define CPU_DVFS_CPU(_clk_name, _process_id, _mult, _freqs...)
+#define CPU_DVFS_CORE(_clk_name, _process_id, _mult, _freqs...)
+#endif
+
+#ifdef CONFIG_TEGRA_CORE_DVFS
#define CORE_DVFS(_clk_name, _auto, _mult, _freqs...) \
{ \
.clk_name = _clk_name, \
@@ -71,30 +84,48 @@ static int cpu_core_millivolts[MAX_DVFS_FREQS];
.freqs_mult = _mult, \
.auto_dvfs = _auto, \
.max_millivolts = CORE_MAX_MILLIVOLTS \
- }
+ },
+#else
+#define CORE_DVFS(_clk_name, _process_id, _mult, _freqs...)
+#endif
+
+#define CPU_DVFS(_clk_name, _process_id, _mult, _freqs...) \
+ CPU_DVFS_CORE(_clk_name, _process_id, _mult, _freqs) \
+ CPU_DVFS_CPU(_clk_name, _process_id, _mult, _freqs) \
+
static struct dvfs dvfs_init[] = {
/* Cpu voltages (mV): 750, 775, 800, 825, 875, 900, 925, 975, 1000, 1050, 1100 */
- CPU_DVFS("cpu", 0, MHZ, 314, 314, 314, 456, 456, 608, 608, 760, 817, 912, 1000),
- CPU_DVFS("cpu", 1, MHZ, 314, 314, 314, 456, 456, 618, 618, 770, 827, 922, 1000),
- CPU_DVFS("cpu", 2, MHZ, 494, 675, 675, 675, 817, 817, 922, 1000),
- CPU_DVFS("cpu", 3, MHZ, 730, 760, 845, 845, 1000),
+ CPU_DVFS("cpu", 0, MHZ, 314, 314, 314, 456, 456, 608, 608, 760, 817, 912, 1000)
+ CPU_DVFS("cpu", 1, MHZ, 314, 314, 314, 456, 456, 618, 618, 770, 827, 922, 1000)
+ CPU_DVFS("cpu", 2, MHZ, 494, 675, 675, 675, 817, 817, 922, 1000)
+ CPU_DVFS("cpu", 3, MHZ, 730, 760, 845, 845, 1000)
/* Core voltages (mV): 950, 1000, 1100, 1200, 1275 */
- CORE_DVFS("sdmmc1", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
- CORE_DVFS("sdmmc2", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
- CORE_DVFS("sdmmc3", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
- CORE_DVFS("sdmmc4", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
- CORE_DVFS("ndflash", 1, KHZ, 130000, 150000, 158000, 164000, 164000),
- CORE_DVFS("nor", 1, KHZ, 0, 92000, 92000, 92000, 92000),
- CORE_DVFS("ide", 1, KHZ, 0, 0, 100000, 100000, 100000),
- CORE_DVFS("mipi", 1, KHZ, 0, 40000, 40000, 40000, 60000),
- CORE_DVFS("usbd", 1, KHZ, 0, 0, 480000, 480000, 480000),
- CORE_DVFS("usb2", 1, KHZ, 0, 0, 480000, 480000, 480000),
- CORE_DVFS("usb3", 1, KHZ, 0, 0, 480000, 480000, 480000),
- CORE_DVFS("pcie", 1, KHZ, 0, 0, 0, 250000, 250000),
- CORE_DVFS("dsi", 1, KHZ, 100000, 100000, 100000, 500000, 500000),
- CORE_DVFS("tvo", 1, KHZ, 0, 0, 0, 250000, 250000),
+
+#if 0
+ /*
+ * The sdhci core calls the clock ops with a spinlock held, which
+ * conflicts with the sleeping dvfs api.
+ * For now, boards must ensure that the core voltage does not drop
+ * below 1V, or that the sdmmc busses are set to 44 MHz or less.
+ */
+ CORE_DVFS("sdmmc1", 1, KHZ, 44000, 52000, 52000, 52000, 52000)
+ CORE_DVFS("sdmmc2", 1, KHZ, 44000, 52000, 52000, 52000, 52000)
+ CORE_DVFS("sdmmc3", 1, KHZ, 44000, 52000, 52000, 52000, 52000)
+ CORE_DVFS("sdmmc4", 1, KHZ, 44000, 52000, 52000, 52000, 52000)
+#endif
+
+ CORE_DVFS("ndflash", 1, KHZ, 130000, 150000, 158000, 164000, 164000)
+ CORE_DVFS("nor", 1, KHZ, 0, 92000, 92000, 92000, 92000)
+ CORE_DVFS("ide", 1, KHZ, 0, 0, 100000, 100000, 100000)
+ CORE_DVFS("mipi", 1, KHZ, 0, 40000, 40000, 40000, 60000)
+ CORE_DVFS("usbd", 1, KHZ, 0, 0, 480000, 480000, 480000)
+ CORE_DVFS("usb2", 1, KHZ, 0, 0, 480000, 480000, 480000)
+ CORE_DVFS("usb3", 1, KHZ, 0, 0, 480000, 480000, 480000)
+ CORE_DVFS("pcie", 1, KHZ, 0, 0, 0, 250000, 250000)
+ CORE_DVFS("dsi", 1, KHZ, 100000, 100000, 100000, 500000, 500000)
+ CORE_DVFS("tvo", 1, KHZ, 0, 0, 0, 250000, 250000)
/*
* The clock rate for the display controllers that determines the
@@ -102,27 +133,27 @@ static struct dvfs dvfs_init[] = {
* to the display block. Disable auto-dvfs on the display clocks,
* and let the display driver call tegra_dvfs_set_rate manually
*/
- CORE_DVFS("disp1", 0, KHZ, 158000, 158000, 190000, 190000, 190000),
- CORE_DVFS("disp2", 0, KHZ, 158000, 158000, 190000, 190000, 190000),
- CORE_DVFS("hdmi", 0, KHZ, 0, 0, 0, 148500, 148500),
+ CORE_DVFS("disp1", 0, KHZ, 158000, 158000, 190000, 190000, 190000)
+ CORE_DVFS("disp2", 0, KHZ, 158000, 158000, 190000, 190000, 190000)
+ CORE_DVFS("hdmi", 0, KHZ, 0, 0, 0, 148500, 148500)
/*
* These clocks technically depend on the core process id,
* but just use the worst case value for now
*/
- CORE_DVFS("host1x", 1, KHZ, 104500, 133000, 166000, 166000, 166000),
- CORE_DVFS("epp", 1, KHZ, 133000, 171000, 247000, 300000, 300000),
- CORE_DVFS("2d", 1, KHZ, 133000, 171000, 247000, 300000, 300000),
- CORE_DVFS("3d", 1, KHZ, 114000, 161500, 247000, 300000, 300000),
- CORE_DVFS("mpe", 1, KHZ, 104500, 152000, 228000, 250000, 250000),
- CORE_DVFS("vi", 1, KHZ, 85000, 100000, 150000, 150000, 150000),
- CORE_DVFS("sclk", 1, KHZ, 95000, 133000, 190000, 250000, 250000),
- CORE_DVFS("vde", 1, KHZ, 95000, 123500, 209000, 250000, 250000),
+ CORE_DVFS("host1x", 1, KHZ, 104500, 133000, 166000, 166000, 166000)
+ CORE_DVFS("epp", 1, KHZ, 133000, 171000, 247000, 300000, 300000)
+ CORE_DVFS("2d", 1, KHZ, 133000, 171000, 247000, 300000, 300000)
+ CORE_DVFS("3d", 1, KHZ, 114000, 161500, 247000, 300000, 300000)
+ CORE_DVFS("mpe", 1, KHZ, 104500, 152000, 228000, 250000, 250000)
+ CORE_DVFS("vi", 1, KHZ, 85000, 100000, 150000, 150000, 150000)
+ CORE_DVFS("sclk", 1, KHZ, 95000, 133000, 190000, 250000, 250000)
+ CORE_DVFS("vde", 1, KHZ, 95000, 123500, 209000, 250000, 250000)
/* What is this? */
- CORE_DVFS("NVRM_DEVID_CLK_SRC", 1, MHZ, 480, 600, 800, 1067, 1067),
+ CORE_DVFS("NVRM_DEVID_CLK_SRC", 1, MHZ, 480, 600, 800, 1067, 1067)
};
-void tegra2_init_dvfs(void)
+void __init tegra2_init_dvfs(void)
{
int i;
struct clk *c;
diff --git a/arch/arm/mach-tegra/tegra_i2s_audio.c b/arch/arm/mach-tegra/tegra_i2s_audio.c
index 277532c22700..f8a67f1e2043 100644
--- a/arch/arm/mach-tegra/tegra_i2s_audio.c
+++ b/arch/arm/mach-tegra/tegra_i2s_audio.c
@@ -45,6 +45,8 @@
#include <linux/delay.h>
#include <linux/tegra_audio.h>
#include <linux/pm.h>
+#include <linux/workqueue.h>
+
#include <mach/dma.h>
#include <mach/iomap.h>
#include <mach/i2s.h>
@@ -91,6 +93,7 @@ struct audio_stream {
struct tegra_dma_req dma_req;
struct pm_qos_request_list pm_qos;
+ struct work_struct allow_suspend_work;
struct wake_lock wake_lock;
char wake_lock_name[100];
};
@@ -221,17 +224,26 @@ static inline struct audio_driver_state *ads_from_in(
static inline void prevent_suspend(struct audio_stream *as)
{
pr_debug("%s\n", __func__);
+ cancel_work_sync(&as->allow_suspend_work);
wake_lock(&as->wake_lock);
pm_qos_update_request(&as->pm_qos, 0);
}
-static inline void allow_suspend(struct audio_stream *as)
+static void allow_suspend_worker(struct work_struct *w)
{
+ struct audio_stream *as = container_of(w,
+ struct audio_stream, allow_suspend_work);
+
pr_debug("%s\n", __func__);
pm_qos_update_request(&as->pm_qos, PM_QOS_DEFAULT_VALUE);
wake_unlock(&as->wake_lock);
}
+static inline void allow_suspend(struct audio_stream *as)
+{
+ schedule_work(&as->allow_suspend_work);
+}
+
#define I2S_I2S_FIFO_TX_BUSY I2S_I2S_STATUS_FIFO1_BSY
#define I2S_I2S_FIFO_TX_QS I2S_I2S_STATUS_QS_FIFO1
#define I2S_I2S_FIFO_TX_ERR I2S_I2S_STATUS_FIFO1_ERR
@@ -391,13 +403,15 @@ static int i2s_set_dsp_mode(unsigned long base, unsigned int mode)
if (mode != TEGRA_AUDIO_DSP_PCM) {
/* Disable PCM mode */
val = i2s_readl(base, I2S_I2S_PCM_CTRL_0);
- val &= ~(I2S_I2S_PCM_CTRL_TRM_MODE|I2S_I2S_PCM_CTRL_RCV_MODE);
+ val &= ~(I2S_I2S_PCM_CTRL_TRM_MODE |
+ I2S_I2S_PCM_CTRL_RCV_MODE);
i2s_writel(base, val, I2S_I2S_PCM_CTRL_0);
}
if (mode != TEGRA_AUDIO_DSP_NETWORK) {
/* Disable Network mode */
val = i2s_readl(base, I2S_I2S_NW_CTRL_0);
- val &= ~(I2S_I2S_NW_CTRL_TRM_TLPHY_MODE|I2S_I2S_NW_CTRL_RCV_TLPHY_MODE);
+ val &= ~(I2S_I2S_NW_CTRL_TRM_TLPHY_MODE |
+ I2S_I2S_NW_CTRL_RCV_TLPHY_MODE);
i2s_writel(base, val, I2S_I2S_NW_CTRL_0);
}
@@ -406,13 +420,15 @@ static int i2s_set_dsp_mode(unsigned long base, unsigned int mode)
case TEGRA_AUDIO_DSP_NETWORK:
/* Set DSP Network (Telephony) Mode */
val = i2s_readl(base, I2S_I2S_NW_CTRL_0);
- val |= I2S_I2S_NW_CTRL_TRM_TLPHY_MODE|I2S_I2S_NW_CTRL_RCV_TLPHY_MODE;
+ val |= I2S_I2S_NW_CTRL_TRM_TLPHY_MODE |
+ I2S_I2S_NW_CTRL_RCV_TLPHY_MODE;
i2s_writel(base, val, I2S_I2S_NW_CTRL_0);
break;
case TEGRA_AUDIO_DSP_PCM:
/* Set DSP PCM Mode */
val = i2s_readl(base, I2S_I2S_PCM_CTRL_0);
- val |= I2S_I2S_PCM_CTRL_TRM_MODE|I2S_I2S_PCM_CTRL_RCV_MODE;
+ val |= I2S_I2S_PCM_CTRL_TRM_MODE |
+ I2S_I2S_PCM_CTRL_RCV_MODE;
i2s_writel(base, val, I2S_I2S_PCM_CTRL_0);
break;
}
@@ -563,6 +579,58 @@ static inline u32 i2s_get_fifo_full_empty_count(unsigned long base, int fifo)
return val & I2S_I2S_FIFO_SCR_FIFO_FULL_EMPTY_COUNT_MASK;
}
+static int i2s_configure(struct platform_device *pdev)
+{
+ struct tegra_audio_platform_data *pdata = pdev->dev.platform_data;
+ struct audio_driver_state *state = pdata->driver_data;
+ bool master;
+ struct clk *i2s_clk;
+ int master_clk;
+
+ /* dev_info(&pdev->dev, "%s\n", __func__); */
+
+ if (!state)
+ return -ENOMEM;
+
+ /* disable interrupts from I2S */
+ i2s_enable_fifos(state->i2s_base, 0);
+ i2s_fifo_clear(state->i2s_base, I2S_FIFO_TX);
+ i2s_fifo_clear(state->i2s_base, I2S_FIFO_RX);
+ i2s_set_left_right_control_polarity(state->i2s_base, 0); /* default */
+
+ i2s_clk = clk_get(&pdev->dev, NULL);
+ if (!i2s_clk) {
+ dev_err(&pdev->dev, "%s: could not get i2s clock\n",
+ __func__);
+ return -EIO;
+ }
+
+ master = state->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP ?
+ state->pdata->dsp_master : state->pdata->i2s_master;
+
+
+ master_clk = state->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP ?
+ state->pdata->dsp_master_clk :
+ state->pdata->i2s_master_clk;
+#define I2S_CLK_FUDGE_FACTOR 2 /* Todo, fix this! */
+ if (master)
+ i2s_set_channel_bit_count(state->i2s_base, master_clk,
+ clk_get_rate(i2s_clk)*I2S_CLK_FUDGE_FACTOR);
+ i2s_set_master(state->i2s_base, master);
+
+ i2s_set_fifo_mode(state->i2s_base, I2S_FIFO_TX, 1);
+ i2s_set_fifo_mode(state->i2s_base, I2S_FIFO_RX, 0);
+
+ if (state->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP)
+ i2s_set_bit_format(state->i2s_base, I2S_BIT_FORMAT_DSP);
+ else
+ i2s_set_bit_format(state->i2s_base, state->pdata->mode);
+ i2s_set_bit_size(state->i2s_base, state->pdata->bit_size);
+ i2s_set_fifo_format(state->i2s_base, state->pdata->fifo_fmt);
+
+ return 0;
+}
+
static int init_stream_buffer(struct audio_stream *,
struct tegra_audio_buf_config *cfg, unsigned);
@@ -673,8 +741,10 @@ static bool wait_till_stopped(struct audio_stream *as)
{
int rc;
pr_debug("%s: wait for completion\n", __func__);
- rc = wait_for_completion_interruptible(
- &as->stop_completion);
+ rc = wait_for_completion_interruptible_timeout(
+ &as->stop_completion, HZ);
+ if (!rc)
+ pr_err("%s: wait timed out", __func__);
allow_suspend(as);
pr_debug("%s: done: %d\n", __func__, rc);
return true;
@@ -1492,6 +1562,7 @@ static long tegra_audio_ioctl(struct file *file,
goto done;
}
sound_ops->tear_down(ads);
+ i2s_configure(ads->pdev);
sound_ops->setup(ads);
}
@@ -1565,7 +1636,7 @@ static long tegra_audio_in_ioctl(struct file *file,
break;
}
#endif
- if(cfg.stereo && !ads->pdata->stereo_capture) {
+ if (cfg.stereo && !ads->pdata->stereo_capture) {
pr_err("%s: not capable of stereo capture.",
__func__);
rc = -EINVAL;
@@ -2348,14 +2419,14 @@ static int tegra_audio_probe(struct platform_device *pdev)
i2s_clk = clk_get(&pdev->dev, NULL);
if (!i2s_clk) {
- dev_err(&pdev->dev, "%s: could not get i2s1 clock\n",
+ dev_err(&pdev->dev, "%s: could not get i2s clock\n",
__func__);
return -EIO;
}
clk_set_rate(i2s_clk, state->pdata->i2s_clk_rate);
if (clk_enable(i2s_clk)) {
- dev_err(&pdev->dev, "%s: failed to enable i2s1 clock\n",
+ dev_err(&pdev->dev, "%s: failed to enable i2s clock\n",
__func__);
return -EIO;
}
@@ -2377,20 +2448,9 @@ static int tegra_audio_probe(struct platform_device *pdev)
}
clk_enable(audio_sync_clk);
- i2s_enable_fifos(state->i2s_base, 0);
- /* disable interrupts from I2S */
- i2s_fifo_clear(state->i2s_base, I2S_FIFO_TX);
- i2s_fifo_clear(state->i2s_base, I2S_FIFO_RX);
- i2s_set_left_right_control_polarity(state->i2s_base, 0); /* default */
- if (state->pdata->master)
- i2s_set_channel_bit_count(state->i2s_base, 44100,
- clk_get_rate(i2s_clk));
- i2s_set_master(state->i2s_base, state->pdata->master);
- i2s_set_fifo_mode(state->i2s_base, I2S_FIFO_TX, 1);
- i2s_set_fifo_mode(state->i2s_base, I2S_FIFO_RX, 0);
- i2s_set_bit_format(state->i2s_base, state->pdata->mode);
- i2s_set_bit_size(state->i2s_base, state->pdata->bit_size);
- i2s_set_fifo_format(state->i2s_base, state->pdata->fifo_fmt);
+ rc = i2s_configure(pdev);
+ if (rc < 0)
+ return rc;
if ((state->pdata->mask & TEGRA_AUDIO_ENABLE_TX)) {
state->out.opened = 0;
@@ -2412,6 +2472,7 @@ static int tegra_audio_probe(struct platform_device *pdev)
if (rc < 0)
return rc;
+ INIT_WORK(&state->out.allow_suspend_work, allow_suspend_worker);
pm_qos_add_request(&state->out.pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
@@ -2455,6 +2516,7 @@ static int tegra_audio_probe(struct platform_device *pdev)
if (rc < 0)
return rc;
+ INIT_WORK(&state->in.allow_suspend_work, allow_suspend_worker);
pm_qos_add_request(&state->in.pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
@@ -2534,37 +2596,7 @@ static int tegra_audio_suspend(struct platform_device *pdev, pm_message_t mesg)
static int tegra_audio_resume(struct platform_device *pdev)
{
- struct tegra_audio_platform_data *pdata = pdev->dev.platform_data;
- struct audio_driver_state *state = pdata->driver_data;
-
- /* dev_info(&pdev->dev, "%s\n", __func__); */
-
- if (!state)
- return -ENOMEM;
-
- /* disable interrupts from I2S */
- i2s_fifo_clear(state->i2s_base, I2S_FIFO_TX);
- i2s_fifo_clear(state->i2s_base, I2S_FIFO_RX);
- i2s_enable_fifos(state->i2s_base, 0);
-
- i2s_set_left_right_control_polarity(state->i2s_base, 0); /* default */
-
- if (state->pdata->master)
- i2s_set_channel_bit_count(state->i2s_base, 44100,
- state->pdata->i2s_clk_rate);
- i2s_set_master(state->i2s_base, state->pdata->master);
-
- i2s_set_fifo_mode(state->i2s_base, I2S_FIFO_TX, 1);
- i2s_set_fifo_mode(state->i2s_base, I2S_FIFO_RX, 0);
-
- if (state->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP)
- i2s_set_bit_format(state->i2s_base, I2S_BIT_FORMAT_DSP);
- else
- i2s_set_bit_format(state->i2s_base, state->pdata->mode);
- i2s_set_bit_size(state->i2s_base, state->pdata->bit_size);
- i2s_set_fifo_format(state->i2s_base, state->pdata->fifo_fmt);
-
- return 0;
+ return i2s_configure(pdev);
}
#endif /* CONFIG_PM */
diff --git a/arch/arm/mach-tegra/tegra_spdif_audio.c b/arch/arm/mach-tegra/tegra_spdif_audio.c
new file mode 100644
index 000000000000..fd96f2abf5eb
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra_spdif_audio.c
@@ -0,0 +1,1422 @@
+/*
+ * arch/arm/mach-tegra/tegra_spdif_audio.c
+ *
+ * S/PDIF audio driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/kfifo.h>
+#include <linux/debugfs.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/sysfs.h>
+#include <linux/pm_qos_params.h>
+#include <linux/wakelock.h>
+#include <linux/delay.h>
+#include <linux/tegra_audio.h>
+#include <linux/workqueue.h>
+
+#include <mach/dma.h>
+#include <mach/iomap.h>
+#include <mach/spdif.h>
+#include <mach/audio.h>
+#include <mach/irqs.h>
+
+#include "clock.h"
+
+#define PCM_BUFFER_MAX_SIZE_ORDER (PAGE_SHIFT + 2)
+#define PCM_BUFFER_DMA_CHUNK_SIZE_ORDER PAGE_SHIFT
+#define PCM_BUFFER_THRESHOLD_ORDER (PCM_BUFFER_MAX_SIZE_ORDER - 1)
+#define PCM_DMA_CHUNK_MIN_SIZE_ORDER 3
+
+#define PCM_IN_BUFFER_PADDING (1<<6) /* bytes */
+
+/* per stream (input/output) */
+struct audio_stream {
+ int opened;
+ struct mutex lock;
+
+ struct tegra_audio_buf_config buf_config;
+ bool active; /* is DMA or PIO in progress? */
+ void *buffer;
+ dma_addr_t buf_phys;
+ struct kfifo fifo;
+ struct completion fifo_completion;
+ struct scatterlist sg;
+
+ struct tegra_audio_error_counts errors;
+
+ int spdif_fifo_atn_level;
+
+ ktime_t last_dma_ts;
+ struct tegra_dma_channel *dma_chan;
+ bool stop;
+ struct completion stop_completion;
+ spinlock_t dma_req_lock; /* guards dma_has_it */
+ int dma_has_it;
+ struct tegra_dma_req dma_req;
+
+ struct pm_qos_request_list pm_qos;
+ struct work_struct allow_suspend_work;
+ struct wake_lock wake_lock;
+ char wake_lock_name[100];
+};
+
+struct spdif_pio_stats {
+ u32 spdif_interrupt_count;
+ u32 tx_fifo_errors;
+ u32 tx_fifo_written;
+};
+
+
+struct audio_driver_state {
+ struct list_head next;
+
+ struct platform_device *pdev;
+ struct tegra_audio_platform_data *pdata;
+ phys_addr_t spdif_phys;
+ unsigned long spdif_base;
+
+ bool using_dma;
+ unsigned long dma_req_sel;
+
+ int irq; /* for pio mode */
+ struct spdif_pio_stats pio_stats;
+ const int *in_divs;
+ int in_divs_len;
+
+ struct miscdevice misc_out;
+ struct miscdevice misc_out_ctl;
+ struct audio_stream out;
+};
+
+static inline int buf_size(struct audio_stream *s)
+{
+ return 1 << s->buf_config.size;
+}
+
+static inline int chunk_size(struct audio_stream *s)
+{
+ return 1 << s->buf_config.chunk;
+}
+
+static inline int threshold_size(struct audio_stream *s)
+{
+ return 1 << s->buf_config.threshold;
+}
+
+static inline struct audio_driver_state *ads_from_misc_out(struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state, misc_out);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_misc_out_ctl(
+ struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state,
+ misc_out_ctl);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_out(
+ struct audio_stream *aos)
+{
+ return container_of(aos, struct audio_driver_state, out);
+}
+
+static inline void prevent_suspend(struct audio_stream *as)
+{
+ pr_debug("%s\n", __func__);
+ cancel_work_sync(&as->allow_suspend_work);
+ wake_lock(&as->wake_lock);
+ pm_qos_update_request(&as->pm_qos, 0);
+}
+
+static void allow_suspend_worker(struct work_struct *w)
+{
+ struct audio_stream *as = container_of(w,
+ struct audio_stream, allow_suspend_work);
+
+ pr_debug("%s\n", __func__);
+ pm_qos_update_request(&as->pm_qos, PM_QOS_DEFAULT_VALUE);
+ wake_unlock(&as->wake_lock);
+}
+
+static inline void allow_suspend(struct audio_stream *as)
+{
+ schedule_work(&as->allow_suspend_work);
+}
+
+#define I2S_I2S_FIFO_TX_BUSY I2S_I2S_STATUS_FIFO1_BSY
+#define I2S_I2S_FIFO_TX_QS I2S_I2S_STATUS_QS_FIFO1
+#define I2S_I2S_FIFO_TX_ERR I2S_I2S_STATUS_FIFO1_ERR
+
+#define I2S_I2S_FIFO_RX_BUSY I2S_I2S_STATUS_FIFO2_BSY
+#define I2S_I2S_FIFO_RX_QS I2S_I2S_STATUS_QS_FIFO2
+#define I2S_I2S_FIFO_RX_ERR I2S_I2S_STATUS_FIFO2_ERR
+
+#define I2S_FIFO_ERR (I2S_I2S_STATUS_FIFO1_ERR | I2S_I2S_STATUS_FIFO2_ERR)
+
+
+static inline void spdif_writel(unsigned long base, u32 val, u32 reg)
+{
+ writel(val, base + reg);
+}
+
+static inline u32 spdif_readl(unsigned long base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void spdif_fifo_write(unsigned long base, u32 data)
+{
+ spdif_writel(base, data, SPDIF_DATA_OUT_0);
+}
+
+static int spdif_fifo_set_attention_level(unsigned long base,
+ unsigned level)
+{
+ u32 val;
+
+ if (level > SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS) {
+ pr_err("%s: invalid fifo level selector %d\n", __func__,
+ level);
+ return -EINVAL;
+ }
+
+ val = spdif_readl(base, SPDIF_DATA_FIFO_CSR_0);
+
+ val &= ~SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_MASK;
+ val |= level << SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT;
+
+
+ spdif_writel(base, val, SPDIF_DATA_FIFO_CSR_0);
+ return 0;
+}
+
+static void spdif_fifo_enable(unsigned long base, int on)
+{
+ u32 val = spdif_readl(base, SPDIF_CTRL_0);
+ val &= ~(SPDIF_CTRL_0_TX_EN | SPDIF_CTRL_0_TC_EN);
+ val |= on ? (SPDIF_CTRL_0_TX_EN) : 0;
+ val |= on ? (SPDIF_CTRL_0_TC_EN) : 0;
+
+ spdif_writel(base, val, SPDIF_CTRL_0);
+}
+
+static bool spdif_is_fifo_enabled(unsigned long base)
+{
+ u32 val = spdif_readl(base, SPDIF_CTRL_0);
+ return !!(val & SPDIF_CTRL_0_TX_EN);
+}
+
+static void spdif_fifo_clear(unsigned long base)
+{
+ u32 val = spdif_readl(base, SPDIF_DATA_FIFO_CSR_0);
+ val &= ~(SPDIF_DATA_FIFO_CSR_0_TX_CLR | SPDIF_DATA_FIFO_CSR_0_TU_CLR);
+ val |= SPDIF_DATA_FIFO_CSR_0_TX_CLR | SPDIF_DATA_FIFO_CSR_0_TU_CLR;
+ spdif_writel(base, val, SPDIF_DATA_FIFO_CSR_0);
+}
+
+
+static int spdif_set_bit_mode(unsigned long base, unsigned mode)
+{
+ u32 val = spdif_readl(base, SPDIF_CTRL_0);
+ val &= ~SPDIF_CTRL_0_BIT_MODE_MASK;
+
+ if (mode > SPDIF_BIT_MODE_MODERAW) {
+ pr_err("%s: invalid bit_size selector %d\n", __func__,
+ mode);
+ return -EINVAL;
+ }
+
+ val |= mode << SPDIF_CTRL_0_BIT_MODE_SHIFT;
+
+ spdif_writel(base, val, SPDIF_CTRL_0);
+ return 0;
+}
+
+static int spdif_set_fifo_packed(unsigned long base, unsigned on)
+{
+ u32 val = spdif_readl(base, SPDIF_CTRL_0);
+ val &= ~SPDIF_CTRL_0_PACK;
+ val |= on ? SPDIF_CTRL_0_PACK : 0;
+ spdif_writel(base, val, SPDIF_CTRL_0);
+ return 0;
+}
+
+
+static void spdif_set_fifo_irq_on_err(unsigned long base, int on)
+{
+ u32 val = spdif_readl(base, SPDIF_CTRL_0);
+ val &= ~SPDIF_CTRL_0_IE_TXE;
+ val |= on ? SPDIF_CTRL_0_IE_TXE : 0;
+ spdif_writel(base, val, SPDIF_CTRL_0);
+}
+
+
+
+static void spdif_enable_fifos(unsigned long base, int on)
+{
+ u32 val = spdif_readl(base, SPDIF_CTRL_0);
+ if (on)
+ val |= SPDIF_CTRL_0_TX_EN | SPDIF_CTRL_0_TC_EN |
+ SPDIF_CTRL_0_IE_TXE;
+ else
+ val &= ~(SPDIF_CTRL_0_TX_EN | SPDIF_CTRL_0_TC_EN |
+ SPDIF_CTRL_0_IE_TXE);
+
+ spdif_writel(base, val, SPDIF_CTRL_0);
+}
+
+static inline u32 spdif_get_status(unsigned long base)
+{
+ return spdif_readl(base, SPDIF_STATUS_0);
+}
+
+static inline u32 spdif_get_control(unsigned long base)
+{
+ return spdif_readl(base, SPDIF_CTRL_0);
+}
+
+static inline void spdif_ack_status(unsigned long base)
+{
+ return spdif_writel(base, spdif_readl(base, SPDIF_STATUS_0),
+ SPDIF_STATUS_0);
+}
+
+static inline u32 spdif_get_fifo_scr(unsigned long base)
+{
+ return spdif_readl(base, SPDIF_DATA_FIFO_CSR_0);
+}
+
+static inline phys_addr_t spdif_get_fifo_phy_base(unsigned long phy_base)
+{
+ return phy_base + SPDIF_DATA_OUT_0;
+}
+
+static inline u32 spdif_get_fifo_full_empty_count(unsigned long base)
+{
+ u32 val = spdif_readl(base, SPDIF_DATA_FIFO_CSR_0);
+ val = val >> SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_SHIFT;
+ return val & SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_MASK;
+}
+
+
+static int spdif_set_sample_rate(struct audio_driver_state *state,
+ unsigned int sample_rate)
+{
+ unsigned int clock_freq = 0;
+ unsigned int parent_clock_freq = 0;
+ struct clk *spdif_clk;
+
+ unsigned int ch_sta[] = {
+ 0x0, /* 44.1, default values */
+ 0xf << 4, /* bits 36-39, original sample freq -- 44.1 */
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ };
+
+ switch (sample_rate) {
+ case 32000:
+ clock_freq = 4096000; /* 4.0960 MHz */
+ parent_clock_freq = 12288000;
+ ch_sta[0] = 0x3 << 24;
+ ch_sta[1] = 0xC << 4;
+ break;
+ case 44100:
+ clock_freq = 5644800; /* 5.6448 MHz */
+ parent_clock_freq = 11289600;
+ ch_sta[0] = 0x0;
+ ch_sta[1] = 0xF << 4;
+ break;
+ case 48000:
+ clock_freq = 6144000; /* 6.1440MHz */
+ parent_clock_freq = 12288000;
+ ch_sta[0] = 0x2 << 24;
+ ch_sta[1] = 0xD << 4;
+ break;
+ case 88200:
+ clock_freq = 11289600; /* 11.2896 MHz */
+ parent_clock_freq = 11289600;
+ break;
+ case 96000:
+ clock_freq = 12288000; /* 12.288 MHz */
+ parent_clock_freq = 12288000;
+ break;
+ case 176400:
+ clock_freq = 22579200; /* 22.5792 MHz */
+ parent_clock_freq = 11289600;
+ break;
+ case 192000:
+ clock_freq = 24576000; /* 24.5760 MHz */
+ parent_clock_freq = 12288000;
+ break;
+ default:
+ return -1;
+ }
+
+ spdif_clk = clk_get(&state->pdev->dev, NULL);
+ if (!spdif_clk) {
+ dev_err(&state->pdev->dev, "%s: could not get spdif clock\n",
+ __func__);
+ return -EIO;
+ }
+
+ clk_set_rate(spdif_clk, clock_freq);
+ if (clk_enable(spdif_clk)) {
+ dev_err(&state->pdev->dev,
+ "%s: failed to enable spdif_clk clock\n", __func__);
+ return -EIO;
+ }
+ pr_info("%s: spdif_clk rate %ld\n", __func__, clk_get_rate(spdif_clk));
+
+ spdif_writel(state->spdif_base, ch_sta[0], SPDIF_CH_STA_TX_A_0);
+ spdif_writel(state->spdif_base, ch_sta[1], SPDIF_CH_STA_TX_B_0);
+ spdif_writel(state->spdif_base, ch_sta[2], SPDIF_CH_STA_TX_C_0);
+ spdif_writel(state->spdif_base, ch_sta[3], SPDIF_CH_STA_TX_D_0);
+ spdif_writel(state->spdif_base, ch_sta[4], SPDIF_CH_STA_TX_E_0);
+ spdif_writel(state->spdif_base, ch_sta[5], SPDIF_CH_STA_TX_F_0);
+
+ return 0;
+}
+
+static int init_stream_buffer(struct audio_stream *,
+ struct tegra_audio_buf_config *cfg, unsigned);
+
+static int setup_dma(struct audio_driver_state *);
+static void tear_down_dma(struct audio_driver_state *);
+static int start_dma_playback(struct audio_stream *);
+static void stop_dma_playback(struct audio_stream *);
+
+static int setup_pio(struct audio_driver_state *);
+static void tear_down_pio(struct audio_driver_state *);
+static int start_pio_playback(struct audio_stream *);
+static void stop_pio_playback(struct audio_stream *);
+
+
+struct sound_ops {
+ int (*setup)(struct audio_driver_state *);
+ void (*tear_down)(struct audio_driver_state *);
+ int (*start_playback)(struct audio_stream *);
+ void (*stop_playback)(struct audio_stream *);
+};
+
+static const struct sound_ops dma_sound_ops = {
+ .setup = setup_dma,
+ .tear_down = tear_down_dma,
+ .start_playback = start_dma_playback,
+ .stop_playback = stop_dma_playback,
+};
+
+static const struct sound_ops pio_sound_ops = {
+ .setup = setup_pio,
+ .tear_down = tear_down_pio,
+ .start_playback = start_pio_playback,
+ .stop_playback = stop_pio_playback,
+};
+
+static const struct sound_ops *sound_ops = &dma_sound_ops;
+
+static int start_playback(struct audio_stream *aos)
+{
+ int rc;
+ unsigned long flags;
+ spin_lock_irqsave(&aos->dma_req_lock, flags);
+ pr_debug("%s: starting playback\n", __func__);
+ rc = sound_ops->start_playback(aos);
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+ if (!rc)
+ prevent_suspend(aos);
+ return rc;
+}
+
+
+static bool stop_playback_if_necessary(struct audio_stream *aos)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&aos->dma_req_lock, flags);
+ if (kfifo_is_empty(&aos->fifo)) {
+ sound_ops->stop_playback(aos);
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+ allow_suspend(aos);
+ return true;
+ }
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+
+ return false;
+}
+
+/* playback */
+static bool wait_till_stopped(struct audio_stream *as)
+{
+ int rc;
+ pr_debug("%s: wait for completion\n", __func__);
+ rc = wait_for_completion_interruptible_timeout(
+ &as->stop_completion, HZ);
+ if (!rc)
+ pr_err("%s: wait timed out\n", __func__);
+ allow_suspend(as);
+ pr_debug("%s: done: %d\n", __func__, rc);
+ return true;
+}
+
+/* Ask for playback to stop. The _nosync means that
+ * as->lock has to be locked by the caller.
+ */
+static void request_stop_nosync(struct audio_stream *as)
+{
+ pr_debug("%s\n", __func__);
+ if (!as->stop) {
+ as->stop = true;
+ wait_till_stopped(as);
+ if (!completion_done(&as->fifo_completion)) {
+ pr_debug("%s: complete\n", __func__);
+ complete(&as->fifo_completion);
+ }
+ }
+ kfifo_reset(&as->fifo);
+ as->active = false; /* applies to recording only */
+ pr_debug("%s: done\n", __func__);
+}
+
+static void toggle_dma(struct audio_driver_state *ads)
+{
+ pr_info("%s: %s\n", __func__, ads->using_dma ? "pio" : "dma");
+ sound_ops->tear_down(ads);
+ sound_ops = ads->using_dma ? &pio_sound_ops : &dma_sound_ops;
+ sound_ops->setup(ads);
+ ads->using_dma = !ads->using_dma;
+}
+
+/* DMA */
+
+static int resume_dma_playback(struct audio_stream *aos);
+
+static void setup_dma_tx_request(struct tegra_dma_req *req,
+ struct audio_stream *aos);
+
+static int setup_dma(struct audio_driver_state *ads)
+{
+ int rc;
+ pr_info("%s\n", __func__);
+
+ /* setup audio playback */
+ ads->out.buf_phys = dma_map_single(&ads->pdev->dev, ads->out.buffer,
+ 1 << PCM_BUFFER_MAX_SIZE_ORDER, DMA_TO_DEVICE);
+ BUG_ON(!ads->out.buf_phys);
+ setup_dma_tx_request(&ads->out.dma_req, &ads->out);
+ ads->out.dma_chan = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
+ if (!ads->out.dma_chan) {
+ pr_err("%s: could not allocate output SPDIF DMA channel: %ld\n",
+ __func__, PTR_ERR(ads->out.dma_chan));
+ rc = -ENODEV;
+ goto fail_tx;
+ }
+ return 0;
+
+
+fail_tx:
+ dma_unmap_single(&ads->pdev->dev, ads->out.buf_phys,
+ 1 << PCM_BUFFER_MAX_SIZE_ORDER, DMA_TO_DEVICE);
+ tegra_dma_free_channel(ads->out.dma_chan);
+ ads->out.dma_chan = 0;
+
+ return rc;
+}
+
+static void tear_down_dma(struct audio_driver_state *ads)
+{
+ pr_info("%s\n", __func__);
+
+ tegra_dma_free_channel(ads->out.dma_chan);
+ ads->out.dma_chan = NULL;
+ dma_unmap_single(&ads->pdev->dev, ads->out.buf_phys,
+ buf_size(&ads->out),
+ DMA_TO_DEVICE);
+ ads->out.buf_phys = 0;
+}
+
+static void dma_tx_complete_callback(struct tegra_dma_req *req)
+{
+ unsigned long flags;
+ struct audio_stream *aos = req->dev;
+ int count = req->bytes_transferred;
+ u64 delta_us;
+ u64 max_delay_us = count * 10000 / (4 * 441);
+
+ pr_debug("%s bytes transferred %d\n", __func__, count);
+
+ aos->dma_has_it = false;
+ delta_us = ktime_to_us(ktime_sub(ktime_get_real(), aos->last_dma_ts));
+
+ if (delta_us > max_delay_us) {
+ pr_debug("%s: too late by %lld us\n", __func__,
+ delta_us - max_delay_us);
+ aos->errors.late_dma++;
+ }
+
+ kfifo_dma_out_finish(&aos->fifo, count);
+ dma_unmap_sg(NULL, &aos->sg, 1, DMA_TO_DEVICE);
+
+ if (!completion_done(&aos->fifo_completion)) {
+ pr_debug("%s: complete (%d avail)\n", __func__,
+ kfifo_avail(&aos->fifo));
+ complete(&aos->fifo_completion);
+ }
+
+ if (stop_playback_if_necessary(aos)) {
+ pr_debug("%s: done (stopped)\n", __func__);
+ if (!completion_done(&aos->stop_completion)) {
+ pr_debug("%s: signalling stop completion\n", __func__);
+ complete(&aos->stop_completion);
+ }
+ return;
+ }
+
+ spin_lock_irqsave(&aos->dma_req_lock, flags);
+ resume_dma_playback(aos);
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+}
+
+static void setup_dma_tx_request(struct tegra_dma_req *req,
+ struct audio_stream *aos)
+{
+ struct audio_driver_state *ads = ads_from_out(aos);
+
+ memset(req, 0, sizeof(*req));
+
+ req->complete = dma_tx_complete_callback;
+ req->dev = aos;
+ req->to_memory = false;
+ req->dest_addr = spdif_get_fifo_phy_base(ads->spdif_phys);
+ req->dest_wrap = 4;
+ req->dest_bus_width = 16;
+ req->source_bus_width = 32;
+ req->source_wrap = 0;
+ req->req_sel = ads->dma_req_sel;
+}
+
+
+/* Called with aos->dma_req_lock taken. */
+static int resume_dma_playback(struct audio_stream *aos)
+{
+ int rc;
+ struct audio_driver_state *ads = ads_from_out(aos);
+ struct tegra_dma_req *req = &aos->dma_req;
+
+ if (aos->dma_has_it) {
+ pr_debug("%s: playback already in progress\n", __func__);
+ return -EALREADY;
+ }
+
+ rc = kfifo_dma_out_prepare(&aos->fifo, &aos->sg,
+ 1, kfifo_len(&aos->fifo));
+ /* stop_playback_if_necessary() already checks to see if the fifo is
+ * empty.
+ */
+ BUG_ON(!rc);
+ rc = dma_map_sg(NULL, &aos->sg, 1, DMA_TO_DEVICE);
+ if (rc < 0) {
+ pr_err("%s: could not map dma memory: %d\n", __func__, rc);
+ return rc;
+ }
+
+#if 0
+ spdif_fifo_clear(ads->spdif_base);
+#endif
+ spdif_fifo_set_attention_level(ads->spdif_base,
+ aos->spdif_fifo_atn_level);
+
+ req->source_addr = sg_dma_address(&aos->sg);
+ req->size = sg_dma_len(&aos->sg);
+ dma_sync_single_for_device(NULL,
+ req->source_addr, req->size, DMA_TO_DEVICE);
+
+ /* Don't send all the data yet. */
+ if (req->size > chunk_size(aos))
+ req->size = chunk_size(aos);
+ pr_debug("%s resume playback (%d in fifo, writing %d)\n",
+ __func__, kfifo_len(&aos->fifo), req->size);
+
+ spdif_fifo_enable(ads->spdif_base, 1);
+
+ aos->last_dma_ts = ktime_get_real();
+ rc = tegra_dma_enqueue_req(aos->dma_chan, req);
+ aos->dma_has_it = !rc;
+ if (!aos->dma_has_it)
+ pr_err("%s: could not enqueue TX DMA req\n", __func__);
+ return rc;
+}
+
/* Called with aos->dma_req_lock taken.
 * Starting playback on an idle stream is the same operation as resuming
 * it from the head of the pending data. */
static int start_dma_playback(struct audio_stream *aos)
{
	return resume_dma_playback(aos);
}
+
/* Called with aos->dma_req_lock taken. */
static void stop_dma_playback(struct audio_stream *aos)
{
	/* Disable the TX FIFO, then busy-wait (up to ~1 ms in 10 us steps)
	 * for the SPDIF transmitter to go idle; warn if it never does. */
	int spin = 0;
	struct audio_driver_state *ads = ads_from_out(aos);
	pr_debug("%s\n", __func__);
	spdif_fifo_enable(ads->spdif_base, 0);
	while ((spdif_get_status(ads->spdif_base) & SPDIF_STATUS_0_TX_BSY) &&
			spin < 100) {
		udelay(10);
		if (spin++ > 50)
			pr_info("%s: spin %d\n", __func__, spin);
	}
	if (spin == 100)
		pr_warn("%s: spinny\n", __func__);
}
+
/* PIO (non-DMA) */

/* Prepare programmed-I/O playback: the SPDIF ISR feeds the TX FIFO
 * directly, so all that is needed is to unmask the interrupt line. */
static int setup_pio(struct audio_driver_state *ads)
{
	pr_info("%s\n", __func__);
	enable_irq(ads->irq);
	return 0;
}
+
/* Mask the SPDIF interrupt line; counterpart of setup_pio(). */
static void tear_down_pio(struct audio_driver_state *ads)
{
	pr_info("%s\n", __func__);
	disable_irq(ads->irq);
}
+
/* Start PIO playback: program the FIFO attention level, enable error
 * interrupts and the TX FIFO.  Returns -EALREADY if already running. */
static int start_pio_playback(struct audio_stream *aos)
{
	struct audio_driver_state *ads = ads_from_out(aos);

	if (spdif_is_fifo_enabled(ads->spdif_base)) {
		pr_debug("%s: playback is already in progress\n", __func__);
		return -EALREADY;
	}

	pr_debug("%s\n", __func__);

	spdif_fifo_set_attention_level(ads->spdif_base,
		aos->spdif_fifo_atn_level);
#if 0
	spdif_fifo_clear(ads->spdif_base);
#endif

	spdif_set_fifo_irq_on_err(ads->spdif_base, 1);
	spdif_fifo_enable(ads->spdif_base, 1);

	return 0;
}
+
/* Stop PIO playback: mask error interrupts, disable the TX FIFO, wait
 * for the transmitter to drain, then dump and reset the PIO statistics.
 * NOTE(review): unlike stop_dma_playback(), this drain loop has no
 * timeout — confirm TX_BSY always clears once the FIFO is disabled. */
static void stop_pio_playback(struct audio_stream *aos)
{
	struct audio_driver_state *ads = ads_from_out(aos);

	spdif_set_fifo_irq_on_err(ads->spdif_base, 0);
	spdif_fifo_enable(ads->spdif_base, 0);
	while (spdif_get_status(ads->spdif_base) & SPDIF_STATUS_0_TX_BSY)
		/* spin */;

	pr_info("%s: interrupts %d\n", __func__,
		ads->pio_stats.spdif_interrupt_count);
	pr_info("%s: sent %d\n", __func__,
		ads->pio_stats.tx_fifo_written);
	pr_info("%s: tx errors %d\n", __func__,
		ads->pio_stats.tx_fifo_errors);

	memset(&ads->pio_stats, 0, sizeof(ads->pio_stats));
}
+
+
+static irqreturn_t spdif_interrupt(int irq, void *data)
+{
+ struct audio_driver_state *ads = data;
+ u32 status = spdif_get_status(ads->spdif_base);
+
+ pr_debug("%s: %08x\n", __func__, status);
+
+ ads->pio_stats.spdif_interrupt_count++;
+
+ if (status & SPDIF_CTRL_0_IE_TXE)
+ ads->pio_stats.tx_fifo_errors++;
+
+#if 0
+ if (status & SPDIF_STATUS_0_TX_ERR)
+#endif
+ spdif_ack_status(ads->spdif_base);
+
+ if (status & SPDIF_STATUS_0_QS_TX) {
+ int written;
+ int empty;
+ int len;
+ u16 fifo_buffer[32];
+
+ struct audio_stream *out = &ads->out;
+
+ if (!spdif_is_fifo_enabled(ads->spdif_base)) {
+ pr_debug("%s: tx fifo not enabled, skipping\n",
+ __func__);
+ goto done;
+ }
+
+ pr_debug("%s tx fifo is ready\n", __func__);
+
+ if (!completion_done(&out->fifo_completion)) {
+ pr_debug("%s: tx complete (%d avail)\n", __func__,
+ kfifo_avail(&out->fifo));
+ complete(&out->fifo_completion);
+ }
+
+ if (stop_playback_if_necessary(out)) {
+ pr_debug("%s: done (stopped)\n", __func__);
+ if (!completion_done(&out->stop_completion)) {
+ pr_debug("%s: signalling stop completion\n",
+ __func__);
+ complete(&out->stop_completion);
+ }
+ goto done;
+ }
+
+ empty = spdif_get_fifo_full_empty_count(ads->spdif_base);
+
+ len = kfifo_out(&out->fifo, fifo_buffer,
+ empty * sizeof(u16));
+ len /= sizeof(u16);
+
+ written = 0;
+ while (empty-- && written < len) {
+ ads->pio_stats.tx_fifo_written += written * sizeof(u16);
+ spdif_fifo_write(ads->spdif_base,
+ fifo_buffer[written++]);
+ }
+
+ /* TODO: Should we check to see if we wrote less than the
+ * FIFO threshold and adjust it if so?
+ */
+
+ if (written) {
+ /* start the transaction */
+ pr_debug("%s: enabling fifo (%d samples written)\n",
+ __func__, written);
+ spdif_fifo_enable(ads->spdif_base, 1);
+ }
+ }
+
+done:
+ pr_debug("%s: done %08x\n", __func__,
+ spdif_get_status(ads->spdif_base));
+ return IRQ_HANDLED;
+}
+
+static ssize_t tegra_spdif_write(struct file *file,
+ const char __user *buf, size_t size, loff_t *off)
+{
+ ssize_t rc = 0, total = 0;
+ unsigned nw = 0;
+
+ struct audio_driver_state *ads = ads_from_misc_out(file);
+
+ mutex_lock(&ads->out.lock);
+
+if (!IS_ALIGNED(size, 4)) {
+ pr_err("%s: user size request %d not aligned to 4\n",
+ __func__, size);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ pr_debug("%s: write %d bytes, %d available\n", __func__,
+ size, kfifo_avail(&ads->out.fifo));
+
+again:
+ if (ads->out.stop) {
+ pr_info("%s: playback has been cancelled (%d/%d bytes)\n",
+ __func__, total, size);
+ goto done;
+ }
+
+ rc = kfifo_from_user(&ads->out.fifo, buf + total, size - total, &nw);
+ if (rc < 0) {
+ pr_err("%s: error copying from user\n", __func__);
+ goto done;
+ }
+
+ rc = start_playback(&ads->out);
+ if (rc < 0 && rc != -EALREADY) {
+ pr_err("%s: could not start playback: %d\n", __func__, rc);
+ goto done;
+ }
+
+ total += nw;
+ if (total < size) {
+ pr_debug("%s: sleep (user %d total %d nw %d)\n", __func__,
+ size, total, nw);
+ mutex_unlock(&ads->out.lock);
+ rc = wait_for_completion_interruptible(
+ &ads->out.fifo_completion);
+ mutex_lock(&ads->out.lock);
+ if (rc == -ERESTARTSYS) {
+ pr_warn("%s: interrupted\n", __func__);
+ goto done;
+ }
+ pr_debug("%s: awake\n", __func__);
+ goto again;
+ }
+
+ rc = total;
+ *off += total;
+
+done:
+ mutex_unlock(&ads->out.lock);
+ return rc;
+}
+
/* ioctl handler for the /dev/spdif_out_ctl control node.
 * Serialised against the data path by ads->out.lock. */
static long tegra_spdif_out_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int rc = 0;
	struct audio_driver_state *ads = ads_from_misc_out_ctl(file);
	struct audio_stream *aos = &ads->out;

	mutex_lock(&aos->lock);

	switch (cmd) {
	case TEGRA_AUDIO_OUT_SET_BUF_CONFIG: {
		/* reconfigure the stream buffer; refused while data is queued */
		struct tegra_audio_buf_config cfg;
		if (copy_from_user(&cfg, (void __user *)arg, sizeof(cfg))) {
			rc = -EFAULT;
			break;
		}
		if (kfifo_len(&aos->fifo)) {
			pr_err("%s: playback in progress\n", __func__);
			rc = -EBUSY;
			break;
		}
		rc = init_stream_buffer(aos, &cfg, 0);
		if (rc < 0)
			break;
		aos->buf_config = cfg;
	}
	break;
	case TEGRA_AUDIO_OUT_GET_BUF_CONFIG:
		if (copy_to_user((void __user *)arg, &aos->buf_config,
				sizeof(aos->buf_config)))
			rc = -EFAULT;
		break;
	case TEGRA_AUDIO_OUT_GET_ERROR_COUNT:
		/* read-and-clear semantics: counters reset after a
		 * successful copy to userspace */
		if (copy_to_user((void __user *)arg, &aos->errors,
				sizeof(aos->errors)))
			rc = -EFAULT;
		if (!rc)
			memset(&aos->errors, 0, sizeof(aos->errors));
		break;
	case TEGRA_AUDIO_OUT_FLUSH:
		if (kfifo_len(&aos->fifo)) {
			pr_debug("%s: flushing\n", __func__);
			request_stop_nosync(aos);
			pr_debug("%s: flushed\n", __func__);
		}
		/* re-arm the stream after the flush */
		aos->stop = false;
		break;
	default:
		rc = -EINVAL;
	}

	mutex_unlock(&aos->lock);
	return rc;
}
+
+static int tegra_spdif_out_open(struct inode *inode, struct file *file)
+{
+ int rc = 0;
+ struct audio_driver_state *ads = ads_from_misc_out(file);
+
+ pr_info("%s\n", __func__);
+
+ mutex_lock(&ads->out.lock);
+ if (!ads->out.opened++) {
+ pr_debug("%s: resetting fifo and error count\n", __func__);
+ ads->out.stop = false;
+ memset(&ads->out.errors, 0, sizeof(ads->out.errors));
+ kfifo_reset(&ads->out.fifo);
+
+ rc = spdif_set_sample_rate(ads, 44100);
+ }
+
+ mutex_unlock(&ads->out.lock);
+
+ return rc;
+}
+
+static int tegra_spdif_out_release(struct inode *inode, struct file *file)
+{
+ struct audio_driver_state *ads = ads_from_misc_out(file);
+ struct clk *spdif_clk;
+
+ pr_info("%s\n", __func__);
+
+ mutex_lock(&ads->out.lock);
+ if (ads->out.opened)
+ ads->out.opened--;
+ if (!ads->out.opened) {
+ stop_playback_if_necessary(&ads->out);
+
+ if (wake_lock_active(&ads->out.wake_lock))
+ pr_err("%s: wake lock is still held!\n", __func__);
+ if (kfifo_len(&ads->out.fifo))
+ pr_err("%s: output fifo is not empty (%d bytes left)\n",
+ __func__, kfifo_len(&ads->out.fifo));
+ allow_suspend(&ads->out);
+
+ spdif_clk = clk_get(&ads->pdev->dev, NULL);
+ if (!spdif_clk) {
+ dev_err(&ads->pdev->dev, "%s: could not get spdif "\
+ "clockk\n", __func__);
+ return -EIO;
+ }
+ clk_disable(spdif_clk);
+ }
+ mutex_unlock(&ads->out.lock);
+
+ return 0;
+}
+
/* /dev/spdif_out: the data path — write() queues PCM for playback. */
static const struct file_operations tegra_spdif_out_fops = {
	.owner = THIS_MODULE,
	.open = tegra_spdif_out_open,
	.release = tegra_spdif_out_release,
	.write = tegra_spdif_write,
};
+
/* The control node keeps no per-open state; open always succeeds. */
static int tegra_spdif_ctl_open(struct inode *inode, struct file *file)
{
	return 0;
}
+
/* Nothing to tear down for the control node. */
static int tegra_spdif_ctl_release(struct inode *inode, struct file *file)
{
	return 0;
}
+
/* /dev/spdif_out_ctl: the control path — buffer config, error counters
 * and flush via ioctl. */
static const struct file_operations tegra_spdif_out_ctl_fops = {
	.owner = THIS_MODULE,
	.open = tegra_spdif_ctl_open,
	.release = tegra_spdif_ctl_release,
	.unlocked_ioctl = tegra_spdif_out_ioctl,
};
+
+static int init_stream_buffer(struct audio_stream *s,
+ struct tegra_audio_buf_config *cfg,
+ unsigned padding)
+{
+ pr_info("%s (size %d threshold %d chunk %d)\n", __func__,
+ cfg->size, cfg->threshold, cfg->chunk);
+
+ if (cfg->chunk < PCM_DMA_CHUNK_MIN_SIZE_ORDER) {
+ pr_err("%s: chunk %d too small (%d min)\n", __func__,
+ cfg->chunk, PCM_DMA_CHUNK_MIN_SIZE_ORDER);
+ return -EINVAL;
+ }
+
+ if (cfg->chunk > cfg->size) {
+ pr_err("%s: chunk %d > size %d\n", __func__,
+ cfg->chunk, cfg->size);
+ return -EINVAL;
+ }
+
+ if (cfg->threshold > cfg->size) {
+ pr_err("%s: threshold %d > size %d\n", __func__,
+ cfg->threshold, cfg->size);
+ return -EINVAL;
+ }
+
+ if ((1 << cfg->size) < padding) {
+ pr_err("%s: size %d < buffer padding %d (bytes)\n", __func__,
+ cfg->size, padding);
+ return -EINVAL;
+ }
+
+ if (cfg->size > PCM_BUFFER_MAX_SIZE_ORDER) {
+ pr_err("%s: size %d exceeds max %d\n", __func__,
+ cfg->size, PCM_BUFFER_MAX_SIZE_ORDER);
+ return -EINVAL;
+ }
+
+ if (!s->buffer) {
+ pr_debug("%s: allocating buffer (size %d, padding %d)\n",
+ __func__, 1 << cfg->size, padding);
+ s->buffer = kmalloc((1 << cfg->size) + padding,
+ GFP_KERNEL | GFP_DMA);
+ }
+ if (!s->buffer) {
+ pr_err("%s: could not allocate output buffer\n", __func__);
+ return -ENOMEM;
+ }
+
+ kfifo_init(&s->fifo, s->buffer, 1 << cfg->size);
+ sg_init_table(&s->sg, 1);
+ return 0;
+}
+
+
+static int setup_misc_device(struct miscdevice *misc,
+ const struct file_operations *fops,
+ const char *fmt, ...)
+{
+ int rc = 0;
+ va_list args;
+ const int sz = 64;
+
+ va_start(args, fmt);
+
+ memset(misc, 0, sizeof(*misc));
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = kmalloc(sz, GFP_KERNEL);
+ if (!misc->name) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ vsnprintf((char *)misc->name, sz, fmt, args);
+ misc->fops = fops;
+ if (misc_register(misc)) {
+ pr_err("%s: could not register %s\n", __func__, misc->name);
+ kfree(misc->name);
+ rc = -EIO;
+ goto done;
+ }
+
+done:
+ va_end(args);
+ return rc;
+}
+
+static ssize_t dma_toggle_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tegra_audio_platform_data *pdata = dev->platform_data;
+ struct audio_driver_state *ads = pdata->driver_data;
+ return sprintf(buf, "%s\n", ads->using_dma ? "dma" : "pio");
+}
+
/* sysfs: switch the driver between DMA and PIO transfer modes.
 * Accepts "dma" or "pio" (at least 4 bytes, i.e. including the trailing
 * newline); refuses to switch while playback data is queued. */
static ssize_t dma_toggle_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int use_dma;
	struct tegra_audio_platform_data *pdata = dev->platform_data;
	struct audio_driver_state *ads = pdata->driver_data;

	if (count < 4)
		return -EINVAL;

	use_dma = 0;
	if (!strncmp(buf, "dma", 3))
		use_dma = 1;
	else if (strncmp(buf, "pio", 3)) {
		dev_err(dev, "%s: invalid string [%s]\n", __func__, buf);
		return -EINVAL;
	}

	mutex_lock(&ads->out.lock);
	if (kfifo_len(&ads->out.fifo)) {
		dev_err(dev, "%s: playback or recording in progress.\n",
			__func__);
		mutex_unlock(&ads->out.lock);
		return -EBUSY;
	}
	/* toggle only when the requested mode differs from the current one */
	if (!!use_dma ^ !!ads->using_dma)
		toggle_dma(ads);
	else
		dev_info(dev, "%s: no change\n", __func__);
	mutex_unlock(&ads->out.lock);

	return count;
}
+
+static DEVICE_ATTR(dma_toggle, 0644, dma_toggle_show, dma_toggle_store);
+
+static ssize_t __attr_fifo_atn_read(char *buf, int atn_lvl)
+{
+ switch (atn_lvl) {
+ case SPDIF_FIFO_ATN_LVL_ONE_SLOT:
+ strncpy(buf, "1\n", 2);
+ return 2;
+ case SPDIF_FIFO_ATN_LVL_FOUR_SLOTS:
+ strncpy(buf, "4\n", 2);
+ return 2;
+ case SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS:
+ strncpy(buf, "8\n", 2);
+ return 2;
+ case SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS:
+ strncpy(buf, "12\n", 3);
+ return 3;
+ default:
+ BUG_ON(1);
+ return -EIO;
+ }
+}
+
/* Parse a sysfs slot-count string ("1", "4", "8" or "12") and store the
 * corresponding hardware attention-level code in *fifo_lvl.  Returns
 * the consumed size or -EINVAL on bad input. */
static ssize_t __attr_fifo_atn_write(struct audio_driver_state *ads,
		struct audio_stream *as,
		int *fifo_lvl,
		const char *buf, size_t size)
{
	int lvl;

	if (size > 3) {
		pr_err("%s: buffer size %d too big\n", __func__, size);
		return -EINVAL;
	}

	if (sscanf(buf, "%d", &lvl) != 1) {
		pr_err("%s: invalid input string [%s]\n", __func__, buf);
		return -EINVAL;
	}

	/* map the user-visible slot count to the register encoding */
	switch (lvl) {
	case 1:
		lvl = SPDIF_FIFO_ATN_LVL_ONE_SLOT;
		break;
	case 4:
		lvl = SPDIF_FIFO_ATN_LVL_FOUR_SLOTS;
		break;
	case 8:
		lvl = SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS;
		break;
	case 12:
		lvl = SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS;
		break;
	default:
		pr_err("%s: invalid attention level %d\n", __func__, lvl);
		return -EINVAL;
	}

	*fifo_lvl = lvl;
	pr_info("%s: fifo level %d\n", __func__, *fifo_lvl);

	return size;
}
+
+static ssize_t tx_fifo_atn_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tegra_audio_platform_data *pdata = dev->platform_data;
+ struct audio_driver_state *ads = pdata->driver_data;
+ return __attr_fifo_atn_read(buf, ads->out.spdif_fifo_atn_level);
+}
+
/* sysfs: set the TX FIFO attention level.  Rejected with -EBUSY while
 * playback data is queued, since the level is programmed at start. */
static ssize_t tx_fifo_atn_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	ssize_t rc;
	struct tegra_audio_platform_data *pdata = dev->platform_data;
	struct audio_driver_state *ads = pdata->driver_data;
	mutex_lock(&ads->out.lock);
	if (kfifo_len(&ads->out.fifo)) {
		pr_err("%s: playback in progress.\n", __func__);
		rc = -EBUSY;
		goto done;
	}
	rc = __attr_fifo_atn_write(ads, &ads->out,
			&ads->out.spdif_fifo_atn_level,
			buf, count);
done:
	mutex_unlock(&ads->out.lock);
	return rc;
}
+
+static DEVICE_ATTR(tx_fifo_atn, 0644, tx_fifo_atn_show, tx_fifo_atn_store);
+
+
+static int tegra_spdif_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct resource *res;
+ struct audio_driver_state *state;
+
+ pr_info("%s\n", __func__);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->pdev = pdev;
+ state->pdata = pdev->dev.platform_data;
+ state->pdata->driver_data = state;
+ BUG_ON(!state->pdata);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mem resource!\n");
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "memory region already claimed!\n");
+ return -ENOMEM;
+ }
+
+ state->spdif_phys = res->start;
+ state->spdif_base = (unsigned long)ioremap(res->start,
+ res->end - res->start + 1);
+ if (!state->spdif_base) {
+ dev_err(&pdev->dev, "cannot remap iomem!\n");
+ return -EIO;
+ }
+
+ state->out.spdif_fifo_atn_level = SPDIF_FIFO_ATN_LVL_FOUR_SLOTS;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no dma resource!\n");
+ return -ENODEV;
+ }
+ state->dma_req_sel = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no irq resource!\n");
+ return -ENODEV;
+ }
+ state->irq = res->start;
+
+ memset(&state->pio_stats, 0, sizeof(state->pio_stats));
+
+ /* disable interrupts from SPDIF */
+ spdif_fifo_clear(state->spdif_base);
+ spdif_enable_fifos(state->spdif_base, 0);
+
+ spdif_set_bit_mode(state->spdif_base, state->pdata->mode);
+ spdif_set_fifo_packed(state->spdif_base, state->pdata->fifo_fmt);
+
+ state->out.opened = 0;
+ state->out.active = false;
+ mutex_init(&state->out.lock);
+ init_completion(&state->out.fifo_completion);
+ init_completion(&state->out.stop_completion);
+ spin_lock_init(&state->out.dma_req_lock);
+ state->out.buf_phys = 0;
+ state->out.dma_chan = NULL;
+ state->out.dma_has_it = false;
+
+ state->out.buffer = 0;
+ state->out.buf_config.size = PCM_BUFFER_MAX_SIZE_ORDER;
+ state->out.buf_config.threshold = PCM_BUFFER_THRESHOLD_ORDER;
+ state->out.buf_config.chunk = PCM_BUFFER_DMA_CHUNK_SIZE_ORDER;
+ rc = init_stream_buffer(&state->out, &state->out.buf_config, 0);
+ if (rc < 0)
+ return rc;
+
+ INIT_WORK(&state->out.allow_suspend_work, allow_suspend_worker);
+ pm_qos_add_request(&state->out.pm_qos, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+ snprintf(state->out.wake_lock_name, sizeof(state->out.wake_lock_name),
+ "tegra-audio-spdif");
+ wake_lock_init(&state->out.wake_lock, WAKE_LOCK_SUSPEND,
+ state->out.wake_lock_name);
+
+ if (request_irq(state->irq, spdif_interrupt,
+ IRQF_DISABLED, state->pdev->name, state) < 0) {
+ dev_err(&pdev->dev,
+ "%s: could not register handler for irq %d\n",
+ __func__, state->irq);
+ return -EIO;
+ }
+
+ rc = setup_misc_device(&state->misc_out,
+ &tegra_spdif_out_fops,
+ "spdif_out");
+ if (rc < 0)
+ return rc;
+
+ rc = setup_misc_device(&state->misc_out_ctl,
+ &tegra_spdif_out_ctl_fops,
+ "spdif_out_ctl");
+ if (rc < 0)
+ return rc;
+
+ state->using_dma = state->pdata->dma_on;
+ if (!state->using_dma)
+ sound_ops = &pio_sound_ops;
+ sound_ops->setup(state);
+
+ rc = device_create_file(&pdev->dev, &dev_attr_dma_toggle);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "%s: could not create sysfs entry %s: %d\n",
+ __func__, dev_attr_dma_toggle.attr.name, rc);
+ return rc;
+ }
+
+ rc = device_create_file(&pdev->dev, &dev_attr_tx_fifo_atn);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "%s: could not create sysfs entry %s: %d\n",
+ __func__, dev_attr_tx_fifo_atn.attr.name, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
/* Platform driver glue; binds to the "spdif_out" platform device. */
static struct platform_driver tegra_spdif_driver = {
	.driver = {
		.name = "spdif_out",
		.owner = THIS_MODULE,
	},
	.probe = tegra_spdif_probe,
};
+
/* Module entry point: register the SPDIF platform driver. */
static int __init tegra_spdif_init(void)
{
	return platform_driver_register(&tegra_spdif_driver);
}

module_init(tegra_spdif_init);
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/tegra/Kconfig b/drivers/media/video/tegra/Kconfig
index e69de29bb2d1..ae77e8994dc8 100644
--- a/drivers/media/video/tegra/Kconfig
+++ b/drivers/media/video/tegra/Kconfig
@@ -0,0 +1,10 @@
+source "drivers/media/video/tegra/avp/Kconfig"
+
+config TEGRA_CAMERA
+ bool "Enable support for tegra camera/isp hardware"
+ depends on ARCH_TEGRA
+ default y
+ help
+ Enables support for the Tegra camera interface
+
+ If unsure, say Y
diff --git a/drivers/media/video/tegra/Makefile b/drivers/media/video/tegra/Makefile
index 1c096b91fbbb..ccb4972e8c8f 100644
--- a/drivers/media/video/tegra/Makefile
+++ b/drivers/media/video/tegra/Makefile
@@ -1,4 +1,5 @@
#
# Makefile for the video capture/playback device drivers.
#
-obj-y += tegra_camera.o
+obj-y += avp/
+obj-$(CONFIG_TEGRA_CAMERA) += tegra_camera.o
diff --git a/drivers/media/video/tegra/avp/Kconfig b/drivers/media/video/tegra/avp/Kconfig
new file mode 100644
index 000000000000..fdd208510fcb
--- /dev/null
+++ b/drivers/media/video/tegra/avp/Kconfig
@@ -0,0 +1,25 @@
+config TEGRA_RPC
+ bool "Enable support for Tegra RPC"
+ depends on ARCH_TEGRA
+ default y
+ help
+ Enables support for the RPC mechanism necessary for the Tegra
+ multimedia framework. It is both used to communicate locally on the
+ CPU between multiple multimedia components as well as to communicate
+ with the AVP for offloading media decode.
+
+ Exports the local tegra RPC interface on device node
+ /dev/tegra_rpc. Also provides tegra fd based semaphores needed by
+ the tegra multimedia framework.
+
+ If unsure, say Y
+
+config TEGRA_AVP
+ bool "Enable support for the AVP multimedia offload engine"
+ depends on ARCH_TEGRA && TEGRA_RPC
+ default y
+ help
+ Enables support for the multimedia offload engine used by Tegra
+ multimedia framework.
+
+ If unsure, say Y
diff --git a/drivers/media/video/tegra/avp/Makefile b/drivers/media/video/tegra/avp/Makefile
new file mode 100644
index 000000000000..6d8be11c3f81
--- /dev/null
+++ b/drivers/media/video/tegra/avp/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_TEGRA_RPC) += tegra_rpc.o
+obj-$(CONFIG_TEGRA_RPC) += trpc_local.o
+obj-$(CONFIG_TEGRA_RPC) += trpc_sema.o
+obj-$(CONFIG_TEGRA_AVP) += avp.o
+obj-$(CONFIG_TEGRA_AVP) += avp_svc.o
+obj-$(CONFIG_TEGRA_AVP) += headavp.o
diff --git a/drivers/media/video/tegra/avp/avp.c b/drivers/media/video/tegra/avp/avp.c
new file mode 100644
index 000000000000..de6034be327c
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp.c
@@ -0,0 +1,1683 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/irq.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
+#include <mach/clk.h>
+#include <mach/io.h>
+#include <mach/iomap.h>
+#include <mach/nvmap.h>
+
+#include "../../../../video/tegra/nvmap/nvmap.h"
+
+#include "headavp.h"
+#include "avp_msg.h"
+#include "trpc.h"
+#include "avp.h"
+
+u32 avp_debug_mask = (AVP_DBG_TRACE_TRPC_CONN |
+ AVP_DBG_TRACE_XPC_CONN |
+ AVP_DBG_TRACE_LIB);
+module_param_named(debug_mask, avp_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+#define TEGRA_AVP_NAME "tegra-avp"
+
+#define TEGRA_AVP_KERNEL_FW "nvrm_avp.bin"
+
+#define TEGRA_AVP_RESET_VECTOR_ADDR \
+ (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x200)
+
+#define TEGRA_AVP_RESUME_ADDR IO_ADDRESS(TEGRA_IRAM_BASE)
+
+#define FLOW_CTRL_HALT_COP_EVENTS IO_ADDRESS(TEGRA_FLOW_CTRL_BASE + 0x4)
+#define FLOW_MODE_STOP (0x2 << 29)
+#define FLOW_MODE_NONE 0x0
+
+#define MBOX_FROM_AVP IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x10)
+#define MBOX_TO_AVP IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x20)
+
+/* Layout of the mailbox registers:
+ * bit 31 - pending message interrupt enable (mailbox full, i.e. valid=1)
+ * bit 30 - message cleared interrupt enable (mailbox empty, i.e. valid=0)
+ * bit 29 - message valid. peer clears this bit after reading msg
+ * bits 27:0 - message data
+ */
+#define MBOX_MSG_PENDING_INT_EN (1 << 31)
+#define MBOX_MSG_READ_INT_EN (1 << 30)
+#define MBOX_MSG_VALID (1 << 29)
+
+#define AVP_MSG_MAX_CMD_LEN 16
+#define AVP_MSG_AREA_SIZE (AVP_MSG_MAX_CMD_LEN + TEGRA_RPC_MAX_MSG_LEN)
+
/* Global driver state for the AVP (audio/video co-processor) offload
 * engine: mailbox messaging to/from the AVP, the trpc node it is
 * exposed as, firmware/nvmap allocations and the set of open remote
 * endpoints. */
struct avp_info {
	struct clk *cop_clk;

	int mbox_from_avp_pend_irq;

	/* shared message area used for both directions of mailbox traffic */
	dma_addr_t msg_area_addr;
	u32 msg;
	void *msg_to_avp;
	void *msg_from_avp;
	struct mutex to_avp_lock;
	struct mutex from_avp_lock;

	/* deferred processing of incoming AVP messages */
	struct work_struct recv_work;
	struct workqueue_struct *recv_wq;

	struct trpc_node *rpc_node;
	struct miscdevice misc_dev;
	bool opened;
	struct mutex open_lock;

	/* state_lock guards the flags below and the endpoints tree */
	spinlock_t state_lock;
	bool initialized;
	bool shutdown;
	bool suspending;
	bool defer_remote;

	/* libraries currently loaded on the AVP */
	struct mutex libs_lock;
	struct list_head libs;
	struct nvmap_client *nvmap_libs;

	/* client for driver allocations, persistent */
	struct nvmap_client *nvmap_drv;
	struct nvmap_handle_ref *kernel_handle;
	void *kernel_data;
	unsigned long kernel_phys;

	/* IRAM contents saved/restored across suspend */
	struct nvmap_handle_ref *iram_backup_handle;
	void *iram_backup_data;
	unsigned long iram_backup_phys;
	unsigned long resume_addr;

	struct trpc_endpoint *avp_ep;
	struct rb_root endpoints;

	struct avp_svc_info *avp_svc;
};
+
/* Book-keeping for one remote (AVP-side) trpc endpoint: the local and
 * remote port ids and the local endpoint object.  Refcounted; kept in
 * avp->endpoints keyed by loc_id. */
struct remote_info {
	u32 loc_id;
	u32 rem_id;
	struct kref ref;

	struct trpc_endpoint *trpc_ep;
	struct rb_node rb_node;
};
+
/* One library loaded on the AVP; tracked on avp->libs for cleanup. */
struct lib_item {
	struct list_head list;
	u32 handle;
	char name[TEGRA_AVP_LIB_MAX_NAME];
};
+
+static struct avp_info *tegra_avp;
+
+static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len);
+static void avp_trpc_close(struct trpc_endpoint *ep);
+static void libs_cleanup(struct avp_info *avp);
+
+static struct trpc_ep_ops remote_ep_ops = {
+ .send = avp_trpc_send,
+ .close = avp_trpc_close,
+};
+
+static struct remote_info *rinfo_alloc(struct avp_info *avp)
+{
+ struct remote_info *rinfo;
+
+ rinfo = kzalloc(sizeof(struct remote_info), GFP_KERNEL);
+ if (!rinfo)
+ return NULL;
+ kref_init(&rinfo->ref);
+ return rinfo;
+}
+
+static void _rinfo_release(struct kref *ref)
+{
+ struct remote_info *rinfo = container_of(ref, struct remote_info, ref);
+ kfree(rinfo);
+}
+
/* Take a reference on a remote_info. */
static inline void rinfo_get(struct remote_info *rinfo)
{
	kref_get(&rinfo->ref);
}
+
/* Drop a reference on a remote_info; frees it when the count hits zero. */
static inline void rinfo_put(struct remote_info *rinfo)
{
	kref_put(&rinfo->ref, _rinfo_release);
}
+
/* Insert rinfo into the avp->endpoints rb-tree, keyed by loc_id, and
 * take a reference on behalf of the tree.  Returns -EEXIST if the id is
 * already present.  NOTE(review): callers appear to serialize via
 * avp->state_lock — confirm. */
static int remote_insert(struct avp_info *avp, struct remote_info *rinfo)
{
	struct rb_node **p;
	struct rb_node *parent;
	struct remote_info *tmp;

	p = &avp->endpoints.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct remote_info, rb_node);

		if (rinfo->loc_id < tmp->loc_id)
			p = &(*p)->rb_left;
		else if (rinfo->loc_id > tmp->loc_id)
			p = &(*p)->rb_right;
		else {
			pr_info("%s: avp endpoint id=%x (%s) already exists\n",
				__func__, rinfo->loc_id,
				trpc_name(rinfo->trpc_ep));
			return -EEXIST;
		}
	}
	rb_link_node(&rinfo->rb_node, parent, p);
	rb_insert_color(&rinfo->rb_node, &avp->endpoints);
	rinfo_get(rinfo);
	return 0;
}
+
+static struct remote_info *remote_find(struct avp_info *avp, u32 local_id)
+{
+ struct rb_node *n = avp->endpoints.rb_node;
+ struct remote_info *rinfo;
+
+ while (n) {
+ rinfo = rb_entry(n, struct remote_info, rb_node);
+
+ if (local_id < rinfo->loc_id)
+ n = n->rb_left;
+ else if (local_id > rinfo->loc_id)
+ n = n->rb_right;
+ else
+ return rinfo;
+ }
+ return NULL;
+}
+
/* Remove rinfo from the endpoints tree and drop the tree's reference. */
static void remote_remove(struct avp_info *avp, struct remote_info *rinfo)
{
	rb_erase(&rinfo->rb_node, &avp->endpoints);
	rinfo_put(rinfo);
}
+
/* test whether or not the trpc endpoint provided is a valid AVP node
 * endpoint */
static struct remote_info *validate_trpc_ep(struct avp_info *avp,
		struct trpc_endpoint *ep)
{
	struct remote_info *tmp = trpc_priv(ep);
	struct remote_info *rinfo;

	if (!tmp)
		return NULL;
	/* the endpoint's private data must match the tree entry for its
	 * local id, and that entry must still point back at this ep */
	rinfo = remote_find(avp, tmp->loc_id);
	if (rinfo && rinfo == tmp && rinfo->trpc_ep == ep)
		return rinfo;
	return NULL;
}
+
/* Thin wrapper around a mailbox register write. */
static inline void mbox_writel(u32 val, void __iomem *mbox)
{
	writel(val, mbox);
}
+
/* Thin wrapper around a mailbox register read. */
static inline u32 mbox_readl(void __iomem *mbox)
{
	return readl(mbox);
}
+
/* Write an ack (cmd + arg) into the shared "from AVP" message area.
 * The AVP polls on the cmd word, so arg must be globally visible
 * before cmd — hence the barrier between the two stores. */
static inline void msg_ack_remote(struct avp_info *avp, u32 cmd, u32 arg)
{
	struct msg_ack *ack = avp->msg_from_avp;

	/* must make sure the arg is there first */
	ack->arg = arg;
	wmb();
	ack->cmd = cmd;
	wmb();
}
+
/* Read the command word of the message most recently written by the AVP
 * into the shared "from AVP" area. */
static inline u32 msg_recv_get_cmd(struct avp_info *avp)
{
	volatile u32 *cmd = avp->msg_from_avp;
	rmb();
	return *cmd;
}
+
/* Copy a header (and optional payload) into the shared "to AVP" area
 * and ring the outgoing mailbox.  Caller must ensure the previous
 * message has been acked — see msg_write(). */
static inline int __msg_write(struct avp_info *avp, void *hdr, size_t hdr_len,
			void *buf, size_t len)
{
	memcpy(avp->msg_to_avp, hdr, hdr_len);
	if (buf && len)
		memcpy(avp->msg_to_avp + hdr_len, buf, len);
	mbox_writel(avp->msg, MBOX_TO_AVP);
	return 0;
}
+
/* Send a message to the AVP, first waiting up to ~1s for it to ack
 * (clear) the previous one.  Returns 0 on success or -ETIMEDOUT.
 * Callers hold avp->to_avp_lock to serialize use of the shared area. */
static inline int msg_write(struct avp_info *avp, void *hdr, size_t hdr_len,
			void *buf, size_t len)
{
	/* rem_ack is a pointer into shared memory that the AVP modifies */
	volatile u32 *rem_ack = avp->msg_to_avp;
	unsigned long endtime = jiffies + HZ;

	/* the other side ack's the message by clearing the first word,
	 * wait for it to do so */
	rmb();
	while (*rem_ack != 0 && time_before(jiffies, endtime)) {
		usleep_range(100, 2000);
		rmb();
	}
	if (*rem_ack != 0)
		return -ETIMEDOUT;
	__msg_write(avp, hdr, hdr_len, buf, len);
	return 0;
}
+
/* Non-blocking check for the AVP's ack of command `cmd` in the shared
 * area.  Copies the ack argument out through *arg if requested.
 * Returns 0 on a matching ack, -ENOENT otherwise. */
static inline int msg_check_ack(struct avp_info *avp, u32 cmd, u32 *arg)
{
	struct msg_ack ack;

	rmb();
	memcpy(&ack, avp->msg_to_avp, sizeof(ack));
	if (ack.cmd != cmd)
		return -ENOENT;
	if (arg)
		*arg = ack.arg;
	return 0;
}
+
+/* XXX: add timeout */
+static int msg_wait_ack_locked(struct avp_info *avp, u32 cmd, u32 *arg)
+{
+ /* rem_ack is a pointer into shared memory that the AVP modifies */
+ volatile u32 *rem_ack = avp->msg_to_avp;
+ unsigned long endtime = jiffies + HZ / 5;
+ int ret;
+
+ do {
+ ret = msg_check_ack(avp, cmd, arg);
+ usleep_range(1000, 5000);
+ } while (ret && time_before(jiffies, endtime));
+
+ /* clear out the ack */
+ *rem_ack = 0;
+ wmb();
+ return ret;
+}
+
/* trpc .send op: forward a message from a local endpoint to its remote
 * AVP-side port via the mailbox.  Fails with -EBUSY while suspending
 * (except for traffic to the AVP control endpoint itself) and -ENODEV
 * after shutdown. */
static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len)
{
	struct avp_info *avp = tegra_avp;
	struct remote_info *rinfo;
	struct msg_port_data msg;
	int ret;
	unsigned long flags;

	DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: ep=%p priv=%p buf=%p len=%d\n",
		__func__, ep, trpc_priv(ep), buf, len);

	spin_lock_irqsave(&avp->state_lock, flags);
	if (unlikely(avp->suspending && trpc_peer(ep) != avp->avp_ep)) {
		ret = -EBUSY;
		goto err_state_locked;
	} else if (avp->shutdown) {
		ret = -ENODEV;
		goto err_state_locked;
	}
	rinfo = validate_trpc_ep(avp, ep);
	if (!rinfo) {
		ret = -ENOTTY;
		goto err_state_locked;
	}
	/* hold a ref across the (sleeping) mailbox write so the endpoint
	 * can't be freed out from under us once the lock is dropped */
	rinfo_get(rinfo);
	spin_unlock_irqrestore(&avp->state_lock, flags);

	msg.cmd = CMD_MESSAGE;
	msg.port_id = rinfo->rem_id;
	msg.msg_len = len;

	mutex_lock(&avp->to_avp_lock);
	ret = msg_write(avp, &msg, sizeof(msg), buf, len);
	mutex_unlock(&avp->to_avp_lock);

	DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: msg sent for %s (%x->%x) (%d)\n",
		__func__, trpc_name(ep), rinfo->loc_id, rinfo->rem_id, ret);
	rinfo_put(rinfo);
	return ret;

err_state_locked:
	spin_unlock_irqrestore(&avp->state_lock, flags);
	return ret;
}
+
/* Tell the AVP to tear down its side of the given remote port. */
static int _send_disconnect(struct avp_info *avp, u32 port_id)
{
	struct msg_disconnect msg;
	int ret;

	msg.cmd = CMD_DISCONNECT;
	msg.port_id = port_id;

	mutex_lock(&avp->to_avp_lock);
	ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
	mutex_unlock(&avp->to_avp_lock);

	DBG(AVP_DBG_TRACE_XPC_CONN, "%s: sent disconnect msg for 0x%x\n",
		__func__, port_id);
	return ret;
}
+
/* Note: Assumes that the rinfo was previously successfully added to the
 * endpoints rb_tree. The initial refcnt of 1 is inherited by the port when the
 * trpc endpoint is created with the trpc_xxx functions. Thus, on close,
 * we must drop that reference here.
 * The avp->endpoints rb_tree keeps its own reference on rinfo objects.
 *
 * The try_connect function does not use this on error because it needs to
 * split the close of trpc_ep port and the put.
 */
static inline void remote_close(struct remote_info *rinfo)
{
	trpc_close(rinfo->trpc_ep);
	rinfo_put(rinfo);
}
+
/* trpc .close op: detach a local endpoint from its remote AVP port,
 * notify the AVP, and drop the references held by the endpoints tree
 * and by the port itself.  A no-op after shutdown (the AVP is gone). */
static void avp_trpc_close(struct trpc_endpoint *ep)
{
	struct avp_info *avp = tegra_avp;
	struct remote_info *rinfo;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&avp->state_lock, flags);
	if (avp->shutdown) {
		spin_unlock_irqrestore(&avp->state_lock, flags);
		return;
	}

	rinfo = validate_trpc_ep(avp, ep);
	if (!rinfo) {
		pr_err("%s: tried to close invalid port '%s' endpoint (%p)\n",
			__func__, trpc_name(ep), ep);
		spin_unlock_irqrestore(&avp->state_lock, flags);
		return;
	}
	/* temporary ref keeps rinfo alive across the unlocked section */
	rinfo_get(rinfo);
	remote_remove(avp, rinfo);
	spin_unlock_irqrestore(&avp->state_lock, flags);

	DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: closing '%s' (%x)\n", __func__,
		trpc_name(ep), rinfo->rem_id);

	ret = _send_disconnect(avp, rinfo->rem_id);
	if (ret)
		pr_err("%s: error while closing remote port '%s' (%x)\n",
			__func__, trpc_name(ep), rinfo->rem_id);
	remote_close(rinfo);
	rinfo_put(rinfo);
}
+
+/* takes and holds avp->from_avp_lock */
+static void recv_msg_lock(struct avp_info *avp)
+{
+ unsigned long flags;
+
+ mutex_lock(&avp->from_avp_lock);
+ spin_lock_irqsave(&avp->state_lock, flags);
+ avp->defer_remote = true;
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+}
+
+/* MUST be called with avp->from_avp_lock held */
+static void recv_msg_unlock(struct avp_info *avp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ avp->defer_remote = false;
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ mutex_unlock(&avp->from_avp_lock);
+}
+
+/* trpc_node try_connect hook: connect the local endpoint @from to a port
+ * of the same name on the AVP.  On success the new remote_info (with its
+ * peer endpoint) is inserted into avp->endpoints and 0 is returned;
+ * otherwise a negative errno.
+ *
+ * Fix vs. previous revision: error message said "to long" instead of
+ * "too long". */
+static int avp_node_try_connect(struct trpc_node *node,
+				struct trpc_node *src_node,
+				struct trpc_endpoint *from)
+{
+	struct avp_info *avp = tegra_avp;
+	const char *port_name = trpc_name(from);
+	struct remote_info *rinfo;
+	struct msg_connect msg;
+	int ret;
+	unsigned long flags;
+	int len;
+
+	DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: trying connect from %s\n", __func__,
+	    port_name);
+
+	if (node != avp->rpc_node || node->priv != avp)
+		return -ENODEV;
+
+	len = strlen(port_name);
+	if (len > XPC_PORT_NAME_LEN) {
+		pr_err("%s: port name (%s) too long\n", __func__, port_name);
+		return -EINVAL;
+	}
+
+	ret = 0;
+	spin_lock_irqsave(&avp->state_lock, flags);
+	if (avp->suspending) {
+		ret = -EBUSY;
+	} else if (likely(src_node != avp->rpc_node)) {
+		/* only check for initialized when the source is not ourselves
+		 * since we'll end up calling into here during initialization */
+		if (!avp->initialized)
+			ret = -ENODEV;
+	} else if (strncmp(port_name, "RPC_AVP_PORT", XPC_PORT_NAME_LEN)) {
+		/* we only allow connections to ourselves for the cpu-to-avp
+		   port */
+		ret = -EINVAL;
+	}
+	spin_unlock_irqrestore(&avp->state_lock, flags);
+	if (ret)
+		return ret;
+
+	rinfo = rinfo_alloc(avp);
+	if (!rinfo) {
+		pr_err("%s: cannot alloc mem for rinfo\n", __func__);
+		ret = -ENOMEM;
+		goto err_alloc_rinfo;
+	}
+	/* local port id is simply the rinfo address (unique, 32-bit ARM) */
+	rinfo->loc_id = (u32)rinfo;
+
+	msg.cmd = CMD_CONNECT;
+	msg.port_id = rinfo->loc_id;
+	memcpy(msg.name, port_name, len);
+	memset(msg.name + len, 0, XPC_PORT_NAME_LEN - len);
+
+	/* when trying to connect to remote, we need to block remote
+	 * messages until we get our ack and can insert it into our lists.
+	 * Otherwise, we can get a message from the other side for a port
+	 * that we haven't finished setting up.
+	 *
+	 * 'defer_remote' will force the irq handler to not process messages
+	 * at irq context but to schedule work to do so. The work function will
+	 * take the from_avp_lock and everything should stay consistent.
+	 */
+	recv_msg_lock(avp);
+	mutex_lock(&avp->to_avp_lock);
+	ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
+	if (ret) {
+		pr_err("%s: remote has not acked last message (%s)\n", __func__,
+		       port_name);
+		mutex_unlock(&avp->to_avp_lock);
+		goto err_msg_write;
+	}
+	/* the ack carries the remote port id (0 == connection refused) */
+	ret = msg_wait_ack_locked(avp, CMD_RESPONSE, &rinfo->rem_id);
+	mutex_unlock(&avp->to_avp_lock);
+
+	if (ret) {
+		pr_err("%s: remote end won't respond for '%s'\n", __func__,
+		       port_name);
+		goto err_wait_ack;
+	}
+	if (!rinfo->rem_id) {
+		pr_err("%s: can't connect to '%s'\n", __func__, port_name);
+		ret = -ECONNREFUSED;
+		goto err_nack;
+	}
+
+	DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: got conn ack '%s' (%x <-> %x)\n",
+	    __func__, port_name, rinfo->loc_id, rinfo->rem_id);
+
+	rinfo->trpc_ep = trpc_create_peer(node, from, &remote_ep_ops,
+					  rinfo);
+	if (!rinfo->trpc_ep) {
+		pr_err("%s: cannot create peer for %s\n", __func__, port_name);
+		ret = -EINVAL;
+		goto err_create_peer;
+	}
+
+	spin_lock_irqsave(&avp->state_lock, flags);
+	ret = remote_insert(avp, rinfo);
+	spin_unlock_irqrestore(&avp->state_lock, flags);
+	if (ret)
+		goto err_ep_insert;
+
+	recv_msg_unlock(avp);
+	return 0;
+
+err_ep_insert:
+	trpc_close(rinfo->trpc_ep);
+err_create_peer:
+	_send_disconnect(avp, rinfo->rem_id);
+err_nack:
+err_wait_ack:
+err_msg_write:
+	recv_msg_unlock(avp);
+	rinfo_put(rinfo);
+err_alloc_rinfo:
+	return ret;
+}
+
+/* Handle a CMD_DISCONNECT from the AVP: remove the named port from our
+ * endpoints tree and close its local endpoint.  Always acks the remote,
+ * even for unknown ports.  Called with from_avp_lock held. */
+static void process_disconnect_locked(struct avp_info *avp,
+				      struct msg_data *raw_msg)
+{
+	struct msg_disconnect *disconn_msg = (struct msg_disconnect *)raw_msg;
+	unsigned long flags;
+	struct remote_info *rinfo;
+
+	DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got disconnect (%x)\n", __func__,
+	    disconn_msg->port_id);
+
+	if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
+		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, disconn_msg,
+				     sizeof(struct msg_disconnect));
+
+	spin_lock_irqsave(&avp->state_lock, flags);
+	rinfo = remote_find(avp, disconn_msg->port_id);
+	if (!rinfo) {
+		spin_unlock_irqrestore(&avp->state_lock, flags);
+		pr_warning("%s: got disconnect for unknown port 0x%x\n",
+			   __func__, disconn_msg->port_id);
+		goto ack;
+	}
+	/* temporary ref so rinfo survives once we drop state_lock */
+	rinfo_get(rinfo);
+	remote_remove(avp, rinfo);
+	spin_unlock_irqrestore(&avp->state_lock, flags);
+
+	remote_close(rinfo);
+	rinfo_put(rinfo);
+ack:
+	msg_ack_remote(avp, CMD_ACK, 0);
+}
+
+/* Handle a CMD_CONNECT from the AVP: create a local endpoint for the
+ * requested port name and ack with our local port id (0 on failure,
+ * which the remote treats as a nack).  Called with from_avp_lock held. */
+static void process_connect_locked(struct avp_info *avp,
+				   struct msg_data *raw_msg)
+{
+	struct msg_connect *conn_msg = (struct msg_connect *)raw_msg;
+	struct trpc_endpoint *trpc_ep;
+	struct remote_info *rinfo;
+	char name[XPC_PORT_NAME_LEN + 1];
+	int ret;
+	u32 local_port_id = 0;
+	unsigned long flags;
+
+	DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got connect (%x)\n", __func__,
+	    conn_msg->port_id);
+	if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
+		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+				     conn_msg, sizeof(struct msg_connect));
+
+	rinfo = rinfo_alloc(avp);
+	if (!rinfo) {
+		pr_err("%s: cannot alloc mem for rinfo\n", __func__);
+		ret = -ENOMEM;
+		goto ack;
+	}
+	rinfo->loc_id = (u32)rinfo;
+	rinfo->rem_id = conn_msg->port_id;
+
+	/* the wire name field is fixed-size and may not be NUL-terminated */
+	memcpy(name, conn_msg->name, XPC_PORT_NAME_LEN);
+	name[XPC_PORT_NAME_LEN] = '\0';
+	trpc_ep = trpc_create_connect(avp->rpc_node, name, &remote_ep_ops,
+				      rinfo, 0);
+	if (IS_ERR(trpc_ep)) {
+		pr_err("%s: remote requested unknown port '%s' (%d)\n",
+		       __func__, name, (int)PTR_ERR(trpc_ep));
+		goto nack;
+	}
+	rinfo->trpc_ep = trpc_ep;
+
+	spin_lock_irqsave(&avp->state_lock, flags);
+	ret = remote_insert(avp, rinfo);
+	spin_unlock_irqrestore(&avp->state_lock, flags);
+	if (ret)
+		goto err_ep_insert;
+
+	local_port_id = rinfo->loc_id;
+	goto ack;
+
+err_ep_insert:
+	trpc_close(trpc_ep);
+nack:
+	rinfo_put(rinfo);
+	local_port_id = 0;
+ack:
+	/* response of 0 tells the remote the connect was refused */
+	msg_ack_remote(avp, CMD_RESPONSE, local_port_id);
+}
+
+/* Deliver a CMD_MESSAGE from the AVP to the local endpoint it targets.
+ * Acks the remote in every case EXCEPT -ENOMEM from trpc_send_msg: in
+ * that case no ack is sent and -ENOMEM is returned so the ISR can
+ * re-queue the message to the workqueue (see avp_mbox_pending_isr).
+ * @gfp_flags: GFP_ATOMIC from irq context, GFP_KERNEL from the work fn. */
+static int process_message(struct avp_info *avp, struct msg_data *raw_msg,
+			   gfp_t gfp_flags)
+{
+	struct msg_port_data *port_msg = (struct msg_port_data *)raw_msg;
+	struct remote_info *rinfo;
+	unsigned long flags;
+	int len;
+	int ret;
+
+	/* clamp to the max payload; oversized messages are truncated */
+	len = min(port_msg->msg_len, (u32)TEGRA_RPC_MAX_MSG_LEN);
+
+	if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG) {
+		pr_info("%s: got message cmd=%x port=%x len=%d\n", __func__,
+			port_msg->cmd, port_msg->port_id, port_msg->msg_len);
+		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, port_msg,
+				     sizeof(struct msg_port_data) + len);
+	}
+
+	if (len != port_msg->msg_len)
+		pr_err("%s: message sent is too long (%d bytes)\n", __func__,
+		       port_msg->msg_len);
+
+	spin_lock_irqsave(&avp->state_lock, flags);
+	rinfo = remote_find(avp, port_msg->port_id);
+	if (rinfo) {
+		/* pin both the rinfo and its endpoint across the delivery */
+		rinfo_get(rinfo);
+		trpc_get(rinfo->trpc_ep);
+	} else {
+		pr_err("%s: port %x not found\n", __func__, port_msg->port_id);
+		spin_unlock_irqrestore(&avp->state_lock, flags);
+		ret = -ENOENT;
+		goto ack;
+	}
+	spin_unlock_irqrestore(&avp->state_lock, flags);
+
+	ret = trpc_send_msg(avp->rpc_node, rinfo->trpc_ep, port_msg->data,
+			    len, gfp_flags);
+	if (ret == -ENOMEM) {
+		/* don't ack: caller will defer this message and retry */
+		trpc_put(rinfo->trpc_ep);
+		rinfo_put(rinfo);
+		goto no_ack;
+	} else if (ret) {
+		pr_err("%s: cannot queue message for port %s/%x (%d)\n",
+		       __func__, trpc_name(rinfo->trpc_ep), rinfo->loc_id,
+		       ret);
+	} else {
+		DBG(AVP_DBG_TRACE_XPC_MSG, "%s: msg queued\n", __func__);
+	}
+
+	trpc_put(rinfo->trpc_ep);
+	rinfo_put(rinfo);
+ack:
+	msg_ack_remote(avp, CMD_ACK, 0);
+no_ack:
+	return ret;
+}
+
+/* Workqueue function: process one deferred message from the AVP while
+ * holding from_avp_lock (messages land here when the ISR could not
+ * handle them at irq context). */
+static void process_avp_message(struct work_struct *work)
+{
+	struct avp_info *avp = container_of(work, struct avp_info, recv_work);
+	struct msg_data *msg = avp->msg_from_avp;
+	u32 cmd;
+
+	mutex_lock(&avp->from_avp_lock);
+	rmb();
+	cmd = msg->cmd;
+	if (cmd == CMD_CONNECT)
+		process_connect_locked(avp, msg);
+	else if (cmd == CMD_DISCONNECT)
+		process_disconnect_locked(avp, msg);
+	else if (cmd == CMD_MESSAGE)
+		process_message(avp, msg, GFP_KERNEL);
+	else
+		pr_err("%s: unknown cmd (%x) received\n", __func__, cmd);
+	mutex_unlock(&avp->from_avp_lock);
+}
+
+/* Mailbox interrupt handler for messages arriving from the AVP.
+ * CMD_MESSAGE is delivered inline with GFP_ATOMIC when possible; all
+ * other commands (and -ENOMEM retries, and anything arriving while
+ * defer_remote is set) are punted to the recv workqueue. */
+static irqreturn_t avp_mbox_pending_isr(int irq, void *data)
+{
+	struct avp_info *avp = data;
+	struct msg_data *msg = avp->msg_from_avp;
+	u32 mbox_msg;
+	unsigned long flags;
+	int ret;
+
+	mbox_msg = mbox_readl(MBOX_FROM_AVP);
+	mbox_writel(0, MBOX_FROM_AVP);
+
+	DBG(AVP_DBG_TRACE_XPC_IRQ, "%s: got msg %x\n", __func__, mbox_msg);
+
+	/* XXX: re-use previous message? */
+	if (!(mbox_msg & MBOX_MSG_VALID)) {
+		WARN_ON(1);
+		goto done;
+	}
+
+	/* mailbox values are shifted right by 4 on the wire (low bits carry
+	 * the VALID/INT_EN flags); undo that here */
+	mbox_msg <<= 4;
+	if (mbox_msg == 0x2f00bad0UL) {
+		pr_info("%s: petting watchdog\n", __func__);
+		goto done;
+	}
+
+	spin_lock_irqsave(&avp->state_lock, flags);
+	if (avp->shutdown) {
+		spin_unlock_irqrestore(&avp->state_lock, flags);
+		goto done;
+	} else if (avp->defer_remote) {
+		/* recv_msg_lock() is held somewhere: don't process inline */
+		spin_unlock_irqrestore(&avp->state_lock, flags);
+		goto defer;
+	}
+	spin_unlock_irqrestore(&avp->state_lock, flags);
+
+	rmb();
+	if (msg->cmd == CMD_MESSAGE) {
+		ret = process_message(avp, msg, GFP_ATOMIC);
+		if (ret != -ENOMEM)
+			goto done;
+		pr_info("%s: deferring message (%d)\n", __func__, ret);
+	}
+defer:
+	queue_work(avp->recv_wq, &avp->recv_work);
+done:
+	return IRQ_HANDLED;
+}
+
+/* Reset the AVP and make it jump to @reset_addr via the boot stub.
+ * The stub code runs from its physical address and reads its parameters
+ * (mapping base + jump target) from _tegra_avp_boot_stub_data, which is
+ * DMA-mapped for the duration of the boot.  Returns -EINVAL if the AVP
+ * kernel never reprogrammed the reset vector (i.e. it failed to boot). */
+static int avp_reset(struct avp_info *avp, unsigned long reset_addr)
+{
+	unsigned long stub_code_phys = virt_to_phys(_tegra_avp_boot_stub);
+	dma_addr_t stub_data_phys;
+	unsigned long timeout;
+	int ret = 0;
+
+	/* halt the COP before touching its reset vector */
+	writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
+
+	_tegra_avp_boot_stub_data.map_phys_addr = avp->kernel_phys;
+	_tegra_avp_boot_stub_data.jump_addr = reset_addr;
+	wmb();
+	stub_data_phys = dma_map_single(NULL, &_tegra_avp_boot_stub_data,
+					sizeof(_tegra_avp_boot_stub_data),
+					DMA_TO_DEVICE);
+
+	writel(stub_code_phys, TEGRA_AVP_RESET_VECTOR_ADDR);
+
+	tegra_periph_reset_assert(avp->cop_clk);
+	udelay(10);
+	tegra_periph_reset_deassert(avp->cop_clk);
+
+	writel(FLOW_MODE_NONE, FLOW_CTRL_HALT_COP_EVENTS);
+
+	/* the AVP firmware will reprogram its reset vector as the kernel
+	 * starts, so a dead kernel can be detected by polling this value */
+	timeout = jiffies + msecs_to_jiffies(2000);
+	while (time_before(jiffies, timeout)) {
+		if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) != stub_code_phys)
+			break;
+		cpu_relax();
+	}
+	if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) == stub_code_phys)
+		ret = -EINVAL;
+	WARN_ON(ret);
+	dma_unmap_single(NULL, stub_data_phys,
+			 sizeof(_tegra_avp_boot_stub_data),
+			 DMA_TO_DEVICE);
+	return ret;
+}
+
+/* Stop the AVP and reset the shared message areas and mailboxes to a
+ * known-idle state.  Safe to call whether or not the AVP was running. */
+static void avp_halt(struct avp_info *avp)
+{
+	/* ensure the AVP is halted */
+	writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
+	tegra_periph_reset_assert(avp->cop_clk);
+
+	/* set up the initial memory areas and mailbox contents */
+	*((u32 *)avp->msg_from_avp) = 0;
+	*((u32 *)avp->msg_to_avp) = 0xfeedf00d;
+	mbox_writel(0, MBOX_FROM_AVP);
+	mbox_writel(0, MBOX_TO_AVP);
+}
+
+/* Note: CPU_PORT server and AVP_PORT client are registered with the avp
+ * node, but are actually meant to be processed on our side (either
+ * by the svc thread for processing remote calls or by the client
+ * of the char dev for receiving replies for managing remote
+ * libraries/modules. */
+
+/* Boot the AVP: load its kernel firmware into the 1MiB carveout, reset
+ * the coprocessor into it, start the local service endpoint, and connect
+ * to the remote RPC_AVP_PORT.  Called with avp->open_lock held.
+ *
+ * Fixes vs. previous revision:
+ *  - reject firmware images larger than the SZ_1M carveout instead of
+ *    overflowing kernel_data with memcpy (and underflowing the memset);
+ *  - use %zu for avp_fw->size (size_t), not %d. */
+static int avp_init(struct avp_info *avp, const char *fw_file)
+{
+	const struct firmware *avp_fw;
+	int ret;
+	struct trpc_endpoint *ep;
+
+	avp->nvmap_libs = nvmap_create_client(nvmap_dev, "avp_libs");
+	if (IS_ERR(avp->nvmap_libs)) {
+		pr_err("%s: cannot create libs nvmap client\n", __func__);
+		ret = PTR_ERR(avp->nvmap_libs);
+		goto err_nvmap_create_libs_client;
+	}
+
+	/* put the address of the shared mem area into the mailbox for AVP
+	 * to read out when its kernel boots. */
+	mbox_writel(avp->msg, MBOX_TO_AVP);
+
+	ret = request_firmware(&avp_fw, fw_file, avp->misc_dev.this_device);
+	if (ret) {
+		pr_err("%s: Cannot read firmware '%s'\n", __func__, fw_file);
+		goto err_req_fw;
+	}
+	/* kernel_data is a fixed SZ_1M allocation: refuse anything bigger */
+	if (avp_fw->size > SZ_1M) {
+		pr_err("%s: firmware '%s' too large (%zu bytes)\n", __func__,
+		       fw_file, avp_fw->size);
+		ret = -EINVAL;
+		release_firmware(avp_fw);
+		goto err_req_fw;
+	}
+	pr_info("%s: read firmware from '%s' (%zu bytes)\n", __func__,
+		fw_file, avp_fw->size);
+	memcpy(avp->kernel_data, avp_fw->data, avp_fw->size);
+	memset(avp->kernel_data + avp_fw->size, 0, SZ_1M - avp_fw->size);
+	wmb();
+	release_firmware(avp_fw);
+
+	ret = avp_reset(avp, AVP_KERNEL_VIRT_BASE);
+	if (ret) {
+		pr_err("%s: cannot reset the AVP.. aborting..\n", __func__);
+		goto err_reset;
+	}
+
+	enable_irq(avp->mbox_from_avp_pend_irq);
+	/* Initialize the avp_svc *first*. This creates RPC_CPU_PORT to be
+	 * ready for remote commands. Then, connect to the
+	 * remote RPC_AVP_PORT to be able to send library load/unload and
+	 * suspend commands to it */
+	ret = avp_svc_start(avp->avp_svc);
+	if (ret)
+		goto err_avp_svc_start;
+
+	ep = trpc_create_connect(avp->rpc_node, "RPC_AVP_PORT", NULL,
+				 NULL, -1);
+	if (IS_ERR(ep)) {
+		pr_err("%s: can't connect to RPC_AVP_PORT server\n", __func__);
+		ret = PTR_ERR(ep);
+		goto err_rpc_avp_port;
+	}
+	avp->avp_ep = ep;
+
+	avp->initialized = true;
+	smp_wmb();
+	pr_info("%s: avp init done\n", __func__);
+	return 0;
+
+err_rpc_avp_port:
+	avp_svc_stop(avp->avp_svc);
+err_avp_svc_start:
+	disable_irq(avp->mbox_from_avp_pend_irq);
+err_reset:
+	avp_halt(avp);
+err_req_fw:
+	nvmap_client_put(avp->nvmap_libs);
+err_nvmap_create_libs_client:
+	avp->nvmap_libs = NULL;
+	return ret;
+}
+
+/* Tear down a running AVP: halt it, close every remaining remote
+ * endpoint, stop the service thread, and release loaded libraries.
+ * 'shutdown' is set first so the ISR and avp_trpc_close() become no-ops
+ * while we walk the endpoint tree. */
+static void avp_uninit(struct avp_info *avp)
+{
+	unsigned long flags;
+	struct rb_node *n;
+	struct remote_info *rinfo;
+
+	spin_lock_irqsave(&avp->state_lock, flags);
+	avp->initialized = false;
+	avp->shutdown = true;
+	spin_unlock_irqrestore(&avp->state_lock, flags);
+
+	disable_irq(avp->mbox_from_avp_pend_irq);
+	cancel_work_sync(&avp->recv_work);
+
+	avp_halt(avp);
+
+	/* drop state_lock around each close since remote_close may sleep */
+	spin_lock_irqsave(&avp->state_lock, flags);
+	while ((n = rb_first(&avp->endpoints)) != NULL) {
+		rinfo = rb_entry(n, struct remote_info, rb_node);
+		rinfo_get(rinfo);
+		remote_remove(avp, rinfo);
+		spin_unlock_irqrestore(&avp->state_lock, flags);
+
+		remote_close(rinfo);
+		rinfo_put(rinfo);
+
+		spin_lock_irqsave(&avp->state_lock, flags);
+	}
+	spin_unlock_irqrestore(&avp->state_lock, flags);
+
+	avp_svc_stop(avp->avp_svc);
+
+	if (avp->avp_ep) {
+		trpc_close(avp->avp_ep);
+		avp->avp_ep = NULL;
+	}
+
+	libs_cleanup(avp);
+
+	avp->shutdown = false;
+	smp_wmb();
+}
+
+/* returns the remote lib handle in lib->handle */
+static int _load_lib(struct avp_info *avp, struct tegra_avp_lib *lib)
+{
+ struct svc_lib_attach svc;
+ struct svc_lib_attach_resp resp;
+ const struct firmware *fw;
+ void *args;
+ struct nvmap_handle_ref *lib_handle;
+ void *lib_data;
+ unsigned long lib_phys;
+ int ret;
+
+ pr_info("avp_lib: loading library %s\n", lib->name);
+
+ args = kmalloc(lib->args_len, GFP_KERNEL);
+ if (!args) {
+ pr_err("avp_lib: can't alloc mem for args (%d)\n",
+ lib->args_len);
+ return -ENOMEM;
+ }
+ if (copy_from_user(args, lib->args, lib->args_len)) {
+ pr_err("avp_lib: can't copy lib args\n");
+ ret = -EFAULT;
+ goto err_cp_args;
+ }
+
+ ret = request_firmware(&fw, lib->name, avp->misc_dev.this_device);
+ if (ret) {
+ pr_err("avp_lib: Cannot read firmware '%s'\n", lib->name);
+ goto err_req_fw;
+ }
+
+ lib_handle = nvmap_alloc(avp->nvmap_libs, fw->size, L1_CACHE_BYTES,
+ NVMAP_HANDLE_WRITE_COMBINE);
+ if (IS_ERR(lib_handle)) {
+ pr_err("avp_lib: can't nvmap alloc for lib '%s'\n", lib->name);
+ ret = PTR_ERR(lib_handle);
+ goto err_nvmap_alloc;
+ }
+
+ lib_data = nvmap_mmap(lib_handle);
+ if (!lib_data) {
+ pr_err("avp_lib: can't nvmap map for lib '%s'\n", lib->name);
+ ret = -ENOMEM;
+ goto err_nvmap_mmap;
+ }
+
+ lib_phys = nvmap_pin(avp->nvmap_libs, lib_handle);
+ if (IS_ERR((void *)lib_phys)) {
+ pr_err("avp_lib: can't nvmap pin for lib '%s'\n", lib->name);
+ ret = PTR_ERR(lib_handle);
+ goto err_nvmap_pin;
+ }
+
+ memcpy(lib_data, fw->data, fw->size);
+
+ svc.svc_id = SVC_LIBRARY_ATTACH;
+ svc.address = lib_phys;
+ svc.args_len = lib->args_len;
+ svc.lib_size = fw->size;
+ svc.reason = lib->greedy ? AVP_LIB_REASON_ATTACH_GREEDY :
+ AVP_LIB_REASON_ATTACH;
+ memcpy(svc.args, args, lib->args_len);
+ wmb();
+
+ /* send message, wait for reply */
+ ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+ GFP_KERNEL);
+ if (ret)
+ goto err_send_msg;
+
+ ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
+ sizeof(resp), -1);
+ if (ret != sizeof(resp)) {
+ pr_err("avp_lib: Couldn't get lib load reply (%d)\n", ret);
+ goto err_recv_msg;
+ } else if (resp.err) {
+ pr_err("avp_lib: got remote error (%d) while loading lib %s\n",
+ resp.err, lib->name);
+ ret = -EPROTO;
+ goto err_recv_msg;
+ }
+ lib->handle = resp.lib_id;
+ ret = 0;
+ pr_info("avp_lib: Successfully loaded library %s (lib_id=%x)\n",
+ lib->name, resp.lib_id);
+
+ /* We free the memory here because by this point the AVP has already
+ * requested memory for the library for all the sections since it does
+ * it's own relocation and memory management. So, our allocations were
+ * temporary to hand the library code over to the AVP.
+ */
+
+err_recv_msg:
+err_send_msg:
+ nvmap_unpin(avp->nvmap_libs, lib_handle);
+err_nvmap_pin:
+ nvmap_munmap(lib_handle, lib_data);
+err_nvmap_mmap:
+ nvmap_free(avp->nvmap_libs, lib_handle);
+err_nvmap_alloc:
+ release_firmware(fw);
+err_req_fw:
+err_cp_args:
+ kfree(args);
+ return ret;
+}
+
+/* Send SVC_LIBRARY_DETACH for @handle and wait for the AVP's reply.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix vs. previous revision: a short (positive) trpc_recv_msg return was
+ * propagated as-is, so the ioctl path could return a positive value to
+ * userspace; normalize it to -EIO. */
+static int send_unload_lib_msg(struct avp_info *avp, u32 handle,
+			       const char *name)
+{
+	struct svc_lib_detach svc;
+	struct svc_lib_detach_resp resp;
+	int ret;
+
+	svc.svc_id = SVC_LIBRARY_DETACH;
+	svc.reason = AVP_LIB_REASON_DETACH;
+	svc.lib_id = handle;
+
+	ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+			    GFP_KERNEL);
+	if (ret) {
+		pr_err("avp_lib: can't send unload message to avp for '%s'\n",
+		       name);
+		goto err;
+	}
+
+	ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
+			    sizeof(resp), -1);
+	if (ret != sizeof(resp)) {
+		pr_err("avp_lib: Couldn't get unload reply for '%s' (%d)\n",
+		       name, ret);
+		if (ret >= 0)
+			ret = -EIO;
+	} else if (resp.err) {
+		pr_err("avp_lib: remote error (%d) while unloading lib %s\n",
+		       resp.err, name);
+		ret = -EPROTO;
+	} else
+		ret = 0;
+err:
+	return ret;
+}
+
+/* Look up a loaded library by its remote handle; NULL if not found.
+ * Caller must hold avp->libs_lock. */
+static struct lib_item *_find_lib_locked(struct avp_info *avp, u32 handle)
+{
+	struct lib_item *it;
+
+	list_for_each_entry(it, &avp->libs, list)
+		if (it->handle == handle)
+			return it;
+
+	return NULL;
+}
+
+/* Record a newly loaded library in avp->libs.  Caller must hold
+ * avp->libs_lock.  Returns -ENOMEM if the bookkeeping node can't be
+ * allocated. */
+static int _insert_lib_locked(struct avp_info *avp, u32 handle, char *name)
+{
+	struct lib_item *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+	if (!entry)
+		return -ENOMEM;
+
+	entry->handle = handle;
+	strlcpy(entry->name, name, TEGRA_AVP_LIB_MAX_NAME);
+	list_add_tail(&entry->list, &avp->libs);
+	return 0;
+}
+
+/* Unlink and free a library bookkeeping node.  Caller must hold
+ * avp->libs_lock. */
+static void _delete_lib_locked(struct avp_info *avp, struct lib_item *entry)
+{
+	list_del(&entry->list);
+	kfree(entry);
+}
+
+/* TEGRA_AVP_IOCTL_LOAD_LIB: copy the request from userspace, load the
+ * library on the AVP, copy the resulting handle back, and record it in
+ * avp->libs.  The args_len bound here protects _load_lib's fixed-size
+ * svc.args buffer. */
+static int handle_load_lib_ioctl(struct avp_info *avp, unsigned long arg)
+{
+	struct tegra_avp_lib lib;
+	int ret;
+
+	if (copy_from_user(&lib, (void __user *)arg, sizeof(lib)))
+		return -EFAULT;
+	/* userspace may not have terminated the name */
+	lib.name[TEGRA_AVP_LIB_MAX_NAME - 1] = '\0';
+
+	if (lib.args_len > TEGRA_AVP_LIB_MAX_ARGS) {
+		pr_err("%s: library args too long (%d)\n", __func__,
+		       lib.args_len);
+		return -E2BIG;
+	}
+
+	mutex_lock(&avp->libs_lock);
+	ret = _load_lib(avp, &lib);
+	if (ret)
+		goto err_load_lib;
+
+	if (copy_to_user((void __user *)arg, &lib, sizeof(lib))) {
+		/* TODO: probably need to free the library from remote
+		 * we just loaded */
+		ret = -EFAULT;
+		goto err_copy_to_user;
+	}
+	ret = _insert_lib_locked(avp, lib.handle, lib.name);
+	if (ret) {
+		pr_err("%s: can't insert lib (%d)\n", __func__, ret);
+		goto err_insert_lib;
+	}
+
+	mutex_unlock(&avp->libs_lock);
+	return 0;
+
+err_insert_lib:
+	/* best effort: ask the AVP to drop the library we just attached */
+	send_unload_lib_msg(avp, lib.handle, lib.name);
+err_copy_to_user:
+	mutex_unlock(&avp->libs_lock);
+err_load_lib:
+	return ret;
+}
+
+/* TEGRA_AVP_IOCTL_UNLOAD_LIB: @arg is the remote lib handle.  The local
+ * bookkeeping entry is removed even if the remote detach fails, so a
+ * wedged AVP can't pin entries forever. */
+static int handle_unload_lib_ioctl(struct avp_info *avp, unsigned long arg)
+{
+	struct lib_item *item;
+	int ret;
+
+	mutex_lock(&avp->libs_lock);
+	item = _find_lib_locked(avp, (u32)arg);
+	if (!item) {
+		pr_err("avp_lib: avp lib with handle 0x%x not found\n",
+		       (u32)arg);
+		ret = -ENOENT;
+		goto err_find;
+	}
+	ret = send_unload_lib_msg(avp, item->handle, item->name);
+	if (!ret)
+		DBG(AVP_DBG_TRACE_LIB, "avp_lib: unloaded '%s'\n", item->name);
+	else
+		pr_err("avp_lib: can't unload lib '%s'/0x%x (%d)\n", item->name,
+		       item->handle, ret);
+	_delete_lib_locked(avp, item);
+
+err_find:
+	mutex_unlock(&avp->libs_lock);
+	return ret;
+}
+
+/* Drop all local library bookkeeping and release the libs nvmap client.
+ * Called from avp_uninit(); does not message the (already halted) AVP. */
+static void libs_cleanup(struct avp_info *avp)
+{
+	struct lib_item *cur, *nxt;
+
+	mutex_lock(&avp->libs_lock);
+	list_for_each_entry_safe(cur, nxt, &avp->libs, list)
+		_delete_lib_locked(avp, cur);
+	mutex_unlock(&avp->libs_lock);
+
+	nvmap_client_put(avp->nvmap_libs);
+	avp->nvmap_libs = NULL;
+}
+
+/* Char-dev ioctl entry point: validates the command number and
+ * dispatches to the library load/unload handlers. */
+static long tegra_avp_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	struct avp_info *avp = tegra_avp;
+
+	if (_IOC_TYPE(cmd) != TEGRA_AVP_IOCTL_MAGIC ||
+	    _IOC_NR(cmd) < TEGRA_AVP_IOCTL_MIN_NR ||
+	    _IOC_NR(cmd) > TEGRA_AVP_IOCTL_MAX_NR)
+		return -ENOTTY;
+
+	if (cmd == TEGRA_AVP_IOCTL_LOAD_LIB)
+		return handle_load_lib_ioctl(avp, arg);
+	if (cmd == TEGRA_AVP_IOCTL_UNLOAD_LIB)
+		return handle_unload_lib_ioctl(avp, arg);
+
+	pr_err("avp_lib: Unknown tegra_avp ioctl 0x%x\n", _IOC_NR(cmd));
+	return -ENOTTY;
+}
+
+/* Char-dev open: boots the AVP on first open.  Only a single userspace
+ * client is allowed at a time (enforced via avp->opened under
+ * open_lock). */
+static int tegra_avp_open(struct inode *inode, struct file *file)
+{
+	struct avp_info *avp = tegra_avp;
+	int ret = 0;
+
+	nonseekable_open(inode, file);
+
+	mutex_lock(&avp->open_lock);
+	/* only one userspace client at a time */
+	if (avp->opened) {
+		pr_err("%s: already have client, aborting\n", __func__);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = avp_init(avp, TEGRA_AVP_KERNEL_FW);
+	/* opened only when the AVP actually came up */
+	avp->opened = !ret;
+out:
+	mutex_unlock(&avp->open_lock);
+	return ret;
+}
+
+/* Char-dev release: shuts the AVP down again since the single client is
+ * going away. */
+static int tegra_avp_release(struct inode *inode, struct file *file)
+{
+	struct avp_info *avp = tegra_avp;
+	int ret = 0;
+
+	pr_info("%s: release\n", __func__);
+	mutex_lock(&avp->open_lock);
+	if (!avp->opened) {
+		pr_err("%s: releasing while in invalid state\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	avp_uninit(avp);
+
+	avp->opened = false;
+out:
+	mutex_unlock(&avp->open_lock);
+	return ret;
+}
+
+/* Ask the AVP to back up its IRAM and enter LP0.  The AVP signals
+ * completion by writing non-zero to the word just past the IRAM backup
+ * buffer (the buffer is allocated with 4 extra bytes for this, see
+ * tegra_avp_probe).  Returns 0 on success, -ETIMEDOUT if the AVP did
+ * not acknowledge within 1s.
+ *
+ * Cleanup vs. previous revision: the 'goto err; err: return ret;'
+ * indirection did nothing and was replaced by direct returns. */
+static int avp_enter_lp0(struct avp_info *avp)
+{
+	volatile u32 *avp_suspend_done =
+		avp->iram_backup_data + TEGRA_IRAM_SIZE;
+	struct svc_enter_lp0 svc;
+	unsigned long endtime;
+	int ret;
+
+	svc.svc_id = SVC_ENTER_LP0;
+	svc.src_addr = (u32)TEGRA_IRAM_BASE;
+	svc.buf_addr = (u32)avp->iram_backup_phys;
+	svc.buf_size = TEGRA_IRAM_SIZE;
+
+	*avp_suspend_done = 0;
+	wmb();
+
+	ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+			    GFP_KERNEL);
+	if (ret) {
+		pr_err("%s: cannot send AVP suspend message\n", __func__);
+		return ret;
+	}
+
+	/* poll for up to 1s for the AVP to acknowledge the suspend */
+	endtime = jiffies + msecs_to_jiffies(1000);
+	rmb();
+	while ((*avp_suspend_done == 0) && time_before(jiffies, endtime)) {
+		udelay(10);
+		rmb();
+	}
+
+	rmb();
+	if (*avp_suspend_done == 0) {
+		pr_err("%s: AVP failed to suspend\n", __func__);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/* Platform suspend hook: put the AVP into LP0 and record the resume
+ * address it publishes, so tegra_avp_resume() can restart it there.
+ *
+ * Fix vs. previous revision: error message said "it's" for the
+ * possessive "its". */
+static int tegra_avp_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct avp_info *avp = tegra_avp;
+	unsigned long flags;
+	int ret;
+
+	pr_info("%s()+\n", __func__);
+	spin_lock_irqsave(&avp->state_lock, flags);
+	if (!avp->initialized) {
+		spin_unlock_irqrestore(&avp->state_lock, flags);
+		return 0;
+	}
+	/* blocks new connects in avp_node_try_connect() while suspending */
+	avp->suspending = true;
+	spin_unlock_irqrestore(&avp->state_lock, flags);
+
+	ret = avp_enter_lp0(avp);
+	if (ret)
+		goto err;
+
+	avp->resume_addr = readl(TEGRA_AVP_RESUME_ADDR);
+	if (!avp->resume_addr) {
+		pr_err("%s: AVP failed to set its resume address\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	disable_irq(avp->mbox_from_avp_pend_irq);
+
+	pr_info("avp_suspend: resume_addr=%lx\n", avp->resume_addr);
+	/* clear bit 0 before jumping there -- presumably the ARM Thumb
+	 * bit; TODO confirm against the AVP firmware's convention */
+	avp->resume_addr &= 0xfffffffeUL;
+	pr_info("%s()-\n", __func__);
+
+	return 0;
+
+err:
+	/* TODO: we need to kill the AVP so that when we come back
+	 * it could be reinitialized.. We'd probably need to kill
+	 * the users of it so they don't have the wrong state.
+	 * NOTE(review): avp->suspending is left set on this path, which
+	 * keeps refusing new connections -- confirm that is intended.
+	 */
+	return ret;
+}
+
+/* Platform resume hook: restart the AVP at the resume address captured
+ * during suspend and re-enable its mailbox interrupt. */
+static int tegra_avp_resume(struct platform_device *pdev)
+{
+	struct avp_info *avp = tegra_avp;
+	int ret = 0;
+
+	pr_info("%s()+\n", __func__);
+	smp_rmb();
+	if (!avp->initialized)
+		goto out;
+
+	/* suspend must have recorded a resume address */
+	BUG_ON(!avp->resume_addr);
+
+	avp_reset(avp, avp->resume_addr);
+	avp->resume_addr = 0;
+	avp->suspending = false;
+	smp_wmb();
+	enable_irq(avp->mbox_from_avp_pend_irq);
+
+	pr_info("%s()-\n", __func__);
+
+out:
+	return ret;
+}
+
+/* Userspace interface: single-client char dev (see tegra_avp_open). */
+static const struct file_operations tegra_avp_fops = {
+	.owner		= THIS_MODULE,
+	.open		= tegra_avp_open,
+	.release	= tegra_avp_release,
+	.unlocked_ioctl	= tegra_avp_ioctl,
+};
+
+/* Remote trpc node representing the AVP side of the RPC link. */
+static struct trpc_node avp_trpc_node = {
+	.name		= "avp-remote",
+	.type		= TRPC_NODE_REMOTE,
+	.try_connect	= avp_node_try_connect,
+};
+
+/* Platform probe: allocates all memory the AVP needs (kernel carveout,
+ * IRAM backup buffer, shared message area), sets up locking/workqueue
+ * state, registers the trpc node, service, misc device, and mailbox IRQ.
+ * The AVP itself is not booted here -- that happens in avp_init() on
+ * first open of the char dev. */
+static int tegra_avp_probe(struct platform_device *pdev)
+{
+	void *msg_area;
+	struct avp_info *avp;
+	int ret = 0;
+	int irq;
+
+	irq = platform_get_irq_byname(pdev, "mbox_from_avp_pending");
+	if (irq < 0) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return -EINVAL;
+	}
+
+	avp = kzalloc(sizeof(struct avp_info), GFP_KERNEL);
+	if (!avp) {
+		pr_err("%s: cannot allocate avp_info\n", __func__);
+		return -ENOMEM;
+	}
+
+	avp->nvmap_drv = nvmap_create_client(nvmap_dev, "avp_core");
+	if (IS_ERR(avp->nvmap_drv)) {
+		pr_err("%s: cannot create drv nvmap client\n", __func__);
+		ret = PTR_ERR(avp->nvmap_drv);
+		goto err_nvmap_create_drv_client;
+	}
+
+	/* 1MiB carveout that will hold the AVP kernel image (see avp_init) */
+	avp->kernel_handle = nvmap_alloc(avp->nvmap_drv, SZ_1M, SZ_1M,
+					 NVMAP_HANDLE_WRITE_COMBINE);
+	if (IS_ERR(avp->kernel_handle)) {
+		pr_err("%s: cannot create handle\n", __func__);
+		ret = PTR_ERR(avp->kernel_handle);
+		goto err_nvmap_alloc;
+	}
+
+	avp->kernel_data = nvmap_mmap(avp->kernel_handle);
+	if (!avp->kernel_data) {
+		pr_err("%s: cannot map kernel handle\n", __func__);
+		ret = -ENOMEM;
+		goto err_nvmap_mmap;
+	}
+
+	avp->kernel_phys = nvmap_pin(avp->nvmap_drv, avp->kernel_handle);
+	if (IS_ERR((void *)avp->kernel_phys)) {
+		pr_err("%s: cannot pin kernel handle\n", __func__);
+		ret = PTR_ERR((void *)avp->kernel_phys);
+		goto err_nvmap_pin;
+	}
+
+	/* allocate an extra 4 bytes at the end which AVP uses to signal to
+	 * us that it is done suspending.
+	 */
+	avp->iram_backup_handle =
+		nvmap_alloc(avp->nvmap_drv, TEGRA_IRAM_SIZE + 4,
+			    L1_CACHE_BYTES, NVMAP_HANDLE_WRITE_COMBINE);
+	if (IS_ERR(avp->iram_backup_handle)) {
+		pr_err("%s: cannot create handle for iram backup\n", __func__);
+		ret = PTR_ERR(avp->iram_backup_handle);
+		goto err_iram_nvmap_alloc;
+	}
+	avp->iram_backup_data = nvmap_mmap(avp->iram_backup_handle);
+	if (!avp->iram_backup_data) {
+		pr_err("%s: cannot map iram backup handle\n", __func__);
+		ret = -ENOMEM;
+		goto err_iram_nvmap_mmap;
+	}
+	avp->iram_backup_phys = nvmap_pin(avp->nvmap_drv,
+					  avp->iram_backup_handle);
+	if (IS_ERR((void *)avp->iram_backup_phys)) {
+		pr_err("%s: cannot pin iram backup handle\n", __func__);
+		ret = PTR_ERR((void *)avp->iram_backup_phys);
+		goto err_iram_nvmap_pin;
+	}
+
+	avp->mbox_from_avp_pend_irq = irq;
+	avp->endpoints = RB_ROOT;
+	spin_lock_init(&avp->state_lock);
+	mutex_init(&avp->open_lock);
+	mutex_init(&avp->to_avp_lock);
+	mutex_init(&avp->from_avp_lock);
+	INIT_WORK(&avp->recv_work, process_avp_message);
+
+	mutex_init(&avp->libs_lock);
+	INIT_LIST_HEAD(&avp->libs);
+
+	/* single-threaded: deferred messages must be processed in order */
+	avp->recv_wq = alloc_workqueue("avp-msg-recv",
+				       WQ_NON_REENTRANT | WQ_HIGHPRI, 1);
+	if (!avp->recv_wq) {
+		pr_err("%s: can't create recve workqueue\n", __func__);
+		ret = -ENOMEM;
+		goto err_create_wq;
+	}
+
+	avp->cop_clk = clk_get(&pdev->dev, "cop");
+	if (IS_ERR(avp->cop_clk)) {
+		pr_err("%s: Couldn't get cop clock\n", TEGRA_AVP_NAME);
+		ret = -ENOENT;
+		goto err_get_cop_clk;
+	}
+
+	/* two message areas: CPU->AVP followed by AVP->CPU */
+	msg_area = dma_alloc_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2,
+				      &avp->msg_area_addr, GFP_KERNEL);
+	if (!msg_area) {
+		pr_err("%s: cannot allocate msg_area\n", __func__);
+		ret = -ENOMEM;
+		goto err_alloc_msg_area;
+	}
+	memset(msg_area, 0, AVP_MSG_AREA_SIZE * 2);
+	/* mailbox word: address >> 4 with flag bits in the low nibble
+	 * (the ISR undoes the shift with mbox_msg <<= 4) */
+	avp->msg = ((avp->msg_area_addr >> 4) |
+		    MBOX_MSG_VALID | MBOX_MSG_PENDING_INT_EN);
+	avp->msg_to_avp = msg_area;
+	avp->msg_from_avp = msg_area + AVP_MSG_AREA_SIZE;
+
+	avp_halt(avp);
+
+	avp_trpc_node.priv = avp;
+	ret = trpc_node_register(&avp_trpc_node);
+	if (ret) {
+		pr_err("%s: Can't register avp rpc node\n", __func__);
+		goto err_node_reg;
+	}
+	avp->rpc_node = &avp_trpc_node;
+
+	avp->avp_svc = avp_svc_init(pdev, avp->rpc_node);
+	if (IS_ERR(avp->avp_svc)) {
+		pr_err("%s: Cannot initialize avp_svc\n", __func__);
+		ret = PTR_ERR(avp->avp_svc);
+		goto err_avp_svc_init;
+	}
+
+	avp->misc_dev.minor = MISC_DYNAMIC_MINOR;
+	avp->misc_dev.name = "tegra_avp";
+	avp->misc_dev.fops = &tegra_avp_fops;
+
+	ret = misc_register(&avp->misc_dev);
+	if (ret) {
+		pr_err("%s: Unable to register misc device!\n", TEGRA_AVP_NAME);
+		goto err_misc_reg;
+	}
+
+	ret = request_irq(irq, avp_mbox_pending_isr, 0, TEGRA_AVP_NAME, avp);
+	if (ret) {
+		pr_err("%s: cannot register irq handler\n", __func__);
+		goto err_req_irq_pend;
+	}
+	/* irq stays disabled until avp_init() boots the AVP */
+	disable_irq(avp->mbox_from_avp_pend_irq);
+
+	tegra_avp = avp;
+
+	pr_info("%s: driver registered, kernel %lx(%p), msg area %lx/%lx\n",
+		__func__, avp->kernel_phys, avp->kernel_data,
+		(unsigned long)avp->msg_area_addr,
+		(unsigned long)avp->msg_area_addr + AVP_MSG_AREA_SIZE);
+
+	return 0;
+
+err_req_irq_pend:
+	misc_deregister(&avp->misc_dev);
+err_misc_reg:
+	avp_svc_destroy(avp->avp_svc);
+err_avp_svc_init:
+	trpc_node_unregister(avp->rpc_node);
+err_node_reg:
+	dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, msg_area,
+			  avp->msg_area_addr);
+err_alloc_msg_area:
+	clk_put(avp->cop_clk);
+err_get_cop_clk:
+	destroy_workqueue(avp->recv_wq);
+err_create_wq:
+	nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
+err_iram_nvmap_pin:
+	nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
+err_iram_nvmap_mmap:
+	nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
+err_iram_nvmap_alloc:
+	nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
+err_nvmap_pin:
+	nvmap_munmap(avp->kernel_handle, avp->kernel_data);
+err_nvmap_mmap:
+	nvmap_free(avp->nvmap_drv, avp->kernel_handle);
+err_nvmap_alloc:
+	nvmap_client_put(avp->nvmap_drv);
+err_nvmap_create_drv_client:
+	kfree(avp);
+	tegra_avp = NULL;
+	return ret;
+}
+
+/* Platform remove: refuses to unbind while a client has the char dev
+ * open, then releases everything probe allocated (in reverse order). */
+static int tegra_avp_remove(struct platform_device *pdev)
+{
+	struct avp_info *avp = tegra_avp;
+
+	if (!avp)
+		return 0;
+
+	mutex_lock(&avp->open_lock);
+	if (avp->opened) {
+		mutex_unlock(&avp->open_lock);
+		return -EBUSY;
+	}
+	/* ensure that noone can open while we tear down */
+	avp->opened = true;
+	mutex_unlock(&avp->open_lock);
+
+	misc_deregister(&avp->misc_dev);
+
+	avp_halt(avp);
+
+	avp_svc_destroy(avp->avp_svc);
+	trpc_node_unregister(avp->rpc_node);
+	/* msg_to_avp is the base of the 2x AVP_MSG_AREA_SIZE allocation */
+	dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, avp->msg_to_avp,
+			  avp->msg_area_addr);
+	clk_put(avp->cop_clk);
+	destroy_workqueue(avp->recv_wq);
+	nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
+	nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
+	nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
+	nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
+	nvmap_munmap(avp->kernel_handle, avp->kernel_data);
+	nvmap_free(avp->nvmap_drv, avp->kernel_handle);
+	nvmap_client_put(avp->nvmap_drv);
+	kfree(avp);
+	tegra_avp = NULL;
+	return 0;
+}
+
+/* Platform driver glue; matched by name against the tegra-avp device. */
+static struct platform_driver tegra_avp_driver = {
+	.probe		= tegra_avp_probe,
+	.remove		= tegra_avp_remove,
+	.suspend	= tegra_avp_suspend,
+	.resume		= tegra_avp_resume,
+	.driver	= {
+		.name	= TEGRA_AVP_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+/* Module entry point: register the platform driver. */
+static int __init tegra_avp_init(void)
+{
+	return platform_driver_register(&tegra_avp_driver);
+}
+
+/* Module exit point: unregister the platform driver. */
+static void __exit tegra_avp_exit(void)
+{
+	platform_driver_unregister(&tegra_avp_driver);
+}
+
+module_init(tegra_avp_init);
+module_exit(tegra_avp_exit);
diff --git a/drivers/media/video/tegra/avp/avp.h b/drivers/media/video/tegra/avp/avp.h
new file mode 100644
index 000000000000..f5e4e910097b
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MEDIA_VIDEO_TEGRA_AVP_H
+#define __MEDIA_VIDEO_TEGRA_AVP_H
+
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "trpc.h"
+
+enum {
+ AVP_DBG_TRACE_XPC = 1U << 0,
+ AVP_DBG_TRACE_XPC_IRQ = 1U << 1,
+ AVP_DBG_TRACE_XPC_MSG = 1U << 2,
+ AVP_DBG_TRACE_XPC_CONN = 1U << 3,
+ AVP_DBG_TRACE_SVC = 1U << 4,
+ AVP_DBG_TRACE_TRPC_MSG = 1U << 5,
+ AVP_DBG_TRACE_TRPC_CONN = 1U << 6,
+ AVP_DBG_TRACE_LIB = 1U << 7,
+};
+
+extern u32 avp_debug_mask;
+#define DBG(flag, args...) \
+ do { if (unlikely(avp_debug_mask & (flag))) pr_info(args); } while (0)
+
+struct avp_svc_info;
+
+struct avp_svc_info *avp_svc_init(struct platform_device *pdev,
+ struct trpc_node *rpc_node);
+void avp_svc_destroy(struct avp_svc_info *avp_svc);
+int avp_svc_start(struct avp_svc_info *svc);
+void avp_svc_stop(struct avp_svc_info *svc);
+
+#endif
diff --git a/drivers/media/video/tegra/avp/avp_msg.h b/drivers/media/video/tegra/avp/avp_msg.h
new file mode 100644
index 000000000000..54d3a63793f1
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp_msg.h
@@ -0,0 +1,342 @@
+/* drivers/media/video/tegra/avp/avp_msg.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MEDIA_VIDEO_TEGRA_AVP_MSG_H
+#define __MEDIA_VIDEO_TEGRA_AVP_MSG_H
+
+#include <linux/tegra_avp.h>
+#include <linux/types.h>
+
+/* Note: the port name string is not NUL terminated, so make sure to
+ * allocate appropriate space locally when operating on the string */
+#define XPC_PORT_NAME_LEN 16
+
+#define SVC_ARGS_MAX_LEN 220
+#define SVC_MAX_STRING_LEN 200
+
+#define AVP_ERR_ENOTSUP 0x2
+#define AVP_ERR_EINVAL 0x4
+#define AVP_ERR_ENOMEM 0x6
+#define AVP_ERR_EACCES 0x00030010
+
+enum {
+ SVC_NVMAP_CREATE = 0,
+ SVC_NVMAP_CREATE_RESPONSE = 1,
+ SVC_NVMAP_FREE = 3,
+ SVC_NVMAP_ALLOC = 4,
+ SVC_NVMAP_ALLOC_RESPONSE = 5,
+ SVC_NVMAP_PIN = 6,
+ SVC_NVMAP_PIN_RESPONSE = 7,
+ SVC_NVMAP_UNPIN = 8,
+ SVC_NVMAP_UNPIN_RESPONSE = 9,
+ SVC_NVMAP_GET_ADDRESS = 10,
+ SVC_NVMAP_GET_ADDRESS_RESPONSE = 11,
+ SVC_NVMAP_FROM_ID = 12,
+ SVC_NVMAP_FROM_ID_RESPONSE = 13,
+ SVC_MODULE_CLOCK = 14,
+ SVC_MODULE_CLOCK_RESPONSE = 15,
+ SVC_MODULE_RESET = 16,
+ SVC_MODULE_RESET_RESPONSE = 17,
+ SVC_POWER_REGISTER = 18,
+ SVC_POWER_UNREGISTER = 19,
+ SVC_POWER_STARVATION = 20,
+ SVC_POWER_BUSY_HINT = 21,
+ SVC_POWER_BUSY_HINT_MULTI = 22,
+ SVC_DFS_GETSTATE = 23,
+ SVC_DFS_GETSTATE_RESPONSE = 24,
+ SVC_POWER_RESPONSE = 25,
+ SVC_POWER_MAXFREQ = 26,
+ SVC_ENTER_LP0 = 27,
+ SVC_ENTER_LP0_RESPONSE = 28,
+ SVC_PRINTF = 29,
+ SVC_LIBRARY_ATTACH = 30,
+ SVC_LIBRARY_ATTACH_RESPONSE = 31,
+ SVC_LIBRARY_DETACH = 32,
+ SVC_LIBRARY_DETACH_RESPONSE = 33,
+ SVC_AVP_WDT_RESET = 34,
+ SVC_DFS_GET_CLK_UTIL = 35,
+ SVC_DFS_GET_CLK_UTIL_RESPONSE = 36,
+};
+
+struct svc_msg {
+ u32 svc_id;
+ u8 data[0];
+};
+
+struct svc_common_resp {
+ u32 svc_id;
+ u32 err;
+};
+
+struct svc_printf {
+ u32 svc_id;
+ const char str[SVC_MAX_STRING_LEN];
+};
+
+struct svc_enter_lp0 {
+ u32 svc_id;
+ u32 src_addr;
+ u32 buf_addr;
+ u32 buf_size;
+};
+
+/* nvmap messages */
+struct svc_nvmap_create {
+ u32 svc_id;
+ u32 size;
+};
+
+struct svc_nvmap_create_resp {
+ u32 svc_id;
+ u32 handle_id;
+ u32 err;
+};
+
+enum {
+ AVP_NVMAP_HEAP_EXTERNAL = 1,
+ AVP_NVMAP_HEAP_GART = 2,
+ AVP_NVMAP_HEAP_EXTERNAL_CARVEOUT = 3,
+ AVP_NVMAP_HEAP_IRAM = 4,
+};
+
+struct svc_nvmap_alloc {
+ u32 svc_id;
+ u32 handle_id;
+ u32 heaps[4];
+ u32 num_heaps;
+ u32 align;
+ u32 mapping_type;
+};
+
+struct svc_nvmap_free {
+ u32 svc_id;
+ u32 handle_id;
+};
+
+struct svc_nvmap_pin {
+ u32 svc_id;
+ u32 handle_id;
+};
+
+struct svc_nvmap_pin_resp {
+ u32 svc_id;
+ u32 addr;
+};
+
+struct svc_nvmap_unpin {
+ u32 svc_id;
+ u32 handle_id;
+};
+
+struct svc_nvmap_from_id {
+ u32 svc_id;
+ u32 handle_id;
+};
+
+struct svc_nvmap_get_addr {
+ u32 svc_id;
+ u32 handle_id;
+ u32 offs;
+};
+
+struct svc_nvmap_get_addr_resp {
+ u32 svc_id;
+ u32 addr;
+};
+
+/* library management messages */
+enum {
+ AVP_LIB_REASON_ATTACH = 0,
+ AVP_LIB_REASON_DETACH = 1,
+ AVP_LIB_REASON_ATTACH_GREEDY = 2,
+};
+
+struct svc_lib_attach {
+ u32 svc_id;
+ u32 address;
+ u32 args_len;
+ u32 lib_size;
+ u8 args[SVC_ARGS_MAX_LEN];
+ u32 reason;
+};
+
+struct svc_lib_attach_resp {
+ u32 svc_id;
+ u32 err;
+ u32 lib_id;
+};
+
+struct svc_lib_detach {
+ u32 svc_id;
+ u32 reason;
+ u32 lib_id;
+};
+
+struct svc_lib_detach_resp {
+ u32 svc_id;
+ u32 err;
+};
+
+/* hw module management from the AVP side */
+enum {
+ AVP_MODULE_ID_AVP = 2,
+ AVP_MODULE_ID_VCP = 3,
+ AVP_MODULE_ID_BSEA = 27,
+ AVP_MODULE_ID_VDE = 28,
+ AVP_MODULE_ID_MPE = 29,
+};
+
+struct svc_module_ctrl {
+ u32 svc_id;
+ u32 module_id;
+ u32 client_id;
+ u8 enable;
+};
+
+/* power messages */
+struct svc_pwr_register {
+ u32 svc_id;
+ u32 client_id;
+ u32 unused;
+};
+
+struct svc_pwr_register_resp {
+ u32 svc_id;
+ u32 err;
+ u32 client_id;
+};
+
+struct svc_pwr_starve_hint {
+ u32 svc_id;
+ u32 dfs_clk_id;
+ u32 client_id;
+ u8 starving;
+};
+
+struct svc_pwr_busy_hint {
+ u32 svc_id;
+ u32 dfs_clk_id;
+ u32 client_id;
+ u32 boost_ms; /* duration */
+ u32 boost_freq; /* in khz */
+};
+
+struct svc_pwr_max_freq {
+ u32 svc_id;
+ u32 module_id;
+};
+
+struct svc_pwr_max_freq_resp {
+ u32 svc_id;
+ u32 freq;
+};
+
+/* dfs related messages */
+enum {
+ AVP_DFS_STATE_INVALID = 0,
+ AVP_DFS_STATE_DISABLED = 1,
+ AVP_DFS_STATE_STOPPED = 2,
+ AVP_DFS_STATE_CLOSED_LOOP = 3,
+ AVP_DFS_STATE_PROFILED_LOOP = 4,
+};
+
+struct svc_dfs_get_state_resp {
+ u32 svc_id;
+ u32 state;
+};
+
+enum {
+ AVP_DFS_CLK_CPU = 1,
+ AVP_DFS_CLK_AVP = 2,
+ AVP_DFS_CLK_SYSTEM = 3,
+ AVP_DFS_CLK_AHB = 4,
+ AVP_DFS_CLK_APB = 5,
+ AVP_DFS_CLK_VDE = 6,
+ /* external memory controller */
+ AVP_DFS_CLK_EMC = 7,
+};
+
+struct avp_clk_usage {
+ u32 min;
+ u32 max;
+ u32 curr_min;
+ u32 curr_max;
+ u32 curr;
+ u32 avg; /* average activity; exact semantics unspecified by AVP */
+};
+
+struct svc_dfs_get_clk_util {
+ u32 svc_id;
+ u32 dfs_clk_id;
+};
+
+/* all units are in kHz */
+struct svc_dfs_get_clk_util_resp {
+ u32 svc_id;
+ u32 err;
+ struct avp_clk_usage usage;
+};
+
+/************************/
+
+enum {
+ CMD_ACK = 0,
+ CMD_CONNECT = 2,
+ CMD_DISCONNECT = 3,
+ CMD_MESSAGE = 4,
+ CMD_RESPONSE = 5,
+};
+
+struct msg_data {
+ u32 cmd;
+ u8 data[0];
+};
+
+struct msg_ack {
+ u32 cmd;
+ u32 arg;
+};
+
+struct msg_connect {
+ u32 cmd;
+ u32 port_id;
+ /* not NUL terminated, just 0 padded */
+ char name[XPC_PORT_NAME_LEN];
+};
+
+struct msg_connect_reply {
+ u32 cmd;
+ u32 port_id;
+};
+
+struct msg_disconnect {
+ u32 cmd;
+ u32 port_id;
+};
+
+struct msg_disconnect_reply {
+ u32 cmd;
+ u32 ack;
+};
+
+struct msg_port_data {
+ u32 cmd;
+ u32 port_id;
+ u32 msg_len;
+ u8 data[0];
+};
+
+#endif
diff --git a/drivers/media/video/tegra/avp/avp_svc.c b/drivers/media/video/tegra/avp/avp_svc.c
new file mode 100644
index 000000000000..ea593f7d3704
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp_svc.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+
+#include <mach/clk.h>
+#include <mach/nvmap.h>
+
+#include "../../../../video/tegra/nvmap/nvmap.h"
+
+#include "avp_msg.h"
+#include "trpc.h"
+#include "avp.h"
+
+enum {
+ CLK_REQUEST_VCP = 0,
+ CLK_REQUEST_BSEA = 1,
+ CLK_REQUEST_VDE = 2,
+ NUM_CLK_REQUESTS,
+};
+
+struct avp_module {
+ const char *name;
+ u32 clk_req;
+};
+
+static struct avp_module avp_modules[] = {
+ [AVP_MODULE_ID_VCP] = {
+ .name = "vcp",
+ .clk_req = CLK_REQUEST_VCP,
+ },
+ [AVP_MODULE_ID_BSEA] = {
+ .name = "bsea",
+ .clk_req = CLK_REQUEST_BSEA,
+ },
+ [AVP_MODULE_ID_VDE] = {
+ .name = "vde",
+ .clk_req = CLK_REQUEST_VDE,
+ },
+};
+#define NUM_AVP_MODULES ARRAY_SIZE(avp_modules)
+
+struct avp_clk {
+ struct clk *clk;
+ int refcnt;
+ struct avp_module *mod;
+};
+
+struct avp_svc_info {
+ struct avp_clk clks[NUM_CLK_REQUESTS];
+ /* used for dvfs */
+ struct clk *sclk;
+
+ struct mutex clk_lock;
+
+ struct trpc_endpoint *cpu_ep;
+ struct task_struct *svc_thread;
+
+ /* client for remote allocations, for easy tear down */
+ struct nvmap_client *nvmap_remote;
+ struct trpc_node *rpc_node;
+};
+
+static void do_svc_nvmap_create(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_create *msg = (struct svc_nvmap_create *)_msg;
+ struct svc_nvmap_create_resp resp;
+ struct nvmap_handle_ref *handle;
+ u32 handle_id = 0;
+ u32 err = 0;
+
+ handle = nvmap_create_handle(avp_svc->nvmap_remote, msg->size);
+ if (unlikely(IS_ERR(handle))) {
+ pr_err("avp_svc: error creating handle (%d bytes) for remote\n",
+ msg->size);
+ err = AVP_ERR_ENOMEM;
+ } else
+ handle_id = (u32)nvmap_ref_to_id(handle);
+
+ resp.svc_id = SVC_NVMAP_CREATE_RESPONSE;
+ resp.err = err;
+ resp.handle_id = handle_id;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+ /* TODO: do we need to put the handle if send_msg failed? */
+}
+
+static void do_svc_nvmap_alloc(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_alloc *msg = (struct svc_nvmap_alloc *)_msg;
+ struct svc_common_resp resp;
+ struct nvmap_handle *handle;
+ u32 err = 0;
+ u32 heap_mask = 0;
+ int i;
+ size_t align;
+
+ handle = nvmap_get_handle_id(avp_svc->nvmap_remote, msg->handle_id);
+ if (IS_ERR(handle)) {
+ pr_err("avp_svc: unknown remote handle 0x%x\n", msg->handle_id);
+ err = AVP_ERR_EACCES;
+ goto out;
+ }
+
+ if (msg->num_heaps > 4) {
+ pr_err("avp_svc: invalid remote alloc request (%d heaps?!)\n",
+ msg->num_heaps);
+ /* TODO: should we error out instead ? */
+ msg->num_heaps = 0;
+ }
+ if (msg->num_heaps == 0)
+ heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC | NVMAP_HEAP_SYSMEM;
+
+ for (i = 0; i < msg->num_heaps; i++) {
+ switch (msg->heaps[i]) {
+ case AVP_NVMAP_HEAP_EXTERNAL:
+ heap_mask |= NVMAP_HEAP_SYSMEM;
+ break;
+ case AVP_NVMAP_HEAP_GART:
+ heap_mask |= NVMAP_HEAP_IOVMM;
+ break;
+ case AVP_NVMAP_HEAP_EXTERNAL_CARVEOUT:
+ heap_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
+ break;
+ case AVP_NVMAP_HEAP_IRAM:
+ heap_mask |= NVMAP_HEAP_CARVEOUT_IRAM;
+ break;
+ default:
+ break;
+ }
+ }
+
+ align = max_t(size_t, L1_CACHE_BYTES, msg->align);
+ err = nvmap_alloc_handle_id(avp_svc->nvmap_remote, msg->handle_id,
+ heap_mask, align, 0);
+ nvmap_handle_put(handle);
+ if (err) {
+ pr_err("avp_svc: can't allocate for handle 0x%x (%d)\n",
+ msg->handle_id, err);
+ err = AVP_ERR_ENOMEM;
+ }
+
+out:
+ resp.svc_id = SVC_NVMAP_ALLOC_RESPONSE;
+ resp.err = err;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_nvmap_free(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_free *msg = (struct svc_nvmap_free *)_msg;
+
+ nvmap_free_handle_id(avp_svc->nvmap_remote, msg->handle_id);
+}
+
+static void do_svc_nvmap_pin(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_pin *msg = (struct svc_nvmap_pin *)_msg;
+ struct svc_nvmap_pin_resp resp;
+ struct nvmap_handle_ref *handle;
+ unsigned long addr = ~0UL;
+ unsigned long id = msg->handle_id;
+ int err;
+
+ handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote, id);
+ if (IS_ERR(handle)) {
+ pr_err("avp_svc: can't dup handle %lx\n", id);
+ goto out;
+ }
+ err = nvmap_pin_ids(avp_svc->nvmap_remote, 1, &id);
+ if (err) {
+ pr_err("avp_svc: can't pin for handle %lx (%d)\n", id, err);
+ goto out;
+ }
+ addr = nvmap_handle_address(avp_svc->nvmap_remote, id);
+
+out:
+ resp.svc_id = SVC_NVMAP_PIN_RESPONSE;
+ resp.addr = addr;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_nvmap_unpin(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_unpin *msg = (struct svc_nvmap_unpin *)_msg;
+ struct svc_common_resp resp;
+ unsigned long id = msg->handle_id;
+
+ nvmap_unpin_ids(avp_svc->nvmap_remote, 1, &id);
+ nvmap_free_handle_id(avp_svc->nvmap_remote, id);
+
+ resp.svc_id = SVC_NVMAP_UNPIN_RESPONSE;
+ resp.err = 0;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_nvmap_from_id(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_from_id *msg = (struct svc_nvmap_from_id *)_msg;
+ struct svc_common_resp resp;
+ struct nvmap_handle_ref *handle;
+ int err = 0;
+
+ handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote,
+ msg->handle_id);
+ if (IS_ERR(handle)) {
+ pr_err("avp_svc: can't duplicate handle for id 0x%x (%d)\n",
+ msg->handle_id, (int)PTR_ERR(handle));
+ err = AVP_ERR_ENOMEM;
+ }
+
+ resp.svc_id = SVC_NVMAP_FROM_ID_RESPONSE;
+ resp.err = err;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_nvmap_get_addr(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_get_addr *msg = (struct svc_nvmap_get_addr *)_msg;
+ struct svc_nvmap_get_addr_resp resp;
+
+ resp.svc_id = SVC_NVMAP_GET_ADDRESS_RESPONSE;
+ resp.addr = nvmap_handle_address(avp_svc->nvmap_remote, msg->handle_id);
+ resp.addr += msg->offs;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_pwr_register(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_pwr_register *msg = (struct svc_pwr_register *)_msg;
+ struct svc_pwr_register_resp resp;
+
+ resp.svc_id = SVC_POWER_RESPONSE;
+ resp.err = 0;
+ resp.client_id = msg->client_id;
+
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static struct avp_module *find_avp_module(struct avp_svc_info *avp_svc, u32 id)
+{
+ if (id < NUM_AVP_MODULES && avp_modules[id].name)
+ return &avp_modules[id];
+ return NULL;
+}
+
+static void do_svc_module_reset(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
+ struct svc_common_resp resp;
+ struct avp_module *mod;
+ struct avp_clk *aclk;
+
+ mod = find_avp_module(avp_svc, msg->module_id);
+ if (!mod) {
+ if (msg->module_id == AVP_MODULE_ID_AVP)
+ pr_err("avp_svc: AVP suicidal?!?!\n");
+ else
+ pr_err("avp_svc: Unknown module reset requested: %d\n",
+ msg->module_id);
+ /* other side doesn't handle errors for reset */
+ resp.err = 0;
+ goto send_response;
+ }
+ pr_info("avp_svc: module reset: %s\n", mod->name);
+
+ aclk = &avp_svc->clks[mod->clk_req];
+ tegra_periph_reset_assert(aclk->clk);
+ udelay(10);
+ tegra_periph_reset_deassert(aclk->clk);
+ resp.err = 0;
+
+send_response:
+ resp.svc_id = SVC_MODULE_RESET_RESPONSE;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_module_clock(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
+ struct svc_common_resp resp;
+ struct avp_module *mod;
+ struct avp_clk *aclk;
+
+ mod = find_avp_module(avp_svc, msg->module_id);
+ if (!mod) {
+ pr_err("avp_svc: unknown module clock requested: %d\n",
+ msg->module_id);
+ resp.err = AVP_ERR_EINVAL;
+ goto send_response;
+ }
+ pr_info("avp_svc: module clock: %s %s\n", mod->name,
+ msg->enable ? "on" : "off");
+
+ mutex_lock(&avp_svc->clk_lock);
+ aclk = &avp_svc->clks[mod->clk_req];
+ if (msg->enable) {
+ if (aclk->refcnt++ == 0) {
+ clk_enable(avp_svc->sclk);
+ clk_enable(aclk->clk);
+ }
+ } else {
+ if (unlikely(aclk->refcnt == 0)) {
+ pr_err("avp_svc: unbalanced clock disable for '%s'\n",
+ aclk->mod->name);
+ } else if (--aclk->refcnt == 0) {
+ clk_disable(aclk->clk);
+ clk_disable(avp_svc->sclk);
+ }
+ }
+ mutex_unlock(&avp_svc->clk_lock);
+ resp.err = 0;
+
+send_response:
+ resp.svc_id = SVC_MODULE_CLOCK_RESPONSE;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_null_response(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len, u32 resp_svc_id)
+{
+ struct svc_common_resp resp;
+ resp.svc_id = resp_svc_id;
+ resp.err = 0;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_dfs_get_state(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_dfs_get_state_resp resp;
+ resp.svc_id = SVC_DFS_GETSTATE_RESPONSE;
+ resp.state = AVP_DFS_STATE_STOPPED;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_dfs_get_clk_util(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_dfs_get_clk_util_resp resp;
+
+ resp.svc_id = SVC_DFS_GET_CLK_UTIL_RESPONSE;
+ resp.err = 0;
+ memset(&resp.usage, 0, sizeof(struct avp_clk_usage));
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_pwr_max_freq(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_pwr_max_freq_resp resp;
+
+ resp.svc_id = SVC_POWER_MAXFREQ;
+ resp.freq = 0;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_printf(struct avp_svc_info *avp_svc, struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_printf *msg = (struct svc_printf *)_msg;
+ char tmp_str[SVC_MAX_STRING_LEN];
+
+ /* ensure we null terminate the source */
+ strlcpy(tmp_str, msg->str, SVC_MAX_STRING_LEN);
+ pr_info("[AVP]: %s", tmp_str);
+}
+
+static int dispatch_svc_message(struct avp_svc_info *avp_svc,
+ struct svc_msg *msg,
+ size_t len)
+{
+ int ret = 0;
+
+ switch (msg->svc_id) {
+ case SVC_NVMAP_CREATE:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_create\n", __func__);
+ do_svc_nvmap_create(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_ALLOC:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_alloc\n", __func__);
+ do_svc_nvmap_alloc(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_FREE:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_free\n", __func__);
+ do_svc_nvmap_free(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_PIN:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_pin\n", __func__);
+ do_svc_nvmap_pin(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_UNPIN:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_unpin\n", __func__);
+ do_svc_nvmap_unpin(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_FROM_ID:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_from_id\n", __func__);
+ do_svc_nvmap_from_id(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_GET_ADDRESS:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_get_addr\n", __func__);
+ do_svc_nvmap_get_addr(avp_svc, msg, len);
+ break;
+ case SVC_POWER_REGISTER:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power_register\n", __func__);
+ do_svc_pwr_register(avp_svc, msg, len);
+ break;
+ case SVC_POWER_UNREGISTER:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power_unregister\n", __func__);
+ /* nothing to do */
+ break;
+ case SVC_POWER_BUSY_HINT_MULTI:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power_busy_hint_multi\n",
+ __func__);
+ /* nothing to do */
+ break;
+ case SVC_POWER_BUSY_HINT:
+ case SVC_POWER_STARVATION:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power busy/starve hint\n",
+ __func__);
+ do_svc_null_response(avp_svc, msg, len, SVC_POWER_RESPONSE);
+ break;
+ case SVC_POWER_MAXFREQ:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power get_max_freq\n",
+ __func__);
+ do_svc_pwr_max_freq(avp_svc, msg, len);
+ break;
+ case SVC_DFS_GETSTATE:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got dfs_get_state\n", __func__);
+ do_svc_dfs_get_state(avp_svc, msg, len);
+ break;
+ case SVC_MODULE_RESET:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got module_reset\n", __func__);
+ do_svc_module_reset(avp_svc, msg, len);
+ break;
+ case SVC_MODULE_CLOCK:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got module_clock\n", __func__);
+ do_svc_module_clock(avp_svc, msg, len);
+ break;
+ case SVC_DFS_GET_CLK_UTIL:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got get_clk_util\n", __func__);
+ do_svc_dfs_get_clk_util(avp_svc, msg, len);
+ break;
+ case SVC_PRINTF:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got remote printf\n", __func__);
+ do_svc_printf(avp_svc, msg, len);
+ break;
+ case SVC_AVP_WDT_RESET:
+ pr_err("avp_svc: AVP has been reset by watchdog\n");
+ break;
+ default:
+ pr_err("avp_svc: invalid SVC call 0x%x\n", msg->svc_id);
+ ret = -ENOMSG;
+ break;
+ }
+
+ return ret;
+}
+
+static int avp_svc_thread(void *data)
+{
+ struct avp_svc_info *avp_svc = data;
+ u8 buf[TEGRA_RPC_MAX_MSG_LEN];
+ struct svc_msg *msg = (struct svc_msg *)buf;
+ int ret;
+
+ BUG_ON(!avp_svc->cpu_ep);
+
+ ret = trpc_wait_peer(avp_svc->cpu_ep, -1);
+ if (ret) {
+ /* XXX: teardown?! */
+ pr_err("%s: no connection from AVP (%d)\n", __func__, ret);
+ goto err;
+ }
+
+ pr_info("%s: got remote peer\n", __func__);
+
+ while (!kthread_should_stop()) {
+ DBG(AVP_DBG_TRACE_SVC, "%s: waiting for message\n", __func__);
+ ret = trpc_recv_msg(avp_svc->rpc_node, avp_svc->cpu_ep, buf,
+ TEGRA_RPC_MAX_MSG_LEN, -1);
+ DBG(AVP_DBG_TRACE_SVC, "%s: got message\n", __func__);
+ if (ret < 0) {
+ pr_err("%s: couldn't receive msg\n", __func__);
+ /* XXX: port got closed? we should exit? */
+ goto err;
+ } else if (!ret) {
+ pr_err("%s: received msg of len 0?!\n", __func__);
+ continue;
+ }
+ dispatch_svc_message(avp_svc, msg, ret);
+ }
+
+err:
+ trpc_put(avp_svc->cpu_ep);
+ pr_info("%s: done\n", __func__);
+ return ret;
+}
+
+int avp_svc_start(struct avp_svc_info *avp_svc)
+{
+ struct trpc_endpoint *ep;
+ int ret;
+
+ avp_svc->nvmap_remote = nvmap_create_client(nvmap_dev, "avp_remote");
+ if (IS_ERR(avp_svc->nvmap_remote)) {
+ pr_err("%s: cannot create remote nvmap client\n", __func__);
+ ret = PTR_ERR(avp_svc->nvmap_remote);
+ goto err_nvmap_create_remote_client;
+ }
+
+ ep = trpc_create(avp_svc->rpc_node, "RPC_CPU_PORT", NULL, NULL);
+ if (IS_ERR(ep)) {
+ pr_err("%s: can't create RPC_CPU_PORT\n", __func__);
+ ret = PTR_ERR(ep);
+ goto err_cpu_port_create;
+ }
+
+ /* TODO: protect this */
+ avp_svc->cpu_ep = ep;
+
+ /* the service thread should get an extra reference for the port */
+ trpc_get(avp_svc->cpu_ep);
+ avp_svc->svc_thread = kthread_run(avp_svc_thread, avp_svc,
+ "avp_svc_thread");
+ if (IS_ERR_OR_NULL(avp_svc->svc_thread)) {
+ avp_svc->svc_thread = NULL;
+ pr_err("%s: can't create svc thread\n", __func__);
+ ret = -ENOMEM;
+ goto err_kthread;
+ }
+ return 0;
+
+err_kthread:
+ trpc_close(avp_svc->cpu_ep);
+ trpc_put(avp_svc->cpu_ep);
+ avp_svc->cpu_ep = NULL;
+err_cpu_port_create:
+ nvmap_client_put(avp_svc->nvmap_remote);
+err_nvmap_create_remote_client:
+ avp_svc->nvmap_remote = NULL;
+ return ret;
+}
+
+void avp_svc_stop(struct avp_svc_info *avp_svc)
+{
+ int ret;
+ int i;
+
+ trpc_close(avp_svc->cpu_ep);
+ ret = kthread_stop(avp_svc->svc_thread);
+ if (ret == -EINTR) {
+ /* the thread never started, drop its extra reference */
+ trpc_put(avp_svc->cpu_ep);
+ }
+ avp_svc->cpu_ep = NULL;
+
+ nvmap_client_put(avp_svc->nvmap_remote);
+ avp_svc->nvmap_remote = NULL;
+
+ mutex_lock(&avp_svc->clk_lock);
+ for (i = 0; i < NUM_CLK_REQUESTS; i++) {
+ struct avp_clk *aclk = &avp_svc->clks[i];
+ BUG_ON(aclk->refcnt < 0);
+ if (aclk->refcnt > 0) {
+ pr_info("%s: remote left clock '%s' on\n", __func__,
+ aclk->mod->name);
+ clk_disable(aclk->clk);
+ }
+ aclk->refcnt = 0;
+ }
+ mutex_unlock(&avp_svc->clk_lock);
+}
+
+struct avp_svc_info *avp_svc_init(struct platform_device *pdev,
+ struct trpc_node *rpc_node)
+{
+ struct avp_svc_info *avp_svc;
+ int ret;
+ int i;
+ int cnt = 0;
+
+ BUG_ON(!rpc_node);
+
+ avp_svc = kzalloc(sizeof(struct avp_svc_info), GFP_KERNEL);
+ if (!avp_svc) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ BUILD_BUG_ON(NUM_CLK_REQUESTS > BITS_PER_LONG);
+
+ for (i = 0; i < NUM_AVP_MODULES; i++) {
+ struct avp_module *mod = &avp_modules[i];
+ struct clk *clk;
+ if (!mod->name)
+ continue;
+ BUG_ON(mod->clk_req >= NUM_CLK_REQUESTS ||
+ cnt++ >= NUM_CLK_REQUESTS);
+
+ clk = clk_get(&pdev->dev, mod->name);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("avp_svc: Couldn't get required clocks\n");
+ goto err_get_clks;
+ }
+ avp_svc->clks[mod->clk_req].clk = clk;
+ avp_svc->clks[mod->clk_req].mod = mod;
+ avp_svc->clks[mod->clk_req].refcnt = 0;
+ }
+
+ avp_svc->sclk = clk_get(&pdev->dev, "sclk");
+ if (IS_ERR(avp_svc->sclk)) {
+ pr_err("avp_svc: Couldn't get sclk for dvfs\n");
+ ret = -ENOENT;
+ goto err_get_clks;
+ }
+ avp_svc->rpc_node = rpc_node;
+
+ mutex_init(&avp_svc->clk_lock);
+
+ return avp_svc;
+
+err_get_clks:
+ for (i = 0; i < NUM_CLK_REQUESTS; i++)
+ if (avp_svc->clks[i].clk)
+ clk_put(avp_svc->clks[i].clk);
+ if (!IS_ERR_OR_NULL(avp_svc->sclk))
+ clk_put(avp_svc->sclk);
+err_alloc:
+ return ERR_PTR(ret);
+}
+
+void avp_svc_destroy(struct avp_svc_info *avp_svc)
+{
+ int i;
+
+ for (i = 0; i < NUM_CLK_REQUESTS; i++)
+ clk_put(avp_svc->clks[i].clk);
+ clk_put(avp_svc->sclk);
+
+ kfree(avp_svc);
+}
diff --git a/drivers/media/video/tegra/avp/headavp.S b/drivers/media/video/tegra/avp/headavp.S
new file mode 100644
index 000000000000..5304067f0d83
--- /dev/null
+++ b/drivers/media/video/tegra/avp/headavp.S
@@ -0,0 +1,66 @@
+/*
+ * drivers/media/video/tegra/avp/headavp.S
+ *
+ * AVP kernel launcher stub; programs the AVP MMU and jumps to the
+ * kernel code. Must use ONLY ARMv4 instructions, and must be compiled
+ * in ARM mode.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include "headavp.h"
+
+#define PTE0_COMPARE 0
+/* the default translation will translate any VA within
+ * 0x0010:0000..0x001f:ffff to the (megabyte-aligned) value written to
+ * _tegra_avp_boot_stub_data.map_phys_addr
+ */
+#define PTE0_DEFAULT (AVP_KERNEL_VIRT_BASE | 0x3ff0)
+
+#define PTE0_TRANSLATE 4
+
+#define TRANSLATE_DATA (1 << 11)
+#define TRANSLATE_CODE (1 << 10)
+#define TRANSLATE_WR (1 << 9)
+#define TRANSLATE_RD (1 << 8)
+#define TRANSLATE_HIT (1 << 7)
+#define TRANSLATE_EN (1 << 2)
+
+#define TRANSLATE_OPT (TRANSLATE_DATA | TRANSLATE_CODE | TRANSLATE_WR | \
+ TRANSLATE_RD | TRANSLATE_HIT)
+
+ENTRY(_tegra_avp_boot_stub)
+ adr r4, _tegra_avp_boot_stub_data
+ ldmia r4, {r0-r3}
+ str r2, [r0, #PTE0_COMPARE]
+ bic r3, r3, #0xff0
+ bic r3, r3, #0x00f
+ orr r3, r3, #TRANSLATE_OPT
+ orr r3, r3, #TRANSLATE_EN
+ str r3, [r0, #PTE0_TRANSLATE]
+ bx r1
+ b .
+ENDPROC(_tegra_avp_boot_stub)
+ .type _tegra_avp_boot_stub_data, %object
+ENTRY(_tegra_avp_boot_stub_data)
+ .long AVP_MMU_TLB_BASE
+ .long 0xdeadbeef
+ .long PTE0_DEFAULT
+ .long 0xdeadd00d
+ .size _tegra_avp_boot_stub_data, . - _tegra_avp_boot_stub_data
diff --git a/drivers/media/video/tegra/avp/headavp.h b/drivers/media/video/tegra/avp/headavp.h
new file mode 100644
index 000000000000..2bcc3297bfa4
--- /dev/null
+++ b/drivers/media/video/tegra/avp/headavp.h
@@ -0,0 +1,41 @@
+/*
+ * drivers/media/video/tegra/avp/headavp.h
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MACH_TEGRA_HEADAVP_H
+#define _MACH_TEGRA_HEADAVP_H
+
+#define AVP_MMU_TLB_BASE 0xF000F000
+#define AVP_KERNEL_VIRT_BASE 0x00100000
+
+#ifndef __ASSEMBLY__
+
+struct tegra_avp_boot_stub_data {
+ unsigned long mmu_tlb_base;
+ unsigned long jump_addr;
+ unsigned long map_virt_addr;
+ unsigned long map_phys_addr;
+};
+
+extern void _tegra_avp_boot_stub(void);
+extern struct tegra_avp_boot_stub_data _tegra_avp_boot_stub_data;
+
+#endif
+
+#endif
diff --git a/drivers/media/video/tegra/avp/tegra_rpc.c b/drivers/media/video/tegra/avp/tegra_rpc.c
new file mode 100644
index 000000000000..c4e707e9a89e
--- /dev/null
+++ b/drivers/media/video/tegra/avp/tegra_rpc.c
@@ -0,0 +1,738 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * Based on original NVRM code from NVIDIA, and a partial rewrite by:
+ * Gary King <gking@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "trpc.h"
+
+struct trpc_port;
+struct trpc_endpoint {
+ struct list_head msg_list;
+ wait_queue_head_t msg_waitq;
+
+ struct trpc_endpoint *out;
+ struct trpc_port *port;
+
+ struct trpc_node *owner;
+
+ struct completion *connect_done;
+ bool ready;
+ struct trpc_ep_ops *ops;
+ void *priv;
+};
+
+struct trpc_port {
+ char name[TEGRA_RPC_MAX_NAME_LEN];
+
+ /* protects peer and closed state */
+ spinlock_t lock;
+ struct trpc_endpoint peers[2];
+ bool closed;
+
+ /* private */
+ struct kref ref;
+ struct rb_node rb_node;
+};
+
+enum {
+ TRPC_TRACE_MSG = 1U << 0,
+ TRPC_TRACE_CONN = 1U << 1,
+ TRPC_TRACE_PORT = 1U << 2,
+};
+
+static u32 trpc_debug_mask = TRPC_TRACE_CONN | TRPC_TRACE_PORT;
+module_param_named(debug_mask, trpc_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+#define DBG(flag, args...) \
+ do { if (trpc_debug_mask & (flag)) pr_info(args); } while (0)
+
+struct tegra_rpc_info {
+ struct kmem_cache *msg_cache;
+
+ spinlock_t ports_lock;
+ struct rb_root ports;
+
+ struct list_head node_list;
+ struct mutex node_lock;
+};
+
+struct trpc_msg {
+ struct list_head list;
+
+ size_t len;
+ u8 payload[TEGRA_RPC_MAX_MSG_LEN];
+};
+
+static struct tegra_rpc_info *tegra_rpc;
+
+static struct trpc_msg *dequeue_msg_locked(struct trpc_endpoint *ep);
+
+/* a few accessors for the outside world to keep the trpc_endpoint struct
+ * definition private to this module */
+void *trpc_priv(struct trpc_endpoint *ep)
+{
+ return ep->priv;
+}
+
+struct trpc_endpoint *trpc_peer(struct trpc_endpoint *ep)
+{
+ return ep->out;
+}
+
+const char *trpc_name(struct trpc_endpoint *ep)
+{
+ return ep->port->name;
+}
+
+static inline bool is_connected(struct trpc_port *port)
+{
+ return port->peers[0].ready && port->peers[1].ready;
+}
+
+static inline bool is_closed(struct trpc_port *port)
+{
+ return port->closed;
+}
+
+static void rpc_port_free(struct tegra_rpc_info *info, struct trpc_port *port)
+{
+ struct trpc_msg *msg;
+ int i;
+
+ for (i = 0; i < 2; ++i) {
+ struct list_head *list = &port->peers[i].msg_list;
+ while (!list_empty(list)) {
+ msg = list_first_entry(list, struct trpc_msg, list);
+ list_del(&msg->list);
+ kmem_cache_free(info->msg_cache, msg);
+ }
+ }
+ kfree(port);
+}
+
+static void _rpc_port_release(struct kref *kref)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_port *port = container_of(kref, struct trpc_port, ref);
+ unsigned long flags;
+
+ DBG(TRPC_TRACE_PORT, "%s: releasing port '%s' (%p)\n", __func__,
+ port->name, port);
+ spin_lock_irqsave(&info->ports_lock, flags);
+ rb_erase(&port->rb_node, &info->ports);
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+ rpc_port_free(info, port);
+}
+
+/* note that the refcount is actually on the port and not on the endpoint */
+void trpc_put(struct trpc_endpoint *ep)
+{
+ kref_put(&ep->port->ref, _rpc_port_release);
+}
+
+void trpc_get(struct trpc_endpoint *ep)
+{
+ kref_get(&ep->port->ref);
+}
+
+/* Searches the rb_tree for a port with the provided name. If one is not found,
+ * the new port is inserted. Otherwise, the existing port is returned.
+ * Must be called with the ports_lock held */
+static struct trpc_port *rpc_port_find_insert(struct tegra_rpc_info *info,
+ struct trpc_port *port)
+{
+ struct rb_node **p;
+ struct rb_node *parent;
+ struct trpc_port *tmp;
+ int ret = 0;
+
+ p = &info->ports.rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ tmp = rb_entry(parent, struct trpc_port, rb_node);
+
+ ret = strncmp(port->name, tmp->name, TEGRA_RPC_MAX_NAME_LEN);
+ if (ret < 0)
+ p = &(*p)->rb_left;
+ else if (ret > 0)
+ p = &(*p)->rb_right;
+ else
+ return tmp;
+ }
+ rb_link_node(&port->rb_node, parent, p);
+ rb_insert_color(&port->rb_node, &info->ports);
+ DBG(TRPC_TRACE_PORT, "%s: inserted port '%s' (%p)\n", __func__,
+ port->name, port);
+ return port;
+}
+
+static int nodes_try_connect(struct tegra_rpc_info *info,
+ struct trpc_node *src,
+ struct trpc_endpoint *from)
+{
+ struct trpc_node *node;
+ int ret;
+
+ mutex_lock(&info->node_lock);
+ list_for_each_entry(node, &info->node_list, list) {
+ if (!node->try_connect)
+ continue;
+ ret = node->try_connect(node, src, from);
+ if (!ret) {
+ mutex_unlock(&info->node_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&info->node_lock);
+ return -ECONNREFUSED;
+}
+
+static struct trpc_port *rpc_port_alloc(const char *name)
+{
+ struct trpc_port *port;
+ int i;
+
+ port = kzalloc(sizeof(struct trpc_port), GFP_KERNEL);
+ if (!port) {
+ pr_err("%s: can't alloc rpc_port\n", __func__);
+ return NULL;
+ }
+ BUILD_BUG_ON(2 != ARRAY_SIZE(port->peers));
+
+ spin_lock_init(&port->lock);
+ kref_init(&port->ref);
+ strlcpy(port->name, name, TEGRA_RPC_MAX_NAME_LEN);
+ for (i = 0; i < 2; i++) {
+ struct trpc_endpoint *ep = port->peers + i;
+ INIT_LIST_HEAD(&ep->msg_list);
+ init_waitqueue_head(&ep->msg_waitq);
+ ep->port = port;
+ }
+ port->peers[0].out = &port->peers[1];
+ port->peers[1].out = &port->peers[0];
+
+ return port;
+}
+
+/* must be holding the ports lock */
+static inline void handle_port_connected(struct trpc_port *port)
+{
+ int i;
+
+ DBG(TRPC_TRACE_CONN, "tegra_rpc: port '%s' connected\n", port->name);
+
+ for (i = 0; i < 2; i++)
+ if (port->peers[i].connect_done)
+ complete(port->peers[i].connect_done);
+}
+
+static inline void _ready_ep(struct trpc_endpoint *ep,
+ struct trpc_node *owner,
+ struct trpc_ep_ops *ops,
+ void *priv)
+{
+ ep->ready = true;
+ ep->owner = owner;
+ ep->ops = ops;
+ ep->priv = priv;
+}
+
+/* this keeps a reference on the port */
+static struct trpc_endpoint *_create_peer(struct tegra_rpc_info *info,
+ struct trpc_node *owner,
+ struct trpc_endpoint *ep,
+ struct trpc_ep_ops *ops,
+ void *priv)
+{
+ struct trpc_port *port = ep->port;
+ struct trpc_endpoint *peer = ep->out;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ BUG_ON(port->closed);
+ if (peer->ready || !ep->ready) {
+ peer = NULL;
+ goto out;
+ }
+ _ready_ep(peer, owner, ops, priv);
+ if (WARN_ON(!is_connected(port)))
+ pr_warning("%s: created peer but no connection established?!\n",
+ __func__);
+ else
+ handle_port_connected(port);
+ trpc_get(peer);
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return peer;
+}
+
+/* Exported code. This is our interface to the outside world */
+struct trpc_endpoint *trpc_create(struct trpc_node *owner, const char *name,
+ struct trpc_ep_ops *ops, void *priv)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_endpoint *ep;
+ struct trpc_port *new_port;
+ struct trpc_port *port;
+ unsigned long flags;
+
+ BUG_ON(!owner);
+
+ /* we always allocate a new port even if one already might exist. This
+ * is slightly inefficient, but it allows us to do the allocation
+ * without holding our ports_lock spinlock. */
+ new_port = rpc_port_alloc(name);
+ if (!new_port) {
+ pr_err("%s: can't allocate memory for '%s'\n", __func__, name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_irqsave(&info->ports_lock, flags);
+ port = rpc_port_find_insert(info, new_port);
+ if (port != new_port) {
+ rpc_port_free(info, new_port);
+ /* There was already a port by that name in the rb_tree,
+ * so just try to create its peer[1], i.e. peer for peer[0]
+ */
+ ep = _create_peer(info, owner, &port->peers[0], ops, priv);
+ if (!ep) {
+ pr_err("%s: port '%s' is not in a connectable state\n",
+ __func__, port->name);
+ ep = ERR_PTR(-EINVAL);
+ }
+ goto out;
+ }
+ /* don't need to grab the individual port lock here since we must be
+ * holding the ports_lock to add the new element, and never dropped
+ * it, and thus no one could have gotten a reference to this port
+ * and thus the state couldn't have been touched */
+ ep = &port->peers[0];
+ _ready_ep(ep, owner, ops, priv);
+out:
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+ return ep;
+}
+
+struct trpc_endpoint *trpc_create_peer(struct trpc_node *owner,
+ struct trpc_endpoint *ep,
+ struct trpc_ep_ops *ops,
+ void *priv)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_endpoint *peer;
+ unsigned long flags;
+
+ BUG_ON(!owner);
+
+ spin_lock_irqsave(&info->ports_lock, flags);
+ peer = _create_peer(info, owner, ep, ops, priv);
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+ return peer;
+}
+
+/* timeout == -1, waits forever
+ * timeout == 0, return immediately
+ */
+int trpc_connect(struct trpc_endpoint *from, long timeout)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_port *port = from->port;
+ struct trpc_node *src = from->owner;
+ int ret;
+ bool no_retry = !timeout;
+ unsigned long endtime = jiffies + msecs_to_jiffies(timeout);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ /* XXX: add state for connections and ports to prevent invalid
+ * states like multiple connections, etc. ? */
+ if (unlikely(is_closed(port))) {
+ ret = -ECONNRESET;
+ pr_err("%s: can't connect to %s, closed\n", __func__,
+ port->name);
+ goto out;
+ } else if (is_connected(port)) {
+ ret = 0;
+ goto out;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ do {
+ ret = nodes_try_connect(info, src, from);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (is_connected(port)) {
+ ret = 0;
+ goto out;
+ } else if (no_retry) {
+ goto out;
+ } else if (signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ usleep_range(5000, 20000);
+ } while (timeout < 0 || time_before(jiffies, endtime));
+
+ return -ETIMEDOUT;
+
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+/* convenience function for doing this common pattern in a single call */
+struct trpc_endpoint *trpc_create_connect(struct trpc_node *src,
+ char *name,
+ struct trpc_ep_ops *ops,
+ void *priv,
+ long timeout)
+{
+ struct trpc_endpoint *ep;
+ int ret;
+
+ ep = trpc_create(src, name, ops, priv);
+ if (IS_ERR(ep))
+ return ep;
+
+ ret = trpc_connect(ep, timeout);
+ if (ret) {
+ trpc_close(ep);
+ return ERR_PTR(ret);
+ }
+
+ return ep;
+}
+
+void trpc_close(struct trpc_endpoint *ep)
+{
+ struct trpc_port *port = ep->port;
+ struct trpc_endpoint *peer = ep->out;
+ bool need_close_op = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ BUG_ON(!ep->ready);
+ ep->ready = false;
+ port->closed = true;
+ if (peer->ready) {
+ need_close_op = true;
+ /* the peer may be waiting for a message */
+ wake_up_all(&peer->msg_waitq);
+ if (peer->connect_done)
+ complete(peer->connect_done);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ if (need_close_op && peer->ops && peer->ops->close)
+ peer->ops->close(peer);
+ trpc_put(ep);
+}
+
+int trpc_wait_peer(struct trpc_endpoint *ep, long timeout)
+{
+ struct trpc_port *port = ep->port;
+ DECLARE_COMPLETION_ONSTACK(event);
+ int ret;
+ unsigned long flags;
+
+ if (timeout < 0)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else if (timeout > 0)
+ timeout = msecs_to_jiffies(timeout);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (ep->connect_done) {
+ ret = -EBUSY;
+ goto done;
+ } else if (is_connected(port)) {
+ ret = 0;
+ goto done;
+ } else if (is_closed(port)) {
+ ret = -ECONNRESET;
+ goto done;
+ } else if (!timeout) {
+ ret = -EAGAIN;
+ goto done;
+ }
+ ep->connect_done = &event;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ ret = wait_for_completion_interruptible_timeout(&event, timeout);
+
+ spin_lock_irqsave(&port->lock, flags);
+ ep->connect_done = NULL;
+
+ if (is_connected(port)) {
+ ret = 0;
+ } else {
+ if (is_closed(port))
+ ret = -ECONNRESET;
+ else if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ else if (!ret)
+ ret = -ETIMEDOUT;
+ }
+
+done:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+static inline int _ep_id(struct trpc_endpoint *ep)
+{
+ return ep - ep->port->peers;
+}
+
+static int queue_msg(struct trpc_node *src, struct trpc_endpoint *from,
+ void *buf, size_t len, gfp_t gfp_flags)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_endpoint *peer = from->out;
+ struct trpc_port *port = from->port;
+ struct trpc_msg *msg;
+ unsigned long flags;
+ int ret;
+
+ BUG_ON(len > TEGRA_RPC_MAX_MSG_LEN);
+ /* shouldn't be enqueueing to the endpoint */
+ BUG_ON(peer->ops && peer->ops->send);
+
+ DBG(TRPC_TRACE_MSG, "%s: queueing message for %s.%d\n", __func__,
+ port->name, _ep_id(peer));
+
+ msg = kmem_cache_alloc(info->msg_cache, gfp_flags);
+ if (!msg) {
+ pr_err("%s: can't alloc memory for msg\n", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(msg->payload, buf, len);
+ msg->len = len;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (is_closed(port)) {
+ pr_err("%s: cannot send message for closed port %s.%d\n",
+ __func__, port->name, _ep_id(peer));
+ ret = -ECONNRESET;
+ goto err;
+ } else if (!is_connected(port)) {
+ pr_err("%s: cannot send message for unconnected port %s.%d\n",
+ __func__, port->name, _ep_id(peer));
+ ret = -ENOTCONN;
+ goto err;
+ }
+
+ list_add_tail(&msg->list, &peer->msg_list);
+ if (peer->ops && peer->ops->notify_recv)
+ peer->ops->notify_recv(peer);
+ wake_up_all(&peer->msg_waitq);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&port->lock, flags);
+ kmem_cache_free(info->msg_cache, msg);
+ return ret;
+}
+
+/* Returns -ENOMEM if failed to allocate memory for the message. */
+int trpc_send_msg(struct trpc_node *src, struct trpc_endpoint *from,
+ void *buf, size_t len, gfp_t gfp_flags)
+{
+ struct trpc_endpoint *peer = from->out;
+ struct trpc_port *port = from->port;
+
+ BUG_ON(len > TEGRA_RPC_MAX_MSG_LEN);
+
+ DBG(TRPC_TRACE_MSG, "%s: sending message from %s.%d to %s.%d\n",
+ __func__, port->name, _ep_id(from), port->name, _ep_id(peer));
+
+ if (peer->ops && peer->ops->send) {
+ might_sleep();
+ return peer->ops->send(peer, buf, len);
+ } else {
+ might_sleep_if(gfp_flags & __GFP_WAIT);
+ return queue_msg(src, from, buf, len, gfp_flags);
+ }
+}
+
+static inline struct trpc_msg *dequeue_msg_locked(struct trpc_endpoint *ep)
+{
+ struct trpc_msg *msg = NULL;
+
+ if (!list_empty(&ep->msg_list)) {
+ msg = list_first_entry(&ep->msg_list, struct trpc_msg, list);
+ list_del_init(&msg->list);
+ }
+
+ return msg;
+}
+
+static bool __should_wake(struct trpc_endpoint *ep)
+{
+ struct trpc_port *port = ep->port;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&port->lock, flags);
+ ret = !list_empty(&ep->msg_list) || is_closed(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+int trpc_recv_msg(struct trpc_node *src, struct trpc_endpoint *ep,
+ void *buf, size_t buf_len, long timeout)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_port *port = ep->port;
+ struct trpc_msg *msg;
+ size_t len;
+ long ret;
+ unsigned long flags;
+
+ BUG_ON(buf_len > TEGRA_RPC_MAX_MSG_LEN);
+
+ spin_lock_irqsave(&port->lock, flags);
+ /* we allow closed ports to finish receiving already-queued messages */
+ msg = dequeue_msg_locked(ep);
+ if (msg) {
+ goto got_msg;
+ } else if (is_closed(port)) {
+ ret = -ECONNRESET;
+ goto out;
+ } else if (!is_connected(port)) {
+ ret = -ENOTCONN;
+ goto out;
+ }
+
+ if (timeout == 0) {
+ ret = 0;
+ goto out;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ } else {
+ timeout = msecs_to_jiffies(timeout);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ DBG(TRPC_TRACE_MSG, "%s: waiting for message for %s.%d\n", __func__,
+ port->name, _ep_id(ep));
+
+ ret = wait_event_interruptible_timeout(ep->msg_waitq, __should_wake(ep),
+ timeout);
+
+ DBG(TRPC_TRACE_MSG, "%s: woke up for %s\n", __func__, port->name);
+ spin_lock_irqsave(&port->lock, flags);
+ msg = dequeue_msg_locked(ep);
+ if (!msg) {
+ if (is_closed(port))
+ ret = -ECONNRESET;
+ else if (!ret)
+ ret = -ETIMEDOUT;
+ else if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ else
+ pr_err("%s: error (%d) while receiving msg for '%s'\n",
+ __func__, (int)ret, port->name);
+ goto out;
+ }
+
+got_msg:
+ spin_unlock_irqrestore(&port->lock, flags);
+ len = min(buf_len, msg->len);
+ memcpy(buf, msg->payload, len);
+ kmem_cache_free(info->msg_cache, msg);
+ return len;
+
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+int trpc_node_register(struct trpc_node *node)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+
+ if (!info)
+ return -ENOMEM;
+
+ pr_info("%s: Adding '%s' to node list\n", __func__, node->name);
+
+ mutex_lock(&info->node_lock);
+ if (node->type == TRPC_NODE_LOCAL)
+ list_add(&node->list, &info->node_list);
+ else
+ list_add_tail(&node->list, &info->node_list);
+ mutex_unlock(&info->node_lock);
+ return 0;
+}
+
+void trpc_node_unregister(struct trpc_node *node)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+
+ mutex_lock(&info->node_lock);
+ list_del(&node->list);
+ mutex_unlock(&info->node_lock);
+}
+
+static int __init tegra_rpc_init(void)
+{
+ struct tegra_rpc_info *rpc_info;
+ int ret;
+
+ rpc_info = kzalloc(sizeof(struct tegra_rpc_info), GFP_KERNEL);
+ if (!rpc_info) {
+ pr_err("%s: error allocating rpc_info\n", __func__);
+ return -ENOMEM;
+ }
+
+ rpc_info->ports = RB_ROOT;
+ spin_lock_init(&rpc_info->ports_lock);
+ INIT_LIST_HEAD(&rpc_info->node_list);
+ mutex_init(&rpc_info->node_lock);
+
+ rpc_info->msg_cache = KMEM_CACHE(trpc_msg, 0);
+ if (!rpc_info->msg_cache) {
+ pr_err("%s: unable to create message cache\n", __func__);
+ ret = -ENOMEM;
+ goto err_kmem_cache;
+ }
+ tegra_rpc = rpc_info;
+
+ return 0;
+
+err_kmem_cache:
+ kfree(rpc_info);
+ return ret;
+}
+
+subsys_initcall(tegra_rpc_init);
diff --git a/drivers/media/video/tegra/avp/trpc.h b/drivers/media/video/tegra/avp/trpc.h
new file mode 100644
index 000000000000..859c94e7460c
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARM_MACH_TEGRA_RPC_H
+#define __ARM_MACH_TEGRA_RPC_H
+
+#include <linux/list.h>
+#include <linux/tegra_rpc.h>
+
+struct trpc_endpoint;
+struct trpc_ep_ops {
+ /* send is allowed to sleep */
+ int (*send)(struct trpc_endpoint *ep, void *buf, size_t len);
+ /* notify_recv is NOT allowed to sleep */
+ void (*notify_recv)(struct trpc_endpoint *ep);
+ /* close is allowed to sleep */
+ void (*close)(struct trpc_endpoint *ep);
+};
+
+enum {
+ TRPC_NODE_LOCAL,
+ TRPC_NODE_REMOTE,
+};
+
+struct trpc_node {
+ struct list_head list;
+ const char *name;
+ int type;
+ void *priv;
+
+ int (*try_connect)(struct trpc_node *node,
+ struct trpc_node *src,
+ struct trpc_endpoint *from);
+};
+
+struct trpc_endpoint *trpc_peer(struct trpc_endpoint *ep);
+void *trpc_priv(struct trpc_endpoint *ep);
+const char *trpc_name(struct trpc_endpoint *ep);
+
+void trpc_put(struct trpc_endpoint *ep);
+void trpc_get(struct trpc_endpoint *ep);
+
+int trpc_send_msg(struct trpc_node *src, struct trpc_endpoint *ep, void *buf,
+ size_t len, gfp_t gfp_flags);
+int trpc_recv_msg(struct trpc_node *src, struct trpc_endpoint *ep,
+ void *buf, size_t len, long timeout);
+struct trpc_endpoint *trpc_create(struct trpc_node *owner, const char *name,
+ struct trpc_ep_ops *ops, void *priv);
+struct trpc_endpoint *trpc_create_connect(struct trpc_node *src, char *name,
+ struct trpc_ep_ops *ops, void *priv,
+ long timeout);
+int trpc_connect(struct trpc_endpoint *from, long timeout);
+struct trpc_endpoint *trpc_create_peer(struct trpc_node *owner,
+ struct trpc_endpoint *ep,
+ struct trpc_ep_ops *ops,
+ void *priv);
+void trpc_close(struct trpc_endpoint *ep);
+int trpc_wait_peer(struct trpc_endpoint *ep, long timeout);
+
+int trpc_node_register(struct trpc_node *node);
+void trpc_node_unregister(struct trpc_node *node);
+
+#endif
diff --git a/drivers/media/video/tegra/avp/trpc_local.c b/drivers/media/video/tegra/avp/trpc_local.c
new file mode 100644
index 000000000000..5a941a78fc40
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc_local.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * Based on original NVRM code from NVIDIA, and a partial rewrite by
+ * Gary King <gking@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "trpc.h"
+#include "trpc_sema.h"
+
+struct rpc_info {
+ struct trpc_endpoint *rpc_ep;
+ struct file *sema_file;
+};
+
+/* port names reserved for system functions, i.e. communicating with the
+ * AVP */
+static const char reserved_ports[][TEGRA_RPC_MAX_NAME_LEN] = {
+ "RPC_AVP_PORT",
+ "RPC_CPU_PORT",
+};
+static int num_reserved_ports = ARRAY_SIZE(reserved_ports);
+
+static void rpc_notify_recv(struct trpc_endpoint *ep);
+
+/* TODO: do we need to do anything when port is closed from the other side? */
+static struct trpc_ep_ops ep_ops = {
+ .notify_recv = rpc_notify_recv,
+};
+
+static struct trpc_node rpc_node = {
+ .name = "local",
+ .type = TRPC_NODE_LOCAL,
+};
+
+static void rpc_notify_recv(struct trpc_endpoint *ep)
+{
+ struct rpc_info *info = trpc_priv(ep);
+
+ if (WARN_ON(!info))
+ return;
+ if (info->sema_file)
+ trpc_sema_signal(info->sema_file);
+}
+
+static int local_rpc_open(struct inode *inode, struct file *file)
+{
+ struct rpc_info *info;
+
+ info = kzalloc(sizeof(struct rpc_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ nonseekable_open(inode, file);
+ file->private_data = info;
+ return 0;
+}
+
+static int local_rpc_release(struct inode *inode, struct file *file)
+{
+ struct rpc_info *info = file->private_data;
+
+ if (info->rpc_ep)
+ trpc_close(info->rpc_ep);
+ if (info->sema_file)
+ fput(info->sema_file);
+ kfree(info);
+ file->private_data = NULL;
+ return 0;
+}
+
+static int __get_port_desc(struct tegra_rpc_port_desc *desc,
+ unsigned int cmd, unsigned long arg)
+{
+ unsigned int size = _IOC_SIZE(cmd);
+
+ if (size != sizeof(struct tegra_rpc_port_desc))
+ return -EINVAL;
+ if (copy_from_user(desc, (void __user *)arg, sizeof(*desc)))
+ return -EFAULT;
+
+ desc->name[TEGRA_RPC_MAX_NAME_LEN - 1] = '\0';
+ return 0;
+}
+
+static char uniq_name[] = "aaaaaaaa+";
+static const int uniq_len = sizeof(uniq_name) - 1;
+static DEFINE_MUTEX(uniq_lock);
+
+static void _gen_port_name(char *new_name)
+{
+ int i;
+
+ mutex_lock(&uniq_lock);
+ for (i = 0; i < uniq_len - 1; i++) {
+ ++uniq_name[i];
+ if (uniq_name[i] != 'z')
+ break;
+ uniq_name[i] = 'a';
+ }
+ strlcpy(new_name, uniq_name, TEGRA_RPC_MAX_NAME_LEN);
+ mutex_unlock(&uniq_lock);
+}
+
+static int _validate_port_name(const char *name)
+{
+ int i;
+
+ for (i = 0; i < num_reserved_ports; i++)
+ if (!strncmp(name, reserved_ports[i], TEGRA_RPC_MAX_NAME_LEN))
+ return -EINVAL;
+ return 0;
+}
+
+static long local_rpc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct rpc_info *info = file->private_data;
+ struct tegra_rpc_port_desc desc;
+ struct trpc_endpoint *ep;
+ int ret = 0;
+
+ if (_IOC_TYPE(cmd) != TEGRA_RPC_IOCTL_MAGIC ||
+ _IOC_NR(cmd) < TEGRA_RPC_IOCTL_MIN_NR ||
+ _IOC_NR(cmd) > TEGRA_RPC_IOCTL_MAX_NR) {
+ ret = -ENOTTY;
+ goto err;
+ }
+
+ switch (cmd) {
+ case TEGRA_RPC_IOCTL_PORT_CREATE:
+ if (info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = __get_port_desc(&desc, cmd, arg);
+ if (ret)
+ goto err;
+ if (desc.name[0]) {
+ ret = _validate_port_name(desc.name);
+ if (ret)
+ goto err;
+ } else {
+ _gen_port_name(desc.name);
+ }
+ if (desc.notify_fd != -1) {
+ /* grab a reference to the trpc_sema fd */
+ info->sema_file = trpc_sema_get_from_fd(desc.notify_fd);
+ if (IS_ERR(info->sema_file)) {
+ ret = PTR_ERR(info->sema_file);
+ info->sema_file = NULL;
+ goto err;
+ }
+ }
+ ep = trpc_create(&rpc_node, desc.name, &ep_ops, info);
+ if (IS_ERR(ep)) {
+ ret = PTR_ERR(ep);
+ if (info->sema_file)
+ fput(info->sema_file);
+ info->sema_file = NULL;
+ goto err;
+ }
+ info->rpc_ep = ep;
+ break;
+ case TEGRA_RPC_IOCTL_PORT_GET_NAME:
+ if (!info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (copy_to_user((void __user *)arg,
+ trpc_name(info->rpc_ep),
+ TEGRA_RPC_MAX_NAME_LEN)) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ case TEGRA_RPC_IOCTL_PORT_CONNECT:
+ if (!info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = trpc_connect(info->rpc_ep, (long)arg);
+ if (ret) {
+ pr_err("%s: can't connect to '%s' (%d)\n", __func__,
+ trpc_name(info->rpc_ep), ret);
+ goto err;
+ }
+ break;
+ case TEGRA_RPC_IOCTL_PORT_LISTEN:
+ if (!info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = trpc_wait_peer(info->rpc_ep, (long)arg);
+ if (ret) {
+ pr_err("%s: error waiting for peer for '%s' (%d)\n",
+ __func__, trpc_name(info->rpc_ep), ret);
+ goto err;
+ }
+ break;
+ default:
+ pr_err("%s: unknown cmd %d\n", __func__, _IOC_NR(cmd));
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (ret && ret != -ERESTARTSYS)
+ pr_err("tegra_rpc: pid=%d ioctl=%x/%lx (%x) ret=%d\n",
+ current->pid, cmd, arg, _IOC_NR(cmd), ret);
+ return (long)ret;
+}
+
+static ssize_t local_rpc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct rpc_info *info = file->private_data;
+ u8 data[TEGRA_RPC_MAX_MSG_LEN];
+ int ret;
+
+ if (!info)
+ return -EINVAL;
+ else if (count > TEGRA_RPC_MAX_MSG_LEN)
+ return -EINVAL;
+
+ if (copy_from_user(data, buf, count))
+ return -EFAULT;
+
+ ret = trpc_send_msg(&rpc_node, info->rpc_ep, data, count,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t local_rpc_read(struct file *file, char __user *buf, size_t max,
+ loff_t *ppos)
+{
+ struct rpc_info *info = file->private_data;
+ int ret;
+ u8 data[TEGRA_RPC_MAX_MSG_LEN];
+
+ if (max > TEGRA_RPC_MAX_MSG_LEN)
+ return -EINVAL;
+
+ ret = trpc_recv_msg(&rpc_node, info->rpc_ep, data,
+ TEGRA_RPC_MAX_MSG_LEN, 0);
+ if (ret == 0)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else if (ret > max)
+ return -ENOSPC;
+ else if (copy_to_user(buf, data, ret))
+ return -EFAULT;
+
+ return ret;
+}
+
+static const struct file_operations local_rpc_misc_fops = {
+ .owner = THIS_MODULE,
+ .open = local_rpc_open,
+ .release = local_rpc_release,
+ .unlocked_ioctl = local_rpc_ioctl,
+ .write = local_rpc_write,
+ .read = local_rpc_read,
+};
+
+static struct miscdevice local_rpc_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "tegra_rpc",
+ .fops = &local_rpc_misc_fops,
+};
+
+int __init rpc_local_init(void)
+{
+ int ret;
+
+ ret = trpc_sema_init();
+ if (ret) {
+ pr_err("%s: error in trpc_sema_init\n", __func__);
+ goto err_sema_init;
+ }
+
+ ret = misc_register(&local_rpc_misc_device);
+ if (ret) {
+ pr_err("%s: can't register misc device\n", __func__);
+ goto err_misc;
+ }
+
+ ret = trpc_node_register(&rpc_node);
+ if (ret) {
+ pr_err("%s: can't register rpc node\n", __func__);
+ goto err_node_reg;
+ }
+ return 0;
+
+err_node_reg:
+ misc_deregister(&local_rpc_misc_device);
+err_misc:
+err_sema_init:
+ return ret;
+}
+
+module_init(rpc_local_init);
diff --git a/drivers/media/video/tegra/avp/trpc_sema.c b/drivers/media/video/tegra/avp/trpc_sema.c
new file mode 100644
index 000000000000..b8772573d956
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc_sema.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/tegra_sema.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "trpc_sema.h"
+
+struct trpc_sema {
+ wait_queue_head_t wq;
+ spinlock_t lock;
+ int count;
+};
+
+static int rpc_sema_minor = -1;
+
+static inline bool is_trpc_sema_file(struct file *file)
+{
+ dev_t rdev = file->f_dentry->d_inode->i_rdev;
+
+ if (MAJOR(rdev) == MISC_MAJOR && MINOR(rdev) == rpc_sema_minor)
+ return true;
+ return false;
+}
+
+struct file *trpc_sema_get_from_fd(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (unlikely(file == NULL)) {
+ pr_err("%s: fd %d is invalid\n", __func__, fd);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!is_trpc_sema_file(file)) {
+ pr_err("%s: fd (%d) is not a trpc_sema file\n", __func__, fd);
+ fput(file);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return file;
+}
+
+int trpc_sema_signal(struct file *file)
+{
+ struct trpc_sema *info = file->private_data;
+ unsigned long flags;
+
+ if (!info)
+ return -EINVAL;
+
+ spin_lock_irqsave(&info->lock, flags);
+ info->count++;
+ wake_up_interruptible_all(&info->wq);
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+}
+
+static int trpc_sema_wait(struct trpc_sema *info, long *timeleft)
+{
+ unsigned long flags;
+ int ret = 0;
+ unsigned long endtime;
+ long timeout = *timeleft;
+
+ *timeleft = 0;
+ if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ } else if (timeout > 0) {
+ timeout = msecs_to_jiffies(timeout);
+ endtime = jiffies + timeout;
+ }
+
+again:
+ if (timeout)
+ ret = wait_event_interruptible_timeout(info->wq,
+ info->count > 0,
+ timeout);
+ spin_lock_irqsave(&info->lock, flags);
+ if (info->count > 0) {
+ info->count--;
+ ret = 0;
+ } else if (ret == 0 || timeout == 0) {
+ ret = -ETIMEDOUT;
+ } else if (ret < 0) {
+ ret = -EINTR;
+ if (timeout != MAX_SCHEDULE_TIMEOUT &&
+ time_before(jiffies, endtime))
+ *timeleft = jiffies_to_msecs(endtime - jiffies);
+ else
+ *timeleft = 0;
+ } else {
+ /* we woke up but someone else got the semaphore and we have
+ * time left, try again */
+ timeout = ret;
+ spin_unlock_irqrestore(&info->lock, flags);
+ goto again;
+ }
+ spin_unlock_irqrestore(&info->lock, flags);
+ return ret;
+}
+
+static int trpc_sema_open(struct inode *inode, struct file *file)
+{
+ struct trpc_sema *info;
+
+ info = kzalloc(sizeof(struct trpc_sema), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ nonseekable_open(inode, file);
+ init_waitqueue_head(&info->wq);
+ spin_lock_init(&info->lock);
+ file->private_data = info;
+ return 0;
+}
+
+static int trpc_sema_release(struct inode *inode, struct file *file)
+{
+ struct trpc_sema *info = file->private_data;
+
+ file->private_data = NULL;
+ kfree(info);
+ return 0;
+}
+
+static long trpc_sema_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct trpc_sema *info = file->private_data;
+ int ret;
+ long timeout;
+
+ if (_IOC_TYPE(cmd) != TEGRA_SEMA_IOCTL_MAGIC ||
+ _IOC_NR(cmd) < TEGRA_SEMA_IOCTL_MIN_NR ||
+ _IOC_NR(cmd) > TEGRA_SEMA_IOCTL_MAX_NR)
+ return -ENOTTY;
+ else if (!info)
+ return -EINVAL;
+
+ switch (cmd) {
+ case TEGRA_SEMA_IOCTL_WAIT:
+ if (copy_from_user(&timeout, (void __user *)arg, sizeof(long)))
+ return -EFAULT;
+ ret = trpc_sema_wait(info, &timeout);
+ if (ret != -EINTR)
+ break;
+ if (copy_to_user((void __user *)arg, &timeout, sizeof(long)))
+ ret = -EFAULT;
+ break;
+ case TEGRA_SEMA_IOCTL_SIGNAL:
+ ret = trpc_sema_signal(file);
+ break;
+ default:
+ pr_err("%s: Unknown tegra_sema ioctl 0x%x\n", __func__,
+ _IOC_NR(cmd));
+ ret = -ENOTTY;
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations trpc_sema_misc_fops = {
+ .owner = THIS_MODULE,
+ .open = trpc_sema_open,
+ .release = trpc_sema_release,
+ .unlocked_ioctl = trpc_sema_ioctl,
+};
+
+static struct miscdevice trpc_sema_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "tegra_sema",
+ .fops = &trpc_sema_misc_fops,
+};
+
+int __init trpc_sema_init(void)
+{
+ int ret;
+
+ if (rpc_sema_minor >= 0) {
+ pr_err("%s: trpc_sema already registered\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = misc_register(&trpc_sema_misc_device);
+ if (ret) {
+ pr_err("%s: can't register misc device\n", __func__);
+ return ret;
+ }
+
+ rpc_sema_minor = trpc_sema_misc_device.minor;
+ pr_info("%s: registered misc dev %d:%d\n", __func__, MISC_MAJOR,
+ rpc_sema_minor);
+
+ return 0;
+}
diff --git a/drivers/media/video/tegra/avp/trpc_sema.h b/drivers/media/video/tegra/avp/trpc_sema.h
new file mode 100644
index 000000000000..566bbdbe739e
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc_sema.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARM_MACH_TEGRA_RPC_SEMA_H
+#define __ARM_MACH_TEGRA_RPC_SEMA_H
+
+#include <linux/types.h>
+#include <linux/fs.h>
+
+struct file *trpc_sema_get_from_fd(int fd);
+int trpc_sema_signal(struct file *file);
+int __init trpc_sema_init(void);
+
+#endif
diff --git a/drivers/media/video/tegra/tegra_camera.c b/drivers/media/video/tegra/tegra_camera.c
index 76cf2fcb55bc..f310d0f5619f 100644
--- a/drivers/media/video/tegra/tegra_camera.c
+++ b/drivers/media/video/tegra/tegra_camera.c
@@ -1,5 +1,5 @@
/*
- * drivers/media/video/tegra/isp.c
+ * drivers/media/video/tegra/tegra_camera.c
*
* Copyright (C) 2010 Google, Inc.
*
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index b7e97c3bbfa5..4493802a458e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -30,9 +30,12 @@
#define DRIVER_NAME "sdhci-tegra"
+#define SDHCI_VENDOR_CLOCK_CNTRL 0x100
+
struct tegra_sdhci_host {
struct sdhci_host *sdhci;
struct clk *clk;
+ int clk_enabled;
};
static irqreturn_t carddetect_irq(int irq, void *data)
@@ -56,8 +59,31 @@ static int tegra_sdhci_enable_dma(struct sdhci_host *host)
return 0;
}
+static void tegra_sdhci_enable_clock(struct tegra_sdhci_host *host, int enable)
+{
+ if (enable && !host->clk_enabled) {
+ clk_enable(host->clk);
+ sdhci_writeb(host->sdhci, 1, SDHCI_VENDOR_CLOCK_CNTRL);
+ host->clk_enabled = 1;
+ } else if (!enable && host->clk_enabled) {
+ sdhci_writeb(host->sdhci, 0, SDHCI_VENDOR_CLOCK_CNTRL);
+ clk_disable(host->clk);
+ host->clk_enabled = 0;
+ }
+}
+
+static void tegra_sdhci_set_clock(struct sdhci_host *sdhci, unsigned int clock)
+{
+ struct tegra_sdhci_host *host = sdhci_priv(sdhci);
+ pr_info("tegra sdhci clock %s %u enabled=%d\n",
+ mmc_hostname(sdhci->mmc), clock, host->clk_enabled);
+
+ tegra_sdhci_enable_clock(host, clock);
+}
+
static struct sdhci_ops tegra_sdhci_ops = {
.enable_dma = tegra_sdhci_enable_dma,
+ .set_clock = tegra_sdhci_set_clock,
};
static int __devinit tegra_sdhci_probe(struct platform_device *pdev)
@@ -105,6 +131,7 @@ static int __devinit tegra_sdhci_probe(struct platform_device *pdev)
if (rc != 0)
goto err_clkput;
+ host->clk_enabled = 1;
sdhci->hw_name = "tegra";
sdhci->ops = &tegra_sdhci_ops;
sdhci->irq = irq;
@@ -205,6 +232,7 @@ static int tegra_sdhci_resume(struct platform_device *pdev)
struct tegra_sdhci_host *host = platform_get_drvdata(pdev);
int ret;
+ tegra_sdhci_enable_clock(host, 1);
ret = sdhci_resume_host(host->sdhci);
if (ret)
pr_err("%s: failed, error = %d\n", __func__, ret);
diff --git a/drivers/net/wireless/bcm4329/dhd.h b/drivers/net/wireless/bcm4329/dhd.h
index 4e5ffcbd5237..59aa9f10ab1f 100644
--- a/drivers/net/wireless/bcm4329/dhd.h
+++ b/drivers/net/wireless/bcm4329/dhd.h
@@ -219,6 +219,9 @@ extern int dhd_os_wake_unlock(dhd_pub_t *pub);
extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
extern int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub);
+extern void dhd_os_start_lock(dhd_pub_t *pub);
+extern void dhd_os_start_unlock(dhd_pub_t *pub);
+
typedef struct dhd_if_event {
uint8 ifidx;
uint8 action;
@@ -348,6 +351,8 @@ typedef enum cust_gpio_modes {
} cust_gpio_modes_t;
extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+extern int net_os_send_hang_message(struct net_device *dev);
+
/*
* Insmod parameters for debug/test
*/
@@ -397,6 +402,10 @@ extern uint dhd_sdiod_drive_strength;
/* Override to force tx queueing all the time */
extern uint dhd_force_tx_queueing;
+/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
+#define KEEP_ALIVE_PERIOD 55000
+#define NULL_PKT_STR "null_pkt"
+
#ifdef SDTEST
/* Echo packet generator (SDIO), pkts/s */
extern uint dhd_pktgen;
diff --git a/drivers/net/wireless/bcm4329/dhd_common.c b/drivers/net/wireless/bcm4329/dhd_common.c
index 1da80f40f985..dbd4b922a262 100644
--- a/drivers/net/wireless/bcm4329/dhd_common.c
+++ b/drivers/net/wireless/bcm4329/dhd_common.c
@@ -21,7 +21,7 @@
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_common.c,v 1.5.6.8.2.6.6.69.4.4 2010/09/22 21:21:15 Exp $
+ * $Id: dhd_common.c,v 1.5.6.8.2.6.6.69.4.10 2010/10/29 19:58:08 Exp $
*/
#include <typedefs.h>
#include <osl.h>
@@ -71,6 +71,11 @@ extern int dhd_wl_ioctl(dhd_pub_t *dhd, uint cmd, char *buf, uint buflen);
void dhd_iscan_lock(void);
void dhd_iscan_unlock(void);
+#if defined(KEEP_ALIVE)
+extern bool ap_fw_loaded;
+int dhd_keep_alive_onoff(dhd_pub_t *dhd, int ka_on);
+#endif /* KEEP_ALIVE */
+
/* Packet alignment for most efficient SDIO (can change based on platform) */
#ifndef DHD_SDALIGN
#define DHD_SDALIGN 32
@@ -1392,6 +1397,19 @@ dhd_preinit_ioctls(dhd_pub_t *dhd)
}
#endif /* PKT_FILTER_SUPPORT */
+#if defined(KEEP_ALIVE)
+ {
+ /* Set Keep Alive : be sure to use FW with -keepalive */
+ int res;
+
+ if (ap_fw_loaded == FALSE) {
+ if ((res = dhd_keep_alive_onoff(dhd, 1)) < 0)
+ DHD_ERROR(("%s set keeplive failed %d\n", \
+ __FUNCTION__, res));
+ }
+ }
+#endif
+
dhd_os_proto_unblock(dhd);
return 0;
@@ -1446,7 +1464,7 @@ dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
dhd_pub_t *dhd = dhd_bus_pub(dhdp);
dhd_iscan_lock();
- /* If iscan_delete is null then delete the entire
+ /* If iscan_delete is null then delete the entire
* chain or else delete specific one provided
*/
if (!iscan_delete) {
@@ -1885,6 +1903,11 @@ dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr)
if (scan_fr != 0)
pfn_param.scan_freq = htod32(scan_fr);
+ if (pfn_param.scan_freq > PNO_SCAN_MAX_FW) {
+ DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW));
+ return err;
+ }
+
bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf, sizeof(iovbuf));
dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
@@ -1933,6 +1956,53 @@ int dhd_pno_get_status(dhd_pub_t *dhd)
#endif /* PNO_SUPPORT */
+#if defined(KEEP_ALIVE)
+int dhd_keep_alive_onoff(dhd_pub_t *dhd, int ka_on)
+{
+ char buf[256];
+ char *buf_ptr = buf;
+ wl_keep_alive_pkt_t keep_alive_pkt;
+ char * str;
+ int str_len, buf_len;
+ int res = 0;
+ int keep_alive_period = KEEP_ALIVE_PERIOD; /* in ms */
+
+ DHD_TRACE(("%s: ka:%d\n", __FUNCTION__, ka_on));
+
+ if (ka_on) { /* on suspend */
+ keep_alive_pkt.period_msec = keep_alive_period;
+
+ } else {
+ /* on resume, turn off keep_alive packets */
+ keep_alive_pkt.period_msec = 0;
+ }
+
+ /* IOC var name */
+ str = "keep_alive";
+ str_len = strlen(str);
+ strncpy(buf, str, str_len);
+ buf[str_len] = '\0';
+ buf_len = str_len + 1;
+
+ /* set ptr to IOCTL payload after the var name */
+ buf_ptr += buf_len; /* include term Z */
+
+ /* copy Keep-alive attributes from local var keep_alive_pkt */
+ str = NULL_PKT_STR;
+ keep_alive_pkt.len_bytes = strlen(str);
+
+ memcpy(buf_ptr, &keep_alive_pkt, WL_KEEP_ALIVE_FIXED_LEN);
+ buf_ptr += WL_KEEP_ALIVE_FIXED_LEN;
+
+ /* copy packet data */
+ memcpy(buf_ptr, str, keep_alive_pkt.len_bytes);
+ buf_len += (WL_KEEP_ALIVE_FIXED_LEN + keep_alive_pkt.len_bytes);
+
+ res = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, buf_len);
+ return res;
+}
+#endif /* defined(KEEP_ALIVE) */
+
#if defined(CSCAN)
/* Androd ComboSCAN support */
diff --git a/drivers/net/wireless/bcm4329/dhd_linux.c b/drivers/net/wireless/bcm4329/dhd_linux.c
index d7e72c8d12d2..e95e3b1e0f08 100644
--- a/drivers/net/wireless/bcm4329/dhd_linux.c
+++ b/drivers/net/wireless/bcm4329/dhd_linux.c
@@ -22,7 +22,7 @@
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_linux.c,v 1.65.4.9.2.12.2.104 2010/08/20 19:15:40 Exp $
+ * $Id: dhd_linux.c,v 1.65.4.9.2.12.2.104.4.27 2010/10/29 02:31:24 Exp $
*/
#ifdef CONFIG_WIFI_CONTROL_FUNC
@@ -282,6 +282,8 @@ typedef struct dhd_info {
int hang_was_sent;
+ struct mutex wl_start_lock;
+
/* Thread to issue ioctl for multicast */
long sysioc_pid;
struct semaphore sysioc_sem;
@@ -303,11 +305,6 @@ typedef struct dhd_info {
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
-#if defined(CONFIG_HAS_EARLYSUSPEND)
-#define KEEP_ALIVE
-#define KEEP_ALIVE_PERIOD 55000
-#endif
-
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
struct semaphore dhd_registration_sem;
#define DHD_REGISTRATION_TIMEOUT 8000 /* msec : allowed time to finished dhd registration */
@@ -525,57 +522,6 @@ static void dhd_set_packet_filter(int value, dhd_pub_t *dhd)
}
-#if defined(KEEP_ALIVE)
-
-/* wl cmd# ./wl keep_alive 45000 0x6e756c6c5f706b74 */
-#define NULL_PKT_STR "null_pkt"
-
-static int dhd_keep_alive_onoff(dhd_pub_t *dhd, int ka_on)
-{
- char buf[256];
- char *buf_ptr = buf;
- wl_keep_alive_pkt_t keep_alive_pkt;
- char * str;
- int str_len, buf_len;
- int res = 0;
- int keep_alive_period = KEEP_ALIVE_PERIOD; /* in ms */
-
- DHD_TRACE(("%s: ka:%d\n", __FUNCTION__, ka_on));
-
- if (ka_on) { /* on suspend */
- keep_alive_pkt.period_msec = keep_alive_period;
-
- } else {
- /* on resume, turn off keep_alive packets */
- keep_alive_pkt.period_msec = 0;
- }
-
- /* IOC var name */
- str = "keep_alive";
- str_len = strlen(str);
- strncpy(buf, str, str_len);
- buf[str_len] = '\0';
- buf_len = str_len + 1;
-
- /* set ptr to IOCTL payload after the var name */
- buf_ptr += buf_len; /* include term Z */
-
- /* copy Keep-alive attributes from local var keep_alive_pkt */
- str = NULL_PKT_STR;
- keep_alive_pkt.len_bytes = strlen(str);
-
- memcpy(buf_ptr, &keep_alive_pkt, WL_KEEP_ALIVE_FIXED_LEN);
- buf_ptr += WL_KEEP_ALIVE_FIXED_LEN;
-
- /* copy packet data */
- memcpy(buf_ptr, str, keep_alive_pkt.len_bytes);
- buf_len += (WL_KEEP_ALIVE_FIXED_LEN + keep_alive_pkt.len_bytes);
-
- res = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, buf_len);
- return res;
-}
-#endif /* defined(KEEP_ALIVE) */
-
#if defined(CONFIG_HAS_EARLYSUSPEND)
static int dhd_set_suspend(int value, dhd_pub_t *dhd)
@@ -588,10 +534,6 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd)
uint roamvar = 1;
#endif /* CUSTOMER_HW2 */
-#if defined(KEEP_ALIVE)
- int ioc_res;
-#endif
-
DHD_TRACE(("%s: enter, value = %d in_suspend = %d\n",
__FUNCTION__, value, dhd->in_suspend));
@@ -619,15 +561,11 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd)
4, iovbuf, sizeof(iovbuf));
dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
#ifdef CUSTOMER_HW2
- /* Disable build-in roaming to allowed ext supplicant to take of roaming */
+ /* Disable build-in roaming during suspend */
bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
#endif /* CUSTOMER_HW2 */
-#if defined(KEEP_ALIVE)
- if ((ioc_res = dhd_keep_alive_onoff(dhd, 1)) < 0)
- DHD_ERROR(("%s result:%d\n", __FUNCTION__, ioc_res));
-#endif
} else {
/* Kernel resumed */
@@ -650,11 +588,6 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd)
bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
#endif /* CUSTOMER_HW2 */
-
-#if defined(KEEP_ALIVE)
- if ((ioc_res = dhd_keep_alive_onoff(dhd, 0)) < 0)
- DHD_ERROR(("%s result:%d\n", __FUNCTION__, ioc_res));
-#endif
}
}
@@ -1065,9 +998,11 @@ _dhd_sysioc_thread(void *data)
DAEMONIZE("dhd_sysioc");
while (down_interruptible(&dhd->sysioc_sem) == 0) {
+ dhd_os_start_lock(&dhd->pub);
dhd_os_wake_lock(&dhd->pub);
for (i = 0; i < DHD_MAX_IFS; i++) {
if (dhd->iflist[i]) {
+ DHD_TRACE(("%s: interface %d\n",__FUNCTION__, i));
#ifdef SOFTAP
in_ap = (ap_net_dev != NULL);
#endif /* SOFTAP */
@@ -1102,6 +1037,7 @@ _dhd_sysioc_thread(void *data)
}
}
dhd_os_wake_unlock(&dhd->pub);
+ dhd_os_start_unlock(&dhd->pub);
}
complete_and_exit(&dhd->sysioc_exited, 0);
}
@@ -1115,6 +1051,7 @@ dhd_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = (struct sockaddr *)addr;
int ifidx;
+ DHD_TRACE(("%s: Enter\n",__FUNCTION__));
ifidx = dhd_net2idx(dhd, dev);
if (ifidx == DHD_BAD_IF)
return -1;
@@ -1133,6 +1070,7 @@ dhd_set_multicast_list(struct net_device *dev)
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
int ifidx;
+ DHD_TRACE(("%s: Enter\n",__FUNCTION__));
ifidx = dhd_net2idx(dhd, dev);
if (ifidx == DHD_BAD_IF)
return;
@@ -2119,6 +2057,7 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
#endif
+ mutex_init(&dhd->wl_start_lock);
/* Link to info module */
dhd->pub.info = dhd;
@@ -2970,20 +2909,26 @@ void dhd_wait_event_wakeup(dhd_pub_t *dhd)
int
dhd_dev_reset(struct net_device *dev, uint8 flag)
{
+ int ret;
+
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
/* Turning off watchdog */
if (flag)
dhd_os_wd_timer(&dhd->pub, 0);
- dhd_bus_devreset(&dhd->pub, flag);
+ ret = dhd_bus_devreset(&dhd->pub, flag);
+ if (ret) {
+ DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+ return ret;
+ }
/* Turning on watchdog back */
if (!flag)
dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
- DHD_ERROR(("%s: WLAN OFF DONE\n", __FUNCTION__));
+ DHD_ERROR(("%s: WLAN OFF DONE:\n", __FUNCTION__));
- return 1;
+ return ret;
}
int net_os_set_suspend_disable(struct net_device *dev, int val)
@@ -3289,3 +3234,19 @@ int net_os_send_hang_message(struct net_device *dev)
}
return ret;
}
+
+void dhd_os_start_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd)
+ mutex_lock(&dhd->wl_start_lock);
+}
+
+void dhd_os_start_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd)
+ mutex_unlock(&dhd->wl_start_lock);
+}
diff --git a/drivers/net/wireless/bcm4329/include/epivers.h b/drivers/net/wireless/bcm4329/include/epivers.h
index b2a7eb0c8138..002aff6897de 100644
--- a/drivers/net/wireless/bcm4329/include/epivers.h
+++ b/drivers/net/wireless/bcm4329/include/epivers.h
@@ -33,16 +33,16 @@
#define EPI_RC_NUMBER 248
-#define EPI_INCREMENTAL_NUMBER 10
+#define EPI_INCREMENTAL_NUMBER 11
#define EPI_BUILD_NUMBER 0
-#define EPI_VERSION 4, 218, 248, 10
+#define EPI_VERSION 4, 218, 248, 11
-#define EPI_VERSION_NUM 0x04daf80a
+#define EPI_VERSION_NUM 0x04daf80b
-#define EPI_VERSION_STR "4.218.248.10"
-#define EPI_ROUTER_VERSION_STR "4.219.248.10"
+#define EPI_VERSION_STR "4.218.248.11"
+#define EPI_ROUTER_VERSION_STR "4.219.248.11"
#endif
diff --git a/drivers/net/wireless/bcm4329/wl_iw.c b/drivers/net/wireless/bcm4329/wl_iw.c
index 82164edc6534..6d83a259bf95 100644
--- a/drivers/net/wireless/bcm4329/wl_iw.c
+++ b/drivers/net/wireless/bcm4329/wl_iw.c
@@ -97,7 +97,7 @@ typedef const struct si_pub si_t;
#define WL_SOFTAP(x) printk x
static struct net_device *priv_dev;
static bool ap_cfg_running = FALSE;
-static bool ap_fw_loaded = FALSE;
+bool ap_fw_loaded = FALSE;
struct net_device *ap_net_dev = NULL;
struct semaphore ap_eth_sema;
static int wl_iw_set_ap_security(struct net_device *dev, struct ap_profile *ap);
@@ -110,8 +110,7 @@ static int wl_iw_softap_deassoc_stations(struct net_device *dev);
} while (0)
static int g_onoff = G_WLAN_SET_ON;
-wl_iw_extra_params_t g_wl_iw_params;
-static struct mutex wl_start_lock;
+wl_iw_extra_params_t g_wl_iw_params;
static struct mutex wl_cache_lock;
extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status,
@@ -1449,6 +1448,7 @@ wl_iw_send_priv_event(
int
wl_control_wl_start(struct net_device *dev)
{
+ wl_iw_t *iw;
int ret = 0;
WL_TRACE(("Enter %s \n", __FUNCTION__));
@@ -1458,7 +1458,8 @@ wl_control_wl_start(struct net_device *dev)
return -1;
}
- mutex_lock(&wl_start_lock);
+ iw = *(wl_iw_t **)netdev_priv(dev);
+ dhd_os_start_lock(iw->pub);
if (g_onoff == G_WLAN_SET_OFF) {
dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON);
@@ -1467,7 +1468,7 @@ wl_control_wl_start(struct net_device *dev)
sdioh_start(NULL, 0);
#endif
- dhd_dev_reset(dev, 0);
+ ret = dhd_dev_reset(dev, 0);
#if defined(BCMLXSDMMC)
sdioh_start(NULL, 1);
@@ -1479,7 +1480,7 @@ wl_control_wl_start(struct net_device *dev)
}
WL_TRACE(("Exited %s \n", __FUNCTION__));
- mutex_unlock(&wl_start_lock);
+ dhd_os_start_unlock(iw->pub);
return ret;
}
@@ -1490,7 +1491,9 @@ wl_iw_control_wl_off(
struct iw_request_info *info
)
{
+ wl_iw_t *iw;
int ret = 0;
+
WL_TRACE(("Enter %s\n", __FUNCTION__));
if (!dev) {
@@ -1498,7 +1501,8 @@ wl_iw_control_wl_off(
return -1;
}
- mutex_lock(&wl_start_lock);
+ iw = *(wl_iw_t **)netdev_priv(dev);
+ dhd_os_start_lock(iw->pub);
#ifdef SOFTAP
ap_cfg_running = FALSE;
@@ -1537,7 +1541,7 @@ wl_iw_control_wl_off(
wl_iw_send_priv_event(dev, "STOP");
}
- mutex_unlock(&wl_start_lock);
+ dhd_os_start_unlock(iw->pub);
WL_TRACE(("Exited %s\n", __FUNCTION__));
@@ -1554,7 +1558,15 @@ wl_iw_control_wl_on(
WL_TRACE(("Enter %s \n", __FUNCTION__));
- ret = wl_control_wl_start(dev);
+ if ((ret = wl_control_wl_start(dev)) == BCME_SDIO_ERROR) {
+ WL_ERROR(("%s failed first attemp\n", __FUNCTION__));
+ bcm_mdelay(100);
+ if ((ret = wl_control_wl_start(dev)) == BCME_SDIO_ERROR) {
+ WL_ERROR(("%s failed second attemp\n", __FUNCTION__));
+ net_os_send_hang_message(dev);
+ return ret;
+ }
+ }
wl_iw_send_priv_event(dev, "START");
@@ -2513,7 +2525,7 @@ wl_iw_get_aplist(
list->version = dtoh32(list->version);
list->count = dtoh32(list->count);
if (list->version != WL_BSS_INFO_VERSION) {
- WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", \
+ WL_ERROR(("%s: list->version %d != WL_BSS_INFO_VERSION\n", \
__FUNCTION__, list->version));
kfree(list);
return -EINVAL;
@@ -2521,21 +2533,21 @@ wl_iw_get_aplist(
for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
- ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
- buflen));
+ if ((uintptr)bi >= ((uintptr)list + buflen)) {
+ WL_ERROR(("%s: Scan results out of bounds\n",__FUNCTION__));
+ kfree(list);
+ return -E2BIG;
+ }
-
if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
continue;
-
memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
addr[dwrq->length].sa_family = ARPHRD_ETHER;
qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
qual[dwrq->length].noise = 0x100 + bi->phy_noise;
-
#if WIRELESS_EXT > 18
qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
#else
@@ -2598,21 +2610,20 @@ wl_iw_iscan_get_aplist(
for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length))
: list->bss_info;
- ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
- WLC_IW_ISCAN_MAXLEN));
+ if ((uintptr)bi >= ((uintptr)list + WLC_IW_ISCAN_MAXLEN)) {
+ WL_ERROR(("%s: Scan results out of bounds\n",__FUNCTION__));
+ return -E2BIG;
+ }
-
if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
continue;
-
memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
addr[dwrq->length].sa_family = ARPHRD_ETHER;
qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
qual[dwrq->length].noise = 0x100 + bi->phy_noise;
-
#if WIRELESS_EXT > 18
qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
#else
@@ -3535,7 +3546,10 @@ wl_iw_get_scan_prep(
int ret = 0;
int channel;
- ASSERT(list);
+ if (!list) {
+ WL_ERROR(("%s: Null list pointer",__FUNCTION__));
+ return -EINVAL;
+ }
for (i = 0; i < list->count && i < IW_MAX_AP; i++)
{
@@ -3543,7 +3557,7 @@ wl_iw_get_scan_prep(
WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", \
__FUNCTION__, list->version));
return ret;
- }
+ }
bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
@@ -3559,7 +3573,6 @@ wl_iw_get_scan_prep(
iwe.u.data.flags = 1;
event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
-
if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
iwe.cmd = SIOCGIWMODE;
if (dtoh16(bi->capability) & DOT11_CAP_ESS)
@@ -3569,7 +3582,6 @@ wl_iw_get_scan_prep(
event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
}
-
iwe.cmd = SIOCGIWFREQ;
channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
iwe.u.freq.m = wf_channel2mhz(channel,
@@ -3578,7 +3590,6 @@ wl_iw_get_scan_prep(
iwe.u.freq.e = 6;
event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
-
iwe.cmd = IWEVQUAL;
iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
@@ -3595,7 +3606,6 @@ wl_iw_get_scan_prep(
iwe.u.data.length = 0;
event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
-
if (bi->rateset.count) {
if (((event -extra) + IW_EV_LCP_LEN) <= (uintptr)end) {
value = event + IW_EV_LCP_LEN;
@@ -3887,26 +3897,26 @@ wl_iw_iscan_get_scan(
bi = NULL;
for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) {
bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
- ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
- WLC_IW_ISCAN_MAXLEN));
-
+ if ((uintptr)bi >= ((uintptr)list + WLC_IW_ISCAN_MAXLEN)) {
+ WL_ERROR(("%s: Scan results out of bounds\n",__FUNCTION__));
+ return -E2BIG;
+ }
+
if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
IW_EV_QUAL_LEN >= end)
return -E2BIG;
-
+
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
-
iwe.u.data.length = dtoh32(bi->SSID_len);
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
-
if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
iwe.cmd = SIOCGIWMODE;
if (dtoh16(bi->capability) & DOT11_CAP_ESS)
@@ -3916,7 +3926,6 @@ wl_iw_iscan_get_scan(
event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
}
-
iwe.cmd = SIOCGIWFREQ;
channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
iwe.u.freq.m = wf_channel2mhz(channel,
@@ -3925,17 +3934,14 @@ wl_iw_iscan_get_scan(
iwe.u.freq.e = 6;
event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
-
iwe.cmd = IWEVQUAL;
iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
iwe.u.qual.noise = 0x100 + bi->phy_noise;
event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
-
wl_iw_handle_scanresults_ies(&event, end, info, bi);
-
iwe.cmd = SIOCGIWENCODE;
if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
@@ -3944,7 +3950,6 @@ wl_iw_iscan_get_scan(
iwe.u.data.length = 0;
event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
-
if (bi->rateset.count) {
if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end)
return -E2BIG;
@@ -5483,7 +5488,6 @@ wl_iw_combined_scan_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nss
p = (char*)iscan->iscan_ex_params_p->params.channel_list + nchan * sizeof(uint16);
}
-
iscan->iscan_ex_params_p->params.channel_num = \
htod32((nssid << WL_SCAN_PARAMS_NSSID_SHIFT) | \
(nchan & WL_SCAN_PARAMS_COUNT_MASK));
@@ -5492,7 +5496,6 @@ wl_iw_combined_scan_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nss
(uint)((iscan->iscan_ex_params_p->params.channel_num >> WL_SCAN_PARAMS_NSSID_SHIFT) & \
WL_SCAN_PARAMS_COUNT_MASK);
-
params_size = (int) (p - (char*)iscan->iscan_ex_params_p + nssid * sizeof(wlc_ssid_t));
iscan->iscan_ex_param_size = params_size;
@@ -7884,7 +7887,7 @@ wl_iw_bt_init(struct net_device *dev)
return 0;
}
-int wl_iw_attach(struct net_device *dev, void * dhdp)
+int wl_iw_attach(struct net_device *dev, void *dhdp)
{
int params_size;
wl_iw_t *iw;
@@ -7893,7 +7896,6 @@ int wl_iw_attach(struct net_device *dev, void * dhdp)
#endif
mutex_init(&wl_cache_lock);
- mutex_init(&wl_start_lock);
#if defined(WL_IW_USE_ISCAN)
if (!dev)
diff --git a/drivers/net/wireless/bcm4329/wl_iw.h b/drivers/net/wireless/bcm4329/wl_iw.h
index 5335b5661cdd..3b45792979af 100644
--- a/drivers/net/wireless/bcm4329/wl_iw.h
+++ b/drivers/net/wireless/bcm4329/wl_iw.h
@@ -196,7 +196,6 @@ extern int net_os_set_suspend_disable(struct net_device *dev, int val);
extern int net_os_set_suspend(struct net_device *dev, int val);
extern int net_os_set_dtim_skip(struct net_device *dev, int val);
extern int net_os_set_packet_filter(struct net_device *dev, int val);
-extern int net_os_send_hang_message(struct net_device *dev);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
@@ -231,6 +230,7 @@ extern int dhd_dev_get_pno_status(struct net_device *dev);
#define PNO_TLV_TYPE_SSID_IE 'S'
#define PNO_TLV_TYPE_TIME 'T'
#define PNO_EVENT_UP "PNO_EVENT"
+#define PNO_SCAN_MAX_FW 508
typedef struct cmd_tlv {
char prefix;
diff --git a/drivers/serial/tegra_hsuart.c b/drivers/serial/tegra_hsuart.c
index 785cfb229779..292863ccaaa0 100644
--- a/drivers/serial/tegra_hsuart.c
+++ b/drivers/serial/tegra_hsuart.c
@@ -416,13 +416,9 @@ static void tegra_tx_dma_complete_work(struct work_struct *work)
struct tegra_uart_port *t =
container_of(work, struct tegra_uart_port, tx_work);
struct tegra_dma_req *req = &t->tx_dma_req;
- struct circ_buf *xmit = &t->uport.state->xmit;
- int count = req->bytes_transferred;
unsigned long flags;
int timeout = 20;
- dev_vdbg(t->uport.dev, "%s: %d\n", __func__, count);
-
while ((uart_readb(t, UART_LSR) & TX_EMPTY_STATUS) != TX_EMPTY_STATUS) {
timeout--;
if (timeout == 0) {
@@ -434,11 +430,8 @@ static void tegra_tx_dma_complete_work(struct work_struct *work)
}
spin_lock_irqsave(&t->uport.lock, flags);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
- t->tx_in_progress = 0;
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&t->uport);
+ t->tx_in_progress = 0;
if (req->status != -TEGRA_DMA_REQ_ERROR_ABORTED)
tegra_start_next_tx(t);
@@ -449,7 +442,21 @@ static void tegra_tx_dma_complete_work(struct work_struct *work)
static void tegra_tx_dma_complete_callback(struct tegra_dma_req *req)
{
struct tegra_uart_port *t = req->dev;
+ struct circ_buf *xmit = &t->uport.state->xmit;
+ int count = req->bytes_transferred;
+ unsigned long flags;
+
+ dev_vdbg(t->uport.dev, "%s: %d\n", __func__, count);
+
+ spin_lock_irqsave(&t->uport.lock, flags);
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&t->uport);
+
schedule_work(&t->tx_work);
+
+ spin_unlock_irqrestore(&t->uport.lock, flags);
}
static irqreturn_t tegra_uart_isr(int irq, void *data)
@@ -552,6 +559,9 @@ static void tegra_stop_rx(struct uart_port *u)
static void tegra_uart_hw_deinit(struct tegra_uart_port *t)
{
unsigned char fcr;
+ unsigned long flags;
+
+ flush_work(&t->tx_work);
/* Disable interrupts */
uart_writeb(t, 0, UART_IER);
@@ -559,6 +569,8 @@ static void tegra_uart_hw_deinit(struct tegra_uart_port *t)
while ((uart_readb(t, UART_LSR) & UART_LSR_TEMT) != UART_LSR_TEMT);
udelay(200);
+ spin_lock_irqsave(&t->uport.lock, flags);
+
/* Reset the Rx and Tx FIFOs */
fcr = t->fcr_shadow;
fcr |= UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR;
@@ -568,6 +580,8 @@ static void tegra_uart_hw_deinit(struct tegra_uart_port *t)
clk_disable(t->clk);
t->baud = 0;
+
+ spin_unlock_irqrestore(&t->uport.lock, flags);
}
static void tegra_uart_free_rx_dma(struct tegra_uart_port *t)
@@ -805,14 +819,11 @@ fail:
static void tegra_shutdown(struct uart_port *u)
{
struct tegra_uart_port *t;
- unsigned long flags;
- spin_lock_irqsave(&u->lock, flags);
t = container_of(u, struct tegra_uart_port, uport);
dev_vdbg(u->dev, "+tegra_shutdown\n");
tegra_uart_hw_deinit(t);
- spin_unlock_irqrestore(&u->lock, flags);
t->rx_in_progress = 0;
t->tx_in_progress = 0;
@@ -947,11 +958,9 @@ static void tegra_stop_tx(struct uart_port *u)
t = container_of(u, struct tegra_uart_port, uport);
- if (t->use_tx_dma) {
+ if (t->use_tx_dma)
tegra_dma_dequeue_req(t->tx_dma, &t->tx_dma_req);
- flush_work(&t->tx_work);
- }
- t->tx_in_progress = 0;
+
return;
}
@@ -1170,6 +1179,8 @@ static int tegra_uart_suspend(struct platform_device *pdev, pm_message_t state)
u = &t->uport;
uart_suspend_port(&tegra_uart_driver, u);
+
+ flush_work(&t->tx_work);
return 0;
}
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index a0b5a93b72d2..0841e6b27e5c 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -976,6 +976,7 @@ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
{
unsigned char *block;
+ unsigned char *dtd_block;
struct fb_videomode *mode, *m;
int num = 0, i, first = 1;
@@ -992,14 +993,42 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
if (mode == NULL)
return;
- block = edid + edid[0x2];
+ block = edid + 0x4;
+ dtd_block = edid + edid[0x2];
+
+ DPRINTK(" Short Video Modes\n");
+ while (block < dtd_block) {
+ unsigned tag = block[0] >> 5;
+ unsigned len = block[0] & 0x1f;
+
+ block++;
+ if (dtd_block - block < len)
+ break;
+
+ if (tag == 0x2) {
+ for (i = 0; i < len; i++) {
+ unsigned m = block[i];
+ if (m > 0 && m < CEA_MODEDB_SIZE) {
+ memcpy(&mode[num], &cea_modes[m],
+ sizeof(mode[num]));
+ DPRINTK(" %d: %dx%d @ %d\n", m,
+ cea_modes[m].xres, cea_modes[m].yres,
+ cea_modes[m].refresh);
+
+ num++;
+ }
+ }
+ }
+
+ block += len;
+ }
DPRINTK(" Extended Detailed Timings\n");
for (i = 0; i < (128 - edid[0x2]) / DETAILED_TIMING_DESCRIPTION_SIZE;
- i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) {
- if (!(block[0] == 0x00 && block[1] == 0x00)) {
- get_detailed_timing(block, &mode[num]);
+ i++, dtd_block += DETAILED_TIMING_DESCRIPTION_SIZE) {
+ if (!(dtd_block[0] == 0x00 && dtd_block[1] == 0x00)) {
+ get_detailed_timing(dtd_block, &mode[num]);
if (first) {
mode[num].flag |= FB_MODE_IS_FIRST;
first = 0;
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index cf68c0ba8d1b..209e6be1163e 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -402,6 +402,459 @@ const struct fb_videomode vesa_modes[] = {
FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
};
EXPORT_SYMBOL(vesa_modes);
+
+const struct fb_videomode cea_modes[CEA_MODEDB_SIZE] = {
+ {},
+ /* 1: 640x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 640, .yres = 480, .pixclock = 39721,
+ .left_margin = 48, .right_margin = 16,
+ .upper_margin = 33, .lower_margin = 1,
+ .hsync_len = 96, .vsync_len = 2,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 2: 720x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 720, .yres = 480, .pixclock = 37037,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 3: 720x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 720, .yres = 480, .pixclock = 37037,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 4: 1280x720p @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 110,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 5: 1920x1080i @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 6: 720(1440)x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 7: 720(1440)x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 8: 720(1440)x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 240, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 9: 720(1440)x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 240, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 10: 2880x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 11: 2880x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 12: 2880x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 240, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 13: 2880x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 240, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 14: 1440x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 120, .right_margin = 32,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 124, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 15: 1440x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 120, .right_margin = 32,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 124, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 16: 1920x1080p @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 17: 720x576p @ 50Hz */
+ {.refresh = 50, .xres = 720, .yres = 576, .pixclock = 37037,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 18: 720x576p @ 50Hz */
+ {.refresh = 50, .xres = 720, .yres = 576, .pixclock = 37037,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 19: 1280x720p @ 50Hz */
+ {.refresh = 50, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 440,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 20: 1920x1080i @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 21: 720(1440)x576i @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 22: 720(1440)x576i @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 23: 720(1440)x288p @ 50Hz */
+ {.refresh = 49, .xres = 1440, .yres = 288, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 24: 720(1440)x288p @ 50Hz */
+ {.refresh = 49, .xres = 1440, .yres = 288, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 25: 2880x576i @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 26: 2880x576i @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 27: 2880x288p @ 50Hz */
+ {.refresh = 49, .xres = 2880, .yres = 288, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 28: 2880x288p @ 50Hz */
+ {.refresh = 49, .xres = 2880, .yres = 288, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 29: 1440x576p @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 136, .right_margin = 24,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 128, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 30: 1440x576p @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 136, .right_margin = 24,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 128, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 31: 1920x1080p @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 32: 1920x1080p @ 23.97Hz/24Hz */
+ {.refresh = 24, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 638,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 33: 1920x1080p @ 25Hz */
+ {.refresh = 25, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 34: 1920x1080p @ 29.97Hz/30Hz */
+ {.refresh = 30, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 35: 2880x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 9259,
+ .left_margin = 240, .right_margin = 64,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 248, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 36: 2880x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 9259,
+ .left_margin = 240, .right_margin = 64,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 248, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 37: 2880x576p @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 9259,
+ .left_margin = 272, .right_margin = 48,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 256, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 38: 2880x576p @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 9259,
+ .left_margin = 272, .right_margin = 48,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 256, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 39: 1920x1080i @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 13888,
+ .left_margin = 184, .right_margin = 32,
+ .upper_margin = 57, .lower_margin = 2,
+ .hsync_len = 168, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 40: 1920x1080i @ 100Hz */
+ {.refresh = 100, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 41: 1280x720p @ 100Hz */
+ {.refresh = 100, .xres = 1280, .yres = 720, .pixclock = 6734,
+ .left_margin = 220, .right_margin = 440,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 42: 720x576p @ 100Hz */
+ {.refresh = 100, .xres = 720, .yres = 576, .pixclock = 18518,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 43: 720x576p @ 100Hz */
+ {.refresh = 100, .xres = 720, .yres = 576, .pixclock = 18518,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 44: 720(1440)x576i @ 100Hz */
+ {.refresh = 100, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 45: 720(1440)x576i @ 100Hz */
+ {.refresh = 100, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 46: 1920x1080i @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 47: 1280x720p @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1280, .yres = 720, .pixclock = 6734,
+ .left_margin = 220, .right_margin = 110,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 48: 720x480p @ 119.88/120Hz */
+ {.refresh = 119, .xres = 720, .yres = 480, .pixclock = 18518,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 49: 720x480p @ 119.88/120Hz */
+ {.refresh = 119, .xres = 720, .yres = 480, .pixclock = 18518,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 50: 720(1440)x480i @ 119.88/120Hz */
+ {.refresh = 119, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 51: 720(1440)x480i @ 119.88/120Hz */
+ {.refresh = 119, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 52: 720x576p @ 200Hz */
+ {.refresh = 200, .xres = 720, .yres = 576, .pixclock = 9259,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 53: 720x576p @ 200Hz */
+ {.refresh = 200, .xres = 720, .yres = 576, .pixclock = 9259,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 54: 720(1440)x576i @ 200Hz */
+ {.refresh = 200, .xres = 1440, .yres = 576, .pixclock = 9259,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 55: 720(1440)x576i @ 200Hz */
+ {.refresh = 200, .xres = 1440, .yres = 576, .pixclock = 9259,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 56: 720x480p @ 239.76/240Hz */
+ {.refresh = 239, .xres = 720, .yres = 480, .pixclock = 9259,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 57: 720x480p @ 239.76/240Hz */
+ {.refresh = 239, .xres = 720, .yres = 480, .pixclock = 9259,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 58: 720(1440)x480i @ 239.76/240Hz */
+ {.refresh = 239, .xres = 1440, .yres = 480, .pixclock = 9259,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 59: 720(1440)x480i @ 239.76/240Hz */
+ {.refresh = 239, .xres = 1440, .yres = 480, .pixclock = 9259,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 60: 1280x720p @ 23.97Hz/24Hz */
+ {.refresh = 24, .xres = 1280, .yres = 720, .pixclock = 16835,
+ .left_margin = 220, .right_margin = 1760,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 61: 1280x720p @ 25Hz */
+ {.refresh = 25, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 2420,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 62: 1280x720p @ 29.97Hz/30Hz */
+ {.refresh = 30, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 1760,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 63: 1920x1080p @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1920, .yres = 1080, .pixclock = 3367,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 64: 1920x1080p @ 100Hz */
+ {.refresh = 100, .xres = 1920, .yres = 1080, .pixclock = 3367,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+};
+EXPORT_SYMBOL(cea_modes);
#endif /* CONFIG_FB_MODE_HELPERS */
/**
diff --git a/drivers/video/tegra/dc/dc.c b/drivers/video/tegra/dc/dc.c
index db90860b731d..d802535b913b 100644
--- a/drivers/video/tegra/dc/dc.c
+++ b/drivers/video/tegra/dc/dc.c
@@ -1102,8 +1102,10 @@ static int tegra_dc_suspend(struct nvhost_device *ndev, pm_message_t state)
dev_info(&ndev->dev, "suspend\n");
mutex_lock(&dc->lock);
- if (dc->enabled)
+ if (dc->enabled) {
+ tegra_fb_suspend(dc->fb);
_tegra_dc_disable(dc);
+ }
mutex_unlock(&dc->lock);
return 0;
diff --git a/drivers/video/tegra/fb.c b/drivers/video/tegra/fb.c
index 1d518a561242..2a42ae56d71b 100644
--- a/drivers/video/tegra/fb.c
+++ b/drivers/video/tegra/fb.c
@@ -216,6 +216,7 @@ static int tegra_fb_blank(int blank, struct fb_info *info)
case FB_BLANK_POWERDOWN:
dev_dbg(&tegra_fb->ndev->dev, "blank\n");
+ flush_workqueue(tegra_fb->flip_wq);
tegra_dc_disable(tegra_fb->win->dc);
return 0;
@@ -224,6 +225,12 @@ static int tegra_fb_blank(int blank, struct fb_info *info)
}
}
+void tegra_fb_suspend(struct tegra_fb_info *tegra_fb)
+{
+ flush_workqueue(tegra_fb->flip_wq);
+}
+
+
static int tegra_fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -604,15 +611,21 @@ void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
}
}
- /* in case the first mode was not matched */
- m = list_first_entry(&fb_info->info->modelist, struct fb_modelist, list);
- m->mode.flag |= FB_MODE_IS_FIRST;
-
- fb_info->info->mode = (struct fb_videomode *)
- fb_find_best_display(specs, &fb_info->info->modelist);
-
- fb_videomode_to_var(&fb_info->info->var, fb_info->info->mode);
- tegra_fb_set_par(fb_info->info);
+ if (list_empty(&fb_info->info->modelist)) {
+ struct tegra_dc_mode mode;
+ memset(&fb_info->info->var, 0x0, sizeof(fb_info->info->var));
+ memset(&mode, 0x0, sizeof(mode));
+ tegra_dc_set_mode(fb_info->win->dc, &mode);
+ } else {
+ /* in case the first mode was not matched */
+ m = list_first_entry(&fb_info->info->modelist, struct fb_modelist, list);
+ m->mode.flag |= FB_MODE_IS_FIRST;
+ fb_info->info->mode = (struct fb_videomode *)
+ fb_find_best_display(specs, &fb_info->info->modelist);
+
+ fb_videomode_to_var(&fb_info->info->var, fb_info->info->mode);
+ tegra_fb_set_par(fb_info->info);
+ }
event.info = fb_info->info;
fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
@@ -651,7 +664,7 @@ struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev,
tegra_fb->fb_mem = fb_mem;
tegra_fb->xres = fb_data->xres;
tegra_fb->yres = fb_data->yres;
- tegra_fb->fb_nvmap = nvmap_create_client(nvmap_dev);
+ tegra_fb->fb_nvmap = nvmap_create_client(nvmap_dev, "tegra-fb");
if (!tegra_fb->fb_nvmap) {
dev_err(&ndev->dev, "couldn't create nvmap client\n");
ret = -ENOMEM;
diff --git a/drivers/video/tegra/host/dev.c b/drivers/video/tegra/host/dev.c
index 29c56cffe564..daed882be5a2 100644
--- a/drivers/video/tegra/host/dev.c
+++ b/drivers/video/tegra/host/dev.c
@@ -669,7 +669,7 @@ static int __devinit nvhost_probe(struct platform_device *pdev)
host->pdev = pdev;
- host->nvmap = nvmap_create_client(nvmap_dev);
+ host->nvmap = nvmap_create_client(nvmap_dev, "nvhost");
if (!host->nvmap) {
dev_err(&pdev->dev, "unable to create nvmap client\n");
err = -EIO;
diff --git a/drivers/video/tegra/nvmap/nvmap.h b/drivers/video/tegra/nvmap/nvmap.h
index 1e5b800baf7a..9bb7da77a501 100644
--- a/drivers/video/tegra/nvmap/nvmap.h
+++ b/drivers/video/tegra/nvmap/nvmap.h
@@ -100,6 +100,7 @@ struct nvmap_carveout_commit {
};
struct nvmap_client {
+ const char *name;
struct nvmap_device *dev;
struct nvmap_share *share;
struct rb_root handle_refs;
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
index 295289c3ce9a..a899bb4ef1b9 100644
--- a/drivers/video/tegra/nvmap/nvmap_dev.c
+++ b/drivers/video/tegra/nvmap/nvmap_dev.c
@@ -22,10 +22,12 @@
#include <linux/backing-dev.h>
#include <linux/bitmap.h>
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
@@ -425,7 +427,8 @@ struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
return NULL;
}
-struct nvmap_client *nvmap_create_client(struct nvmap_device *dev)
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
+ const char *name)
{
struct nvmap_client *client;
int i;
@@ -438,6 +441,7 @@ struct nvmap_client *nvmap_create_client(struct nvmap_device *dev)
if (!client)
return NULL;
+ client->name = name;
client->super = true;
client->dev = dev;
/* TODO: allocate unique IOVMM client for each nvmap client */
@@ -552,7 +556,7 @@ static int nvmap_open(struct inode *inode, struct file *filp)
return ret;
BUG_ON(dev != nvmap_dev);
- priv = nvmap_create_client(dev);
+ priv = nvmap_create_client(dev, "user");
if (!priv)
return -ENOMEM;
@@ -737,46 +741,106 @@ static ssize_t attr_show_usage(struct device *dev,
return sprintf(buf, "%08x\n", node->heap_bit);
}
-static ssize_t attr_show_clients(struct device *dev,
- struct device_attribute *attr, char *buf)
+static struct device_attribute heap_attr_show_usage =
+ __ATTR(usage, S_IRUGO, attr_show_usage, NULL);
+
+static struct attribute *heap_extra_attrs[] = {
+ &heap_attr_show_usage.attr,
+ NULL,
+};
+
+static struct attribute_group heap_extra_attr_group = {
+ .attrs = heap_extra_attrs,
+};
+
+static void client_stringify(struct nvmap_client *client, struct seq_file *s)
{
- struct nvmap_carveout_node *node = nvmap_heap_device_to_arg(dev);
+ char task_comm[sizeof(client->task->comm)];
+ get_task_comm(task_comm, client->task);
+ seq_printf(s, "%8s %16s %8u", client->name, task_comm,
+ client->task->pid);
+}
+
+static void allocations_stringify(struct nvmap_client *client,
+ struct seq_file *s)
+{
+ struct rb_node *n = client->handle_refs.rb_node;
+
+ for (; n != NULL; n = rb_next(n)) {
+ struct nvmap_handle_ref *ref =
+ rb_entry(n, struct nvmap_handle_ref, node);
+ struct nvmap_handle *handle = ref->handle;
+ if (!handle->heap_pgalloc)
+ seq_printf(s, " %8u@%8lx ", handle->size,
+ handle->carveout->base);
+ }
+ seq_printf(s, "\n");
+}
+
+static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
+{
+ struct nvmap_carveout_node *node = s->private;
struct nvmap_carveout_commit *commit;
- char *orig_buf = buf;
mutex_lock(&node->clients_mutex);
list_for_each_entry(commit, &node->clients, list) {
struct nvmap_client *client =
get_client_from_carveout_commit(node, commit);
- char task_comm[sizeof(client->task->comm)];
- get_task_comm(task_comm, client->task);
- buf += sprintf(buf, "%16s %8u %8u\n", task_comm,
- client->task->pid, commit->commit);
+ client_stringify(client, s);
+ allocations_stringify(client, s);
}
mutex_unlock(&node->clients_mutex);
- return buf - orig_buf;
-}
-static struct device_attribute heap_attr_show_usage =
- __ATTR(usage, S_IRUGO, attr_show_usage, NULL);
+ return 0;
+}
-static struct device_attribute heap_attr_show_clients =
- __ATTR(clients, S_IRUGO, attr_show_clients, NULL);
+static int nvmap_debug_allocations_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nvmap_debug_allocations_show,
+ inode->i_private);
+}
-static struct attribute *heap_extra_attrs[] = {
- &heap_attr_show_usage.attr,
- &heap_attr_show_clients.attr,
- NULL,
+static struct file_operations debug_allocations_fops = {
+ .open = nvmap_debug_allocations_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
-static struct attribute_group heap_extra_attr_group = {
- .attrs = heap_extra_attrs,
+static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
+{
+ struct nvmap_carveout_node *node = s->private;
+ struct nvmap_carveout_commit *commit;
+
+ mutex_lock(&node->clients_mutex);
+ list_for_each_entry(commit, &node->clients, list) {
+ struct nvmap_client *client =
+ get_client_from_carveout_commit(node, commit);
+ client_stringify(client, s);
+ seq_printf(s, " %8u\n", commit->commit);
+ }
+ mutex_unlock(&node->clients_mutex);
+
+ return 0;
+}
+
+static int nvmap_debug_clients_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nvmap_debug_clients_show, inode->i_private);
+}
+
+static struct file_operations debug_clients_fops = {
+ .open = nvmap_debug_clients_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static int nvmap_probe(struct platform_device *pdev)
{
struct nvmap_platform_data *plat = pdev->dev.platform_data;
struct nvmap_device *dev;
+ struct dentry *nvmap_debug_root;
unsigned int i;
int e;
@@ -885,6 +949,10 @@ static int nvmap_probe(struct platform_device *pdev)
goto fail;
}
+ nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
+ if (IS_ERR_OR_NULL(nvmap_debug_root))
+ dev_err(&pdev->dev, "couldn't create debug files\n");
+
for (i = 0; i < plat->nr_carveouts; i++) {
struct nvmap_carveout_node *node = &dev->heaps[i];
const struct nvmap_platform_carveout *co = &plat->carveouts[i];
@@ -907,7 +975,19 @@ static int nvmap_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "created carveout %s (%uKiB)\n",
co->name, co->size / 1024);
+
+ if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
+ struct dentry *heap_root =
+ debugfs_create_dir(co->name, nvmap_debug_root);
+ if (!IS_ERR_OR_NULL(heap_root)) {
+ debugfs_create_file("clients", 0664, heap_root,
+ node, &debug_clients_fops);
+ debugfs_create_file("allocations", 0664,
+ heap_root, node, &debug_allocations_fops);
+ }
+ }
}
+
platform_set_drvdata(pdev, dev);
nvmap_dev = dev;
return 0;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 26147746c272..84b0ef45e24e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3523,9 +3523,12 @@ static int ext4_commit_super(struct super_block *sb, int sync)
else
es->s_kbytes_written =
cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
- ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
+ if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeblocks_counter))
+ ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeblocks_counter));
- es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive(
+ if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
+ es->s_free_inodes_count =
+ cpu_to_le32(percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeinodes_counter));
sb->s_dirt = 0;
BUFFER_TRACE(sbh, "marking dirty");
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 3fc99cbac5f2..8b1f0a982bdb 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -1094,6 +1094,7 @@ extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter);
/* drivers/video/modedb.c */
#define VESA_MODEDB_SIZE 34
+#define CEA_MODEDB_SIZE 65
extern void fb_var_to_videomode(struct fb_videomode *mode,
const struct fb_var_screeninfo *var);
extern void fb_videomode_to_var(struct fb_var_screeninfo *var,
@@ -1145,6 +1146,7 @@ struct fb_videomode {
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
+extern const struct fb_videomode cea_modes[];
struct fb_modelist {
struct list_head list;
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 8a7d510ffa9c..46f6ba56fa91 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -78,6 +78,11 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
return 1;
}
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+ return (fbc->counters != NULL);
+}
+
#else
struct percpu_counter {
@@ -143,6 +148,11 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
return percpu_counter_read(fbc);
}
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+ return 1;
+}
+
#endif /* CONFIG_SMP */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
diff --git a/include/linux/tegra_avp.h b/include/linux/tegra_avp.h
new file mode 100644
index 000000000000..2650b553c615
--- /dev/null
+++ b/include/linux/tegra_avp.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_TEGRA_AVP_H
+#define __LINUX_TEGRA_AVP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define TEGRA_AVP_LIB_MAX_NAME 32
+#define TEGRA_AVP_LIB_MAX_ARGS 220 /* DO NOT CHANGE THIS! */
+
+struct tegra_avp_lib {
+ char name[TEGRA_AVP_LIB_MAX_NAME];
+ void __user *args;
+ size_t args_len;
+ int greedy;
+ unsigned long handle;
+};
+
+#define TEGRA_AVP_IOCTL_MAGIC 'r'
+
+#define TEGRA_AVP_IOCTL_LOAD_LIB _IOWR(TEGRA_AVP_IOCTL_MAGIC, 0x40, struct tegra_avp_lib)
+#define TEGRA_AVP_IOCTL_UNLOAD_LIB _IOW(TEGRA_AVP_IOCTL_MAGIC, 0x41, unsigned long)
+
+#define TEGRA_AVP_IOCTL_MIN_NR _IOC_NR(TEGRA_AVP_IOCTL_LOAD_LIB)
+#define TEGRA_AVP_IOCTL_MAX_NR _IOC_NR(TEGRA_AVP_IOCTL_UNLOAD_LIB)
+
+#endif
diff --git a/include/linux/tegra_rpc.h b/include/linux/tegra_rpc.h
new file mode 100644
index 000000000000..16e6367cf569
--- /dev/null
+++ b/include/linux/tegra_rpc.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * Based on original code from NVIDIA, and a partial rewrite by:
+ * Gary King <gking@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_TEGRA_RPC_H
+#define __LINUX_TEGRA_RPC_H
+
+#define TEGRA_RPC_MAX_MSG_LEN 256
+
+/* Note: the actual size of the name in the protocol message is 16 bytes,
+ * but that is because the name there is not NUL terminated, only NUL
+ * padded. */
+#define TEGRA_RPC_MAX_NAME_LEN 17
+
+struct tegra_rpc_port_desc {
+ char name[TEGRA_RPC_MAX_NAME_LEN]; /* port name; one byte longer than the 16-byte NUL-padded wire field so it can be NUL-terminated */
+ int notify_fd; /* fd representing a trpc_sema to signal when a
+ * message has been received */
+};
+
+#define TEGRA_RPC_IOCTL_MAGIC 'r'
+
+#define TEGRA_RPC_IOCTL_PORT_CREATE _IOW(TEGRA_RPC_IOCTL_MAGIC, 0x20, struct tegra_rpc_port_desc)
+#define TEGRA_RPC_IOCTL_PORT_GET_NAME _IOR(TEGRA_RPC_IOCTL_MAGIC, 0x21, char *)
+#define TEGRA_RPC_IOCTL_PORT_CONNECT _IOR(TEGRA_RPC_IOCTL_MAGIC, 0x22, long)
+#define TEGRA_RPC_IOCTL_PORT_LISTEN _IOR(TEGRA_RPC_IOCTL_MAGIC, 0x23, long)
+
+#define TEGRA_RPC_IOCTL_MIN_NR _IOC_NR(TEGRA_RPC_IOCTL_PORT_CREATE)
+#define TEGRA_RPC_IOCTL_MAX_NR _IOC_NR(TEGRA_RPC_IOCTL_PORT_LISTEN)
+
+#endif
diff --git a/include/linux/tegra_sema.h b/include/linux/tegra_sema.h
new file mode 100644
index 000000000000..7b423b6cb5c4
--- /dev/null
+++ b/include/linux/tegra_sema.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_TEGRA_SEMA_H
+#define __LINUX_TEGRA_SEMA_H
+
+/* this shares the magic with the tegra RPC and AVP drivers.
+ * See include/linux/tegra_avp.h and include/linux/tegra_rpc.h */
+#define TEGRA_SEMA_IOCTL_MAGIC 'r'
+
+/* If IOCTL_WAIT is interrupted by a signal and the timeout was not -1,
+ * then the value pointed to by the argument will be updated with the amount
+ * of time remaining for the wait. */
+#define TEGRA_SEMA_IOCTL_WAIT _IOW(TEGRA_SEMA_IOCTL_MAGIC, 0x30, long *)
+#define TEGRA_SEMA_IOCTL_SIGNAL _IO(TEGRA_SEMA_IOCTL_MAGIC, 0x31)
+
+#define TEGRA_SEMA_IOCTL_MIN_NR _IOC_NR(TEGRA_SEMA_IOCTL_WAIT)
+#define TEGRA_SEMA_IOCTL_MAX_NR _IOC_NR(TEGRA_SEMA_IOCTL_SIGNAL)
+
+#endif
diff --git a/include/linux/tegra_spdif.h b/include/linux/tegra_spdif.h
new file mode 100644
index 000000000000..8d7f6457a0d1
--- /dev/null
+++ b/include/linux/tegra_spdif.h
@@ -0,0 +1,56 @@
+/* include/linux/tegra_spdif.h
+ *
+ * SPDIF audio driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _TEGRA_SPDIF_H
+#define _TEGRA_SPDIF_H
+
+#include <linux/ioctl.h>
+
+#define TEGRA_SPDIF_MAGIC 's'
+
+
+
+struct tegra_audio_buf_config {
+ unsigned size; /* buffer size as a log2 order — TODO confirm units against tegra_spdif_audio.c */
+ unsigned threshold; /* wakeup/refill threshold, also a log2 order — confirm */
+ unsigned chunk; /* DMA chunk size, also a log2 order — confirm */
+};
+
+
+
+#define TEGRA_AUDIO_OUT_SET_BUF_CONFIG _IOW(TEGRA_SPDIF_MAGIC, 0, \
+ const struct tegra_audio_buf_config *)
+#define TEGRA_AUDIO_OUT_GET_BUF_CONFIG _IOR(TEGRA_SPDIF_MAGIC, 1, \
+ struct tegra_audio_buf_config *)
+
+#define TEGRA_AUDIO_OUT_GET_ERROR_COUNT _IOR(TEGRA_SPDIF_MAGIC, 2, \
+ unsigned *)
+
+struct tegra_audio_out_preload {
+ void *data; /* in: sample data to preload into the output FIFO */
+ size_t len; /* in: length of data, in bytes */
+ size_t len_written; /* out: bytes actually consumed (ioctl is _IOWR, so the driver writes this back) */
+};
+
+#define TEGRA_AUDIO_OUT_PRELOAD_FIFO _IOWR(TEGRA_SPDIF_MAGIC, 3, \
+ struct tegra_audio_out_preload *)
+
+#endif/*_TEGRA_SPDIF_H*/
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 645e541a45f6..0da2837416eb 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -120,10 +120,10 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
switch (o->type) {
case PM_QOS_MIN:
- return plist_last(&o->requests)->prio;
+ return plist_first(&o->requests)->prio;
case PM_QOS_MAX:
- return plist_first(&o->requests)->prio;
+ return plist_last(&o->requests)->prio;
default:
/* runtime check for not using enum */