Diffstat (limited to 'drivers/video/tegra/host')
-rw-r--r--  drivers/video/tegra/host/Makefile | 30
-rw-r--r--  drivers/video/tegra/host/bus.c | 629
-rw-r--r--  drivers/video/tegra/host/bus.h | 38
-rw-r--r--  drivers/video/tegra/host/bus_client.c | 675
-rw-r--r--  drivers/video/tegra/host/bus_client.h | 42
-rw-r--r--  drivers/video/tegra/host/chip_support.c | 56
-rw-r--r--  drivers/video/tegra/host/chip_support.h | 181
-rw-r--r--  drivers/video/tegra/host/debug.c | 234
-rw-r--r--  drivers/video/tegra/host/debug.h | 50
-rw-r--r--  drivers/video/tegra/host/dev.c | 31
-rw-r--r--  drivers/video/tegra/host/dev.h | 24
-rw-r--r--  drivers/video/tegra/host/gr2d/Makefile | 7
-rw-r--r--  drivers/video/tegra/host/gr2d/gr2d.c | 73
-rw-r--r--  drivers/video/tegra/host/gr3d/Makefile | 10
-rw-r--r--  drivers/video/tegra/host/gr3d/gr3d.c | 265
-rw-r--r--  drivers/video/tegra/host/gr3d/gr3d.h | 57
-rw-r--r--  drivers/video/tegra/host/gr3d/gr3d_t20.c | 399
-rw-r--r--  drivers/video/tegra/host/gr3d/gr3d_t20.h | 33
-rw-r--r--  drivers/video/tegra/host/gr3d/gr3d_t30.c | 439
-rw-r--r--  drivers/video/tegra/host/gr3d/gr3d_t30.h | 33
-rw-r--r--  drivers/video/tegra/host/gr3d/scale3d.c | 941
-rw-r--r--  drivers/video/tegra/host/gr3d/scale3d.h | 47
-rw-r--r--  drivers/video/tegra/host/host1x/Makefile | 8
-rw-r--r--  drivers/video/tegra/host/host1x/host1x.c | 552
-rw-r--r--  drivers/video/tegra/host/host1x/host1x.h | 78
-rw-r--r--  drivers/video/tegra/host/host1x/host1x01_hardware.h | 170
-rw-r--r--  drivers/video/tegra/host/host1x/host1x_cdma.c | 517
-rw-r--r--  drivers/video/tegra/host/host1x/host1x_cdma.h | 39
-rw-r--r--  drivers/video/tegra/host/host1x/host1x_channel.c | 681
-rw-r--r--  drivers/video/tegra/host/host1x/host1x_debug.c | 405
-rw-r--r--  drivers/video/tegra/host/host1x/host1x_hwctx.h | 66
-rw-r--r--  drivers/video/tegra/host/host1x/host1x_intr.c | 294
-rw-r--r--  drivers/video/tegra/host/host1x/host1x_syncpt.c | 180
-rw-r--r--  drivers/video/tegra/host/host1x/host1x_syncpt.h | 62
-rw-r--r--  drivers/video/tegra/host/host1x/hw_host1x01_channel.h | 182
-rw-r--r--  drivers/video/tegra/host/host1x/hw_host1x01_sync.h | 398
-rw-r--r--  drivers/video/tegra/host/host1x/hw_host1x01_uclass.h | 474
-rw-r--r--  drivers/video/tegra/host/isp/Makefile | 7
-rw-r--r--  drivers/video/tegra/host/isp/isp.c | 79
-rw-r--r--  drivers/video/tegra/host/mpe/Makefile | 7
-rw-r--r--  drivers/video/tegra/host/mpe/mpe.c | 680
-rw-r--r--  drivers/video/tegra/host/mpe/mpe.h | 32
-rw-r--r--  drivers/video/tegra/host/nvhost_acm.c | 649
-rw-r--r--  drivers/video/tegra/host/nvhost_acm.h | 58
-rw-r--r--  drivers/video/tegra/host/nvhost_cdma.c | 559
-rw-r--r--  drivers/video/tegra/host/nvhost_cdma.h | 117
-rw-r--r--  drivers/video/tegra/host/nvhost_channel.c | 188
-rw-r--r--  drivers/video/tegra/host/nvhost_channel.h | 77
-rw-r--r--  drivers/video/tegra/host/nvhost_hwctx.h | 66
-rw-r--r--  drivers/video/tegra/host/nvhost_intr.c | 441
-rw-r--r--  drivers/video/tegra/host/nvhost_intr.h | 115
-rw-r--r--  drivers/video/tegra/host/nvhost_job.c | 361
-rw-r--r--  drivers/video/tegra/host/nvhost_job.h | 148
-rw-r--r--  drivers/video/tegra/host/nvhost_memmgr.c | 34
-rw-r--r--  drivers/video/tegra/host/nvhost_memmgr.h | 38
-rw-r--r--  drivers/video/tegra/host/nvhost_syncpt.c | 512
-rw-r--r--  drivers/video/tegra/host/nvhost_syncpt.h | 151
-rw-r--r--  drivers/video/tegra/host/nvmap.c | 109
-rw-r--r--  drivers/video/tegra/host/nvmap.h | 28
-rw-r--r--  drivers/video/tegra/host/t20/Makefile | 8
-rw-r--r--  drivers/video/tegra/host/t20/t20.c | 294
-rw-r--r--  drivers/video/tegra/host/t20/t20.h | 29
-rw-r--r--  drivers/video/tegra/host/t30/Makefile | 8
-rw-r--r--  drivers/video/tegra/host/t30/t30.c | 309
-rw-r--r--  drivers/video/tegra/host/t30/t30.h | 29
-rw-r--r--  drivers/video/tegra/host/vi/Makefile | 7
-rw-r--r--  drivers/video/tegra/host/vi/vi.c | 79
67 files changed, 13619 insertions(+), 0 deletions(-)
diff --git a/drivers/video/tegra/host/Makefile b/drivers/video/tegra/host/Makefile
new file mode 100644
index 000000000000..c2608218c811
--- /dev/null
+++ b/drivers/video/tegra/host/Makefile
@@ -0,0 +1,30 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-objs = \
+ nvhost_acm.o \
+ nvhost_syncpt.o \
+ nvhost_cdma.o \
+ nvhost_intr.o \
+ nvhost_channel.o \
+ nvhost_job.o \
+ bus.o \
+ dev.o \
+ debug.o \
+ bus_client.o \
+ chip_support.o \
+ nvhost_memmgr.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += mpe/
+obj-$(CONFIG_TEGRA_GRHOST) += gr3d/
+obj-$(CONFIG_TEGRA_GRHOST) += host1x/
+obj-$(CONFIG_TEGRA_GRHOST) += t20/
+obj-$(CONFIG_TEGRA_GRHOST) += t30/
+obj-$(CONFIG_TEGRA_GRHOST) += gr2d/
+obj-$(CONFIG_TEGRA_GRHOST) += isp/
+ifeq ($(CONFIG_TEGRA_CAMERA),y)
+obj-$(CONFIG_TEGRA_GRHOST) += vi/
+endif
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost.o
+
+obj-$(CONFIG_TEGRA_GRHOST_USE_NVMAP) += nvmap.o
diff --git a/drivers/video/tegra/host/bus.c b/drivers/video/tegra/host/bus.c
new file mode 100644
index 000000000000..f22dac288051
--- /dev/null
+++ b/drivers/video/tegra/host/bus.c
@@ -0,0 +1,629 @@
+/*
+ * drivers/video/tegra/host/bus.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@google.com>
+ *
+ * Copyright (C) 2010-2012 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/nvhost.h>
+
+#include "bus.h"
+#include "dev.h"
+
+struct nvhost_bus *nvhost_bus_inst;
+struct nvhost_master *nvhost;
+
+struct resource *nvhost_get_resource(struct nvhost_device *dev,
+ unsigned int type, unsigned int num)
+{
+ int i;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (type == resource_type(r) && num-- == 0)
+ return r;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_resource);
+
+int nvhost_get_irq(struct nvhost_device *dev, unsigned int num)
+{
+ struct resource *r = nvhost_get_resource(dev, IORESOURCE_IRQ, num);
+
+ return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_irq);
+
+struct resource *nvhost_get_resource_byname(struct nvhost_device *dev,
+ unsigned int type,
+ const char *name)
+{
+ int i;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (type == resource_type(r) && !strcmp(r->name, name))
+ return r;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_resource_byname);
+
+int nvhost_get_irq_byname(struct nvhost_device *dev, const char *name)
+{
+ struct resource *r = nvhost_get_resource_byname(dev, IORESOURCE_IRQ,
+ name);
+
+ return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_irq_byname);
+
+static struct nvhost_device_id *nvhost_bus_match_id(struct nvhost_device *dev,
+ struct nvhost_device_id *id_table)
+{
+ while (id_table->name[0]) {
+ if (strcmp(dev->name, id_table->name) == 0
+ && dev->version == id_table->version)
+ return id_table;
+ id_table++;
+ }
+ return NULL;
+}
+
+static int nvhost_bus_match(struct device *_dev, struct device_driver *drv)
+{
+ struct nvhost_device *dev = to_nvhost_device(_dev);
+ struct nvhost_driver *ndrv = to_nvhost_driver(drv);
+
+	/* check if the driver supports multiple devices through an id_table */
+ if (ndrv->id_table)
+ return nvhost_bus_match_id(dev, ndrv->id_table) != NULL;
+ else /* driver does not support id_table */
+ return !strcmp(dev->name, drv->name);
+}
+
+static int nvhost_drv_probe(struct device *_dev)
+{
+ struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+ struct nvhost_device *dev = to_nvhost_device(_dev);
+
+ if (drv && drv->probe) {
+ if (drv->id_table)
+ return drv->probe(dev, nvhost_bus_match_id(dev, drv->id_table));
+ else
+ return drv->probe(dev, NULL);
+	} else {
+		return -ENODEV;
+	}
+}
+
+static int nvhost_drv_remove(struct device *_dev)
+{
+ struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+ struct nvhost_device *dev = to_nvhost_device(_dev);
+
+ return drv->remove(dev);
+}
+
+static void nvhost_drv_shutdown(struct device *_dev)
+{
+ struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+ struct nvhost_device *dev = to_nvhost_device(_dev);
+
+ drv->shutdown(dev);
+}
+
+int nvhost_driver_register(struct nvhost_driver *drv)
+{
+ drv->driver.bus = &nvhost_bus_inst->nvhost_bus_type;
+ if (drv->probe)
+ drv->driver.probe = nvhost_drv_probe;
+ if (drv->remove)
+ drv->driver.remove = nvhost_drv_remove;
+ if (drv->shutdown)
+ drv->driver.shutdown = nvhost_drv_shutdown;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(nvhost_driver_register);
+
+void nvhost_driver_unregister(struct nvhost_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(nvhost_driver_unregister);
+
+int nvhost_add_devices(struct nvhost_device **devs, int num)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < num; i++) {
+ ret = nvhost_device_register(devs[i]);
+ if (ret) {
+ while (--i >= 0)
+ nvhost_device_unregister(devs[i]);
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvhost_add_devices);
+
+int nvhost_device_register(struct nvhost_device *dev)
+{
+ int i, ret = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ device_initialize(&dev->dev);
+
+ /* If the dev does not have a parent, assign host1x as parent */
+ if (!dev->dev.parent && nvhost && nvhost->dev != dev)
+ dev->dev.parent = &nvhost->dev->dev;
+
+ dev->dev.bus = &nvhost_bus_inst->nvhost_bus_type;
+
+ if (dev->id != -1)
+ dev_set_name(&dev->dev, "%s.%d", dev->name, dev->id);
+ else
+ dev_set_name(&dev->dev, "%s", dev->name);
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *p, *r = &dev->resource[i];
+
+ if (r->name == NULL)
+ r->name = dev_name(&dev->dev);
+
+ p = r->parent;
+ if (!p) {
+ if (resource_type(r) == IORESOURCE_MEM)
+ p = &iomem_resource;
+ else if (resource_type(r) == IORESOURCE_IO)
+ p = &ioport_resource;
+ }
+
+ if (p && insert_resource(p, r)) {
+ pr_err("%s: failed to claim resource %d\n",
+ dev_name(&dev->dev), i);
+ ret = -EBUSY;
+ goto failed;
+ }
+ }
+
+ ret = device_add(&dev->dev);
+ if (ret == 0)
+ return ret;
+
+failed:
+ while (--i >= 0) {
+ struct resource *r = &dev->resource[i];
+ unsigned long type = resource_type(r);
+
+ if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+ release_resource(r);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvhost_device_register);
+
+void nvhost_device_unregister(struct nvhost_device *dev)
+{
+ int i;
+ if (dev) {
+ device_del(&dev->dev);
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+ unsigned long type = resource_type(r);
+
+ if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+ release_resource(r);
+ }
+
+ put_device(&dev->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(nvhost_device_unregister);
+
+#ifdef CONFIG_PM_SLEEP
+
+static int nvhost_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+ struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
+ struct nvhost_device *pdev = to_nvhost_device(dev);
+ int ret = 0;
+
+ if (dev->driver && pdrv->suspend)
+ ret = pdrv->suspend(pdev, mesg);
+
+ return ret;
+}
+
+static int nvhost_legacy_resume(struct device *dev)
+{
+ struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
+ struct nvhost_device *pdev = to_nvhost_device(dev);
+ int ret = 0;
+
+ if (dev->driver && pdrv->resume)
+ ret = pdrv->resume(pdev);
+
+ return ret;
+}
+
+static int nvhost_pm_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm && drv->pm->prepare)
+ ret = drv->pm->prepare(dev);
+
+ return ret;
+}
+
+static void nvhost_pm_complete(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->complete)
+ drv->pm->complete(dev);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define nvhost_pm_prepare NULL
+#define nvhost_pm_complete NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+
+int __weak nvhost_pm_suspend(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend)
+ ret = drv->pm->suspend(dev);
+ } else {
+ ret = nvhost_legacy_suspend(dev, PMSG_SUSPEND);
+ }
+
+ return ret;
+}
+
+int __weak nvhost_pm_suspend_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend_noirq)
+ ret = drv->pm->suspend_noirq(dev);
+ }
+
+ return ret;
+}
+
+int __weak nvhost_pm_resume(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume)
+ ret = drv->pm->resume(dev);
+ } else {
+ ret = nvhost_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+int __weak nvhost_pm_resume_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume_noirq)
+ ret = drv->pm->resume_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define nvhost_pm_suspend NULL
+#define nvhost_pm_resume NULL
+#define nvhost_pm_suspend_noirq NULL
+#define nvhost_pm_resume_noirq NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+static int nvhost_pm_freeze(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze)
+ ret = drv->pm->freeze(dev);
+ } else {
+ ret = nvhost_legacy_suspend(dev, PMSG_FREEZE);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_freeze_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze_noirq)
+ ret = drv->pm->freeze_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_thaw(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw)
+ ret = drv->pm->thaw(dev);
+ } else {
+ ret = nvhost_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_thaw_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw_noirq)
+ ret = drv->pm->thaw_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_poweroff(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff)
+ ret = drv->pm->poweroff(dev);
+ } else {
+ ret = nvhost_legacy_suspend(dev, PMSG_HIBERNATE);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_poweroff_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff_noirq)
+ ret = drv->pm->poweroff_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_restore(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore)
+ ret = drv->pm->restore(dev);
+ } else {
+ ret = nvhost_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_restore_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore_noirq)
+ ret = drv->pm->restore_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define nvhost_pm_freeze NULL
+#define nvhost_pm_thaw NULL
+#define nvhost_pm_poweroff NULL
+#define nvhost_pm_restore NULL
+#define nvhost_pm_freeze_noirq NULL
+#define nvhost_pm_thaw_noirq NULL
+#define nvhost_pm_poweroff_noirq NULL
+#define nvhost_pm_restore_noirq NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+#ifdef CONFIG_PM_RUNTIME
+
+int __weak nvhost_pm_runtime_suspend(struct device *dev)
+{
+ return pm_generic_runtime_suspend(dev);
+}
+
+int __weak nvhost_pm_runtime_resume(struct device *dev)
+{
+ return pm_generic_runtime_resume(dev);
+}
+
+int __weak nvhost_pm_runtime_idle(struct device *dev)
+{
+ return pm_generic_runtime_idle(dev);
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+#define nvhost_pm_runtime_suspend NULL
+#define nvhost_pm_runtime_resume NULL
+#define nvhost_pm_runtime_idle NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops nvhost_dev_pm_ops = {
+ .prepare = nvhost_pm_prepare,
+ .complete = nvhost_pm_complete,
+ .suspend = nvhost_pm_suspend,
+ .resume = nvhost_pm_resume,
+ .freeze = nvhost_pm_freeze,
+ .thaw = nvhost_pm_thaw,
+ .poweroff = nvhost_pm_poweroff,
+ .restore = nvhost_pm_restore,
+ .suspend_noirq = nvhost_pm_suspend_noirq,
+ .resume_noirq = nvhost_pm_resume_noirq,
+ .freeze_noirq = nvhost_pm_freeze_noirq,
+ .thaw_noirq = nvhost_pm_thaw_noirq,
+ .poweroff_noirq = nvhost_pm_poweroff_noirq,
+ .restore_noirq = nvhost_pm_restore_noirq,
+ .runtime_suspend = nvhost_pm_runtime_suspend,
+ .runtime_resume = nvhost_pm_runtime_resume,
+ .runtime_idle = nvhost_pm_runtime_idle,
+};
+
+static int set_parent(struct device *dev, void *data)
+{
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct nvhost_master *host = data;
+ if (!dev->parent && ndev != host->dev)
+ dev->parent = &host->dev->dev;
+ return 0;
+}
+
+int nvhost_bus_add_host(struct nvhost_master *host)
+{
+ nvhost = host;
+
+	/* Assign host1x as the parent of all devices on the nvhost bus */
+ bus_for_each_dev(&nvhost_bus_inst->nvhost_bus_type, NULL, host, set_parent);
+
+ return 0;
+}
+
+struct nvhost_bus *nvhost_bus_get(void)
+{
+ return nvhost_bus_inst;
+}
+
+int nvhost_bus_init(void)
+{
+ int err;
+ struct nvhost_chip_support *chip_ops;
+
+ pr_info("host1x bus init\n");
+
+ nvhost_bus_inst = kzalloc(sizeof(*nvhost_bus_inst), GFP_KERNEL);
+ if (nvhost_bus_inst == NULL) {
+ pr_err("%s: Cannot allocate nvhost_bus\n", __func__);
+ return -ENOMEM;
+ }
+
+ chip_ops = kzalloc(sizeof(*chip_ops), GFP_KERNEL);
+ if (chip_ops == NULL) {
+ pr_err("%s: Cannot allocate nvhost_chip_support\n", __func__);
+ kfree(nvhost_bus_inst);
+ nvhost_bus_inst = NULL;
+ return -ENOMEM;
+ }
+
+ nvhost_bus_inst->nvhost_bus_type.name = "nvhost";
+ nvhost_bus_inst->nvhost_bus_type.match = nvhost_bus_match;
+ nvhost_bus_inst->nvhost_bus_type.pm = &nvhost_dev_pm_ops;
+ nvhost_bus_inst->nvhost_chip_ops = chip_ops;
+
+ err = bus_register(&nvhost_bus_inst->nvhost_bus_type);
+
+ return err;
+}
+postcore_initcall(nvhost_bus_init);
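
The bus implementation above follows the platform-bus pattern: nvhost_bus_match() pairs devices with drivers by name, or through the driver's id_table when one is supplied, and nvhost_dev_pm_ops forwards each PM phase to the driver's dev_pm_ops, falling back to the legacy suspend/resume hooks. A client driver registers with this bus roughly as in the sketch below; "foo" is a hypothetical engine name, but the shape matches the gr2d driver added later in this series:

    static int __devinit foo_probe(struct nvhost_device *dev,
            struct nvhost_device_id *id_table)
    {
        return nvhost_client_device_init(dev);
    }

    static struct nvhost_driver foo_driver = {
        .probe = foo_probe,
        .driver = {
            .owner = THIS_MODULE,
            .name = "foo",  /* matched against nvhost_device->name */
        }
    };

    static int __init foo_init(void)
    {
        return nvhost_driver_register(&foo_driver);
    }
    module_init(foo_init);
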
diff --git a/drivers/video/tegra/host/bus.h b/drivers/video/tegra/host/bus.h
new file mode 100644
index 000000000000..99f820335d60
--- /dev/null
+++ b/drivers/video/tegra/host/bus.h
@@ -0,0 +1,38 @@
+/*
+ * drivers/video/tegra/host/bus.h
+ *
+ * Tegra Graphics Host bus API header
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_BUS_H
+#define __NVHOST_BUS_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+#include "chip_support.h"
+
+struct nvhost_bus {
+ struct nvhost_chip_support *nvhost_chip_ops;
+ struct bus_type nvhost_bus_type;
+};
+
+struct nvhost_bus *nvhost_bus_get(void);
+
+extern struct nvhost_bus *nvhost_bus_inst;
+
+#endif
diff --git a/drivers/video/tegra/host/bus_client.c b/drivers/video/tegra/host/bus_client.c
new file mode 100644
index 000000000000..2b92a62cc0bc
--- /dev/null
+++ b/drivers/video/tegra/host/bus_client.c
@@ -0,0 +1,675 @@
+/*
+ * drivers/video/tegra/host/bus_client.c
+ *
+ * Tegra Graphics Host Client Module
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+#include <linux/clk.h>
+#include <linux/hrtimer.h>
+
+#include <trace/events/nvhost.h>
+
+#include <linux/io.h>
+#include <linux/string.h>
+
+#include <linux/nvhost.h>
+#include <linux/nvhost_ioctl.h>
+
+#include <mach/gpufuse.h>
+#include <mach/hardware.h>
+#include <mach/iomap.h>
+
+#include "debug.h"
+#include "bus_client.h"
+#include "dev.h"
+#include "nvhost_memmgr.h"
+#include "chip_support.h"
+#include "nvhost_acm.h"
+
+#include "nvhost_channel.h"
+#include "nvhost_job.h"
+#include "nvhost_hwctx.h"
+
+static int validate_reg(struct nvhost_device *ndev, u32 offset, int count)
+{
+ struct resource *r = nvhost_get_resource(ndev, IORESOURCE_MEM, 0);
+ int err = 0;
+
+ if (offset + 4 * count > resource_size(r)
+ || (offset + 4 * count < offset))
+ err = -EPERM;
+
+ return err;
+}
+
+int nvhost_read_module_regs(struct nvhost_device *ndev,
+ u32 offset, int count, u32 *values)
+{
+ void __iomem *p = ndev->aperture + offset;
+ int err;
+
+ /* verify offset */
+ err = validate_reg(ndev, offset, count);
+ if (err)
+ return err;
+
+ nvhost_module_busy(ndev);
+ while (count--) {
+ *(values++) = readl(p);
+ p += 4;
+ }
+ rmb();
+ nvhost_module_idle(ndev);
+
+ return 0;
+}
+
+int nvhost_write_module_regs(struct nvhost_device *ndev,
+ u32 offset, int count, const u32 *values)
+{
+ void __iomem *p = ndev->aperture + offset;
+ int err;
+
+ /* verify offset */
+ err = validate_reg(ndev, offset, count);
+ if (err)
+ return err;
+
+ nvhost_module_busy(ndev);
+ while (count--) {
+ writel(*(values++), p);
+ p += 4;
+ }
+ wmb();
+ nvhost_module_idle(ndev);
+
+ return 0;
+}
+
+struct nvhost_channel_userctx {
+ struct nvhost_channel *ch;
+ struct nvhost_hwctx *hwctx;
+ struct nvhost_submit_hdr_ext hdr;
+ int num_relocshifts;
+ struct nvhost_job *job;
+ struct mem_mgr *memmgr;
+ u32 timeout;
+ u32 priority;
+ int clientid;
+};
+
+static int nvhost_channelrelease(struct inode *inode, struct file *filp)
+{
+ struct nvhost_channel_userctx *priv = filp->private_data;
+
+ trace_nvhost_channel_release(priv->ch->dev->name);
+
+ filp->private_data = NULL;
+
+ nvhost_module_remove_client(priv->ch->dev, priv);
+ nvhost_putchannel(priv->ch, priv->hwctx);
+
+ if (priv->hwctx)
+ priv->ch->ctxhandler->put(priv->hwctx);
+
+ if (priv->job)
+ nvhost_job_put(priv->job);
+
+ mem_op().put_mgr(priv->memmgr);
+ kfree(priv);
+ return 0;
+}
+
+static int nvhost_channelopen(struct inode *inode, struct file *filp)
+{
+ struct nvhost_channel_userctx *priv;
+ struct nvhost_channel *ch;
+
+ ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
+ ch = nvhost_getchannel(ch);
+ if (!ch)
+ return -ENOMEM;
+ trace_nvhost_channel_open(ch->dev->name);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ nvhost_putchannel(ch, NULL);
+ return -ENOMEM;
+ }
+ filp->private_data = priv;
+ priv->ch = ch;
+	if (nvhost_module_add_client(ch->dev, priv))
+ goto fail;
+
+ if (ch->ctxhandler && ch->ctxhandler->alloc) {
+ priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
+ if (!priv->hwctx)
+ goto fail;
+ }
+ priv->priority = NVHOST_PRIORITY_MEDIUM;
+ priv->clientid = atomic_add_return(1,
+ &nvhost_get_host(ch->dev)->clientid);
+ priv->timeout = CONFIG_TEGRA_GRHOST_DEFAULT_TIMEOUT;
+
+ return 0;
+fail:
+ nvhost_channelrelease(inode, filp);
+ return -ENOMEM;
+}
+
+static int set_submit(struct nvhost_channel_userctx *ctx)
+{
+ struct nvhost_device *ndev = ctx->ch->dev;
+ struct nvhost_master *host = nvhost_get_host(ndev);
+
+ /* submit should have at least 1 cmdbuf */
+ if (!ctx->hdr.num_cmdbufs ||
+ !nvhost_syncpt_is_valid(&host->syncpt,
+ ctx->hdr.syncpt_id))
+ return -EIO;
+
+ if (!ctx->memmgr) {
+ dev_err(&ndev->dev, "no nvmap context set\n");
+ return -EFAULT;
+ }
+
+ ctx->job = nvhost_job_alloc(ctx->ch,
+ ctx->hwctx,
+ &ctx->hdr,
+ ctx->memmgr,
+ ctx->priority,
+ ctx->clientid);
+ if (!ctx->job)
+ return -ENOMEM;
+ ctx->job->timeout = ctx->timeout;
+
+ if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
+ ctx->num_relocshifts = ctx->hdr.num_relocs;
+
+ return 0;
+}
+
+static void reset_submit(struct nvhost_channel_userctx *ctx)
+{
+ ctx->hdr.num_cmdbufs = 0;
+ ctx->hdr.num_relocs = 0;
+ ctx->num_relocshifts = 0;
+ ctx->hdr.num_waitchks = 0;
+
+ if (ctx->job) {
+ nvhost_job_put(ctx->job);
+ ctx->job = NULL;
+ }
+}
+
+static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ struct nvhost_channel_userctx *priv = filp->private_data;
+ size_t remaining = count;
+ int err = 0;
+ struct nvhost_job *job = priv->job;
+ struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
+ const char *chname = priv->ch->dev->name;
+
+ if (!job)
+ return -EIO;
+
+ while (remaining) {
+ size_t consumed;
+ if (!hdr->num_relocs &&
+ !priv->num_relocshifts &&
+ !hdr->num_cmdbufs &&
+ !hdr->num_waitchks) {
+ consumed = sizeof(struct nvhost_submit_hdr);
+ if (remaining < consumed)
+ break;
+ if (copy_from_user(hdr, buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
+ err = set_submit(priv);
+ if (err)
+ break;
+ trace_nvhost_channel_write_submit(chname,
+ count, hdr->num_cmdbufs, hdr->num_relocs,
+ hdr->syncpt_id, hdr->syncpt_incrs);
+ } else if (hdr->num_cmdbufs) {
+ struct nvhost_cmdbuf cmdbuf;
+ consumed = sizeof(cmdbuf);
+ if (remaining < consumed)
+ break;
+ if (copy_from_user(&cmdbuf, buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ trace_nvhost_channel_write_cmdbuf(chname,
+ cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
+ nvhost_job_add_gather(job,
+ cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
+ hdr->num_cmdbufs--;
+ } else if (hdr->num_relocs) {
+ int numrelocs = remaining / sizeof(struct nvhost_reloc);
+ if (!numrelocs)
+ break;
+ numrelocs = min_t(int, numrelocs, priv->hdr.num_relocs);
+ consumed = numrelocs * sizeof(struct nvhost_reloc);
+ if (copy_from_user(&job->relocarray[job->num_relocs],
+ buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ while (numrelocs) {
+ struct nvhost_reloc *reloc =
+ &job->relocarray[job->num_relocs];
+ trace_nvhost_channel_write_reloc(chname,
+ reloc->cmdbuf_mem,
+ reloc->cmdbuf_offset,
+ reloc->target,
+ reloc->target_offset);
+ job->num_relocs++;
+ hdr->num_relocs--;
+ numrelocs--;
+ }
+ } else if (hdr->num_waitchks) {
+ int numwaitchks =
+ (remaining / sizeof(struct nvhost_waitchk));
+ if (!numwaitchks)
+ break;
+ numwaitchks = min_t(int,
+ numwaitchks, hdr->num_waitchks);
+ consumed = numwaitchks * sizeof(struct nvhost_waitchk);
+ if (copy_from_user(&job->waitchk[job->num_waitchk],
+ buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ trace_nvhost_channel_write_waitchks(
+ chname, numwaitchks,
+ hdr->waitchk_mask);
+ job->num_waitchk += numwaitchks;
+ hdr->num_waitchks -= numwaitchks;
+ } else if (priv->num_relocshifts) {
+ int next_shift =
+ job->num_relocs - priv->num_relocshifts;
+ int num =
+ (remaining / sizeof(struct nvhost_reloc_shift));
+ if (!num)
+ break;
+ num = min_t(int, num, priv->num_relocshifts);
+ consumed = num * sizeof(struct nvhost_reloc_shift);
+ if (copy_from_user(&job->relocshiftarray[next_shift],
+ buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ priv->num_relocshifts -= num;
+ } else {
+ err = -EFAULT;
+ break;
+ }
+ remaining -= consumed;
+ buf += consumed;
+ }
+
+ if (err < 0) {
+ dev_err(&priv->ch->dev->dev, "channel write error\n");
+ reset_submit(priv);
+ return err;
+ }
+
+ return count - remaining;
+}
+
+static int nvhost_ioctl_channel_flush(
+ struct nvhost_channel_userctx *ctx,
+ struct nvhost_get_param_args *args,
+ int null_kickoff)
+{
+ struct nvhost_device *ndev = to_nvhost_device(&ctx->ch->dev->dev);
+ int err;
+
+ trace_nvhost_ioctl_channel_flush(ctx->ch->dev->name);
+
+ if (!ctx->job ||
+ ctx->hdr.num_relocs ||
+ ctx->hdr.num_cmdbufs ||
+ ctx->hdr.num_waitchks) {
+ reset_submit(ctx);
+ dev_err(&ndev->dev, "channel submit out of sync\n");
+ return -EFAULT;
+ }
+
+ err = nvhost_job_pin(ctx->job, &nvhost_get_host(ndev)->syncpt);
+ if (err) {
+ dev_warn(&ndev->dev, "nvhost_job_pin failed: %d\n", err);
+ return err;
+ }
+
+ if (nvhost_debug_null_kickoff_pid == current->tgid)
+ null_kickoff = 1;
+ ctx->job->null_kickoff = null_kickoff;
+
+ if ((nvhost_debug_force_timeout_pid == current->tgid) &&
+ (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
+ ctx->timeout = nvhost_debug_force_timeout_val;
+ }
+
+ /* context switch if needed, and submit user's gathers to the channel */
+ err = nvhost_channel_submit(ctx->job);
+ args->value = ctx->job->syncpt_end;
+ if (err)
+ nvhost_job_unpin(ctx->job);
+
+ nvhost_job_put(ctx->job);
+ ctx->job = NULL;
+
+ return err;
+}
+
+static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
+ struct nvhost_read_3d_reg_args *args)
+{
+ BUG_ON(!channel_op().read3dreg);
+ return channel_op().read3dreg(ctx->ch, ctx->hwctx,
+ args->offset, &args->value);
+}
+
+static long nvhost_channelctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvhost_channel_userctx *priv = filp->private_data;
+ u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
+ int err = 0;
+
+ if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+ (_IOC_NR(cmd) == 0) ||
+ (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
+ (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
+ return -EFAULT;
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case NVHOST_IOCTL_CHANNEL_FLUSH:
+ err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
+ break;
+ case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
+ err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
+ break;
+ case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
+ {
+ struct nvhost_submit_hdr_ext *hdr;
+
+ if (priv->hdr.num_relocs ||
+ priv->num_relocshifts ||
+ priv->hdr.num_cmdbufs ||
+ priv->hdr.num_waitchks) {
+ reset_submit(priv);
+ dev_err(&priv->ch->dev->dev,
+ "channel submit out of sync\n");
+ err = -EIO;
+ break;
+ }
+
+ hdr = (struct nvhost_submit_hdr_ext *)buf;
+ if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
+ dev_err(&priv->ch->dev->dev,
+ "submit version %d > max supported %d\n",
+ hdr->submit_version,
+ NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
+ err = -EINVAL;
+ break;
+ }
+ memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
+ err = set_submit(priv);
+ trace_nvhost_ioctl_channel_submit(priv->ch->dev->name,
+ priv->hdr.submit_version,
+ priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
+ priv->hdr.num_waitchks,
+ priv->hdr.syncpt_id, priv->hdr.syncpt_incrs);
+ break;
+ }
+ case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
+		/* host syncpt ID is used by the RM (and must never be given out) */
+ BUG_ON(priv->ch->dev->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST));
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->ch->dev->syncpts;
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->ch->dev->waitbases;
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->ch->dev->modulemutexes;
+ break;
+ case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
+ {
+ int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
+ struct mem_mgr *new_client = mem_op().get_mgr_file(fd);
+
+ if (IS_ERR(new_client)) {
+ err = PTR_ERR(new_client);
+ break;
+ }
+
+ if (priv->memmgr)
+ mem_op().put_mgr(priv->memmgr);
+
+ priv->memmgr = new_client;
+ break;
+ }
+ case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
+ err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
+ {
+ unsigned long rate;
+ struct nvhost_clk_rate_args *arg =
+ (struct nvhost_clk_rate_args *)buf;
+
+ err = nvhost_module_get_rate(priv->ch->dev, &rate, 0);
+ if (err == 0)
+ arg->rate = rate;
+ break;
+ }
+ case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
+ {
+ struct nvhost_clk_rate_args *arg =
+ (struct nvhost_clk_rate_args *)buf;
+ unsigned long rate = (unsigned long)arg->rate;
+
+ err = nvhost_module_set_rate(priv->ch->dev, priv, rate, 0);
+ break;
+ }
+ case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
+ priv->timeout =
+ (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
+ dev_dbg(&priv->ch->dev->dev,
+ "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
+ __func__, priv->timeout, priv);
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->hwctx->has_timedout;
+ break;
+ case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
+ priv->priority =
+ (u32)((struct nvhost_set_priority_args *)buf)->priority;
+ break;
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
+ err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
+
+ return err;
+}
+
+static const struct file_operations nvhost_channelops = {
+ .owner = THIS_MODULE,
+ .release = nvhost_channelrelease,
+ .open = nvhost_channelopen,
+ .write = nvhost_channelwrite,
+ .unlocked_ioctl = nvhost_channelctl
+};
+
+int nvhost_client_user_init(struct nvhost_device *dev)
+{
+	int err;
+	dev_t devno;
+ struct nvhost_channel *ch = dev->channel;
+ err = alloc_chrdev_region(&devno, 0, 1, IFACE_NAME);
+ if (err < 0) {
+ dev_err(&dev->dev, "failed to allocate devno\n");
+ goto fail;
+ }
+
+ cdev_init(&ch->cdev, &nvhost_channelops);
+ ch->cdev.owner = THIS_MODULE;
+
+ err = cdev_add(&ch->cdev, devno, 1);
+ if (err < 0) {
+ dev_err(&dev->dev,
+ "failed to add chan %i cdev\n", dev->index);
+ goto fail;
+ }
+ ch->node = device_create(nvhost_get_host(dev)->nvhost_class,
+ NULL, devno, NULL,
+ IFACE_NAME "-%s", dev->name);
+ if (IS_ERR(ch->node)) {
+ err = PTR_ERR(ch->node);
+ dev_err(&dev->dev,
+ "failed to create %s channel device\n", dev->name);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return err;
+}
+
+int nvhost_client_device_init(struct nvhost_device *dev)
+{
+ int err;
+ struct nvhost_master *nvhost_master = nvhost_get_host(dev);
+ struct nvhost_channel *ch;
+
+ ch = nvhost_alloc_channel(dev);
+ if (ch == NULL)
+ return -ENODEV;
+
+	/* store the pointer to this device in the channel */
+ ch->dev = dev;
+
+ err = nvhost_channel_init(ch, nvhost_master, dev->index);
+ if (err)
+ goto fail;
+
+ err = nvhost_client_user_init(dev);
+ if (err)
+ goto fail;
+
+ err = nvhost_module_init(dev);
+ if (err)
+ goto fail;
+
+ dev_info(&dev->dev, "initialized\n");
+
+ return 0;
+
+fail:
+ /* Add clean-up */
+ nvhost_free_channel(ch);
+ return err;
+}
+EXPORT_SYMBOL(nvhost_client_device_init);
+
+int nvhost_client_device_suspend(struct nvhost_device *dev)
+{
+ int ret = 0;
+
+ ret = nvhost_channel_suspend(dev->channel);
+ if (ret)
+ return ret;
+
+ dev_info(&dev->dev, "suspend status: %d\n", ret);
+
+ return ret;
+}
+
+int nvhost_client_device_get_resources(struct nvhost_device *dev)
+{
+ struct resource *r = NULL;
+ void __iomem *regs = NULL;
+ struct resource *reg_mem = NULL;
+
+ r = nvhost_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!r)
+ goto fail;
+
+ reg_mem = request_mem_region(r->start, resource_size(r), dev->name);
+ if (!reg_mem)
+ goto fail;
+
+ regs = ioremap(r->start, resource_size(r));
+ if (!regs)
+ goto fail;
+
+ dev->reg_mem = reg_mem;
+ dev->aperture = regs;
+
+ return 0;
+
+fail:
+ if (reg_mem)
+ release_mem_region(r->start, resource_size(r));
+ if (regs)
+ iounmap(regs);
+
+ dev_err(&dev->dev, "failed to get register memory\n");
+
+ return -ENXIO;
+}
+EXPORT_SYMBOL(nvhost_client_device_get_resources);
+
+void nvhost_client_device_put_resources(struct nvhost_device *dev)
+{
+ struct resource *r;
+
+ r = nvhost_get_resource(dev, IORESOURCE_MEM, 0);
+ BUG_ON(!r);
+
+ iounmap(dev->aperture);
+
+ release_mem_region(r->start, resource_size(r));
+}
+EXPORT_SYMBOL(nvhost_client_device_put_resources);
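
nvhost_channelwrite() above implements a small streaming protocol: a submit begins with a struct nvhost_submit_hdr (written directly, or supplied via NVHOST_IOCTL_CHANNEL_SUBMIT_EXT), followed by exactly num_cmdbufs nvhost_cmdbuf records, num_relocs nvhost_reloc records, num_waitchks nvhost_waitchk records and, for submit version 2 and later, one nvhost_reloc_shift per reloc; NVHOST_IOCTL_CHANNEL_FLUSH then pins the buffers and submits the accumulated job. A rough userspace-side sketch of a minimal one-gather submit, assuming the structs from linux/nvhost_ioctl.h and an already-created nvmap handle (the device path, handle and syncpoint values are illustrative only):

    int fd = open("/dev/nvhost-gr2d", O_RDWR);  /* node name from device_create() */
    struct nvhost_set_nvmap_fd_args fdargs = { .fd = nvmap_fd };
    struct nvhost_submit_hdr hdr = {
        .syncpt_id = syncpt_id,
        .syncpt_incrs = 1,
        .num_cmdbufs = 1,
        .num_relocs = 0,
    };
    struct nvhost_cmdbuf cmdbuf = {
        .mem = cmdbuf_handle, .offset = 0, .words = num_words,
    };
    struct nvhost_get_param_args fence;

    ioctl(fd, NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD, &fdargs); /* required before submit */
    write(fd, &hdr, sizeof(hdr));        /* starts a version-0 submit */
    write(fd, &cmdbuf, sizeof(cmdbuf));  /* the one gather announced in the header */
    ioctl(fd, NVHOST_IOCTL_CHANNEL_FLUSH, &fence);
    /* fence.value now holds the syncpoint threshold to wait for */
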
diff --git a/drivers/video/tegra/host/bus_client.h b/drivers/video/tegra/host/bus_client.h
new file mode 100644
index 000000000000..8c7bdc9faefe
--- /dev/null
+++ b/drivers/video/tegra/host/bus_client.h
@@ -0,0 +1,42 @@
+/*
+ * drivers/video/tegra/host/bus_client.h
+ *
+ * Tegra Graphics Host client
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_BUS_CLIENT_H
+#define __NVHOST_BUS_CLIENT_H
+
+#include <linux/types.h>
+struct nvhost_device;
+
+int nvhost_read_module_regs(struct nvhost_device *ndev,
+ u32 offset, int count, u32 *values);
+
+int nvhost_write_module_regs(struct nvhost_device *ndev,
+ u32 offset, int count, const u32 *values);
+
+int nvhost_client_user_init(struct nvhost_device *dev);
+
+int nvhost_client_device_init(struct nvhost_device *dev);
+
+int nvhost_client_device_suspend(struct nvhost_device *dev);
+
+int nvhost_client_device_get_resources(struct nvhost_device *dev);
+void nvhost_client_device_put_resources(struct nvhost_device *dev);
+
+#endif
diff --git a/drivers/video/tegra/host/chip_support.c b/drivers/video/tegra/host/chip_support.c
new file mode 100644
index 000000000000..9abb1fa026a4
--- /dev/null
+++ b/drivers/video/tegra/host/chip_support.c
@@ -0,0 +1,56 @@
+/*
+ * drivers/video/tegra/host/chip_support.c
+ *
+ * Tegra Graphics Host Chip support module
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/errno.h>
+
+#include <mach/hardware.h>
+
+#include "bus.h"
+#include "chip_support.h"
+#include "t20/t20.h"
+#include "t30/t30.h"
+
+struct nvhost_chip_support *nvhost_get_chip_ops(void)
+{
+ return (nvhost_bus_get())->nvhost_chip_ops;
+}
+
+int nvhost_init_chip_support(struct nvhost_master *host)
+{
+ int err = 0;
+ struct nvhost_chip_support *chip_ops;
+
+ chip_ops = nvhost_get_chip_ops();
+
+ switch (tegra_get_chipid()) {
+ case TEGRA_CHIPID_TEGRA2:
+ err = nvhost_init_t20_support(host, chip_ops);
+ break;
+
+ case TEGRA_CHIPID_TEGRA3:
+ err = nvhost_init_t30_support(host, chip_ops);
+ break;
+
+ default:
+ err = -ENODEV;
+ }
+
+ return err;
+}
diff --git a/drivers/video/tegra/host/chip_support.h b/drivers/video/tegra/host/chip_support.h
new file mode 100644
index 000000000000..412ce8b65466
--- /dev/null
+++ b/drivers/video/tegra/host/chip_support.h
@@ -0,0 +1,181 @@
+/*
+ * drivers/video/tegra/host/chip_support.h
+ *
+ * Tegra Graphics Host Chip Support
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _NVHOST_CHIP_SUPPORT_H_
+#define _NVHOST_CHIP_SUPPORT_H_
+
+#include <linux/types.h>
+#include "bus.h"
+
+struct output;
+
+struct nvhost_master;
+struct nvhost_intr;
+struct nvhost_syncpt;
+struct nvhost_userctx_timeout;
+struct nvhost_channel;
+struct nvhost_hwctx;
+struct nvhost_cdma;
+struct nvhost_job;
+struct push_buffer;
+struct dentry;
+struct nvhost_intr_syncpt;
+struct mem_handle;
+struct mem_mgr;
+struct nvhost_device;
+
+struct nvhost_channel_ops {
+ int (*init)(struct nvhost_channel *,
+ struct nvhost_master *,
+ int chid);
+ int (*submit)(struct nvhost_job *job);
+ int (*read3dreg)(struct nvhost_channel *channel,
+ struct nvhost_hwctx *hwctx,
+ u32 offset,
+ u32 *value);
+ int (*save_context)(struct nvhost_channel *channel);
+ int (*drain_read_fifo)(struct nvhost_channel *ch,
+ u32 *ptr, unsigned int count, unsigned int *pending);
+};
+
+struct nvhost_cdma_ops {
+ void (*start)(struct nvhost_cdma *);
+ void (*stop)(struct nvhost_cdma *);
+ void (*kick)(struct nvhost_cdma *);
+ int (*timeout_init)(struct nvhost_cdma *,
+ u32 syncpt_id);
+ void (*timeout_destroy)(struct nvhost_cdma *);
+ void (*timeout_teardown_begin)(struct nvhost_cdma *);
+ void (*timeout_teardown_end)(struct nvhost_cdma *,
+ u32 getptr);
+ void (*timeout_cpu_incr)(struct nvhost_cdma *,
+ u32 getptr,
+ u32 syncpt_incrs,
+ u32 syncval,
+ u32 nr_slots,
+ u32 waitbases);
+};
+
+struct nvhost_pushbuffer_ops {
+ void (*reset)(struct push_buffer *);
+ int (*init)(struct push_buffer *);
+ void (*destroy)(struct push_buffer *);
+ void (*push_to)(struct push_buffer *,
+ struct mem_mgr *, struct mem_handle *,
+ u32 op1, u32 op2);
+ void (*pop_from)(struct push_buffer *,
+ unsigned int slots);
+ u32 (*space)(struct push_buffer *);
+ u32 (*putptr)(struct push_buffer *);
+};
+
+struct nvhost_debug_ops {
+ void (*debug_init)(struct dentry *de);
+ void (*show_channel_cdma)(struct nvhost_master *,
+ struct nvhost_channel *,
+ struct output *,
+ int chid);
+ void (*show_channel_fifo)(struct nvhost_master *,
+ struct nvhost_channel *,
+ struct output *,
+ int chid);
+ void (*show_mlocks)(struct nvhost_master *m,
+ struct output *o);
+
+};
+
+struct nvhost_syncpt_ops {
+ void (*reset)(struct nvhost_syncpt *, u32 id);
+ void (*reset_wait_base)(struct nvhost_syncpt *, u32 id);
+ void (*read_wait_base)(struct nvhost_syncpt *, u32 id);
+ u32 (*update_min)(struct nvhost_syncpt *, u32 id);
+ void (*cpu_incr)(struct nvhost_syncpt *, u32 id);
+ int (*patch_wait)(struct nvhost_syncpt *sp,
+ void *patch_addr);
+ void (*debug)(struct nvhost_syncpt *);
+ const char * (*name)(struct nvhost_syncpt *, u32 id);
+ int (*mutex_try_lock)(struct nvhost_syncpt *,
+ unsigned int idx);
+ void (*mutex_unlock)(struct nvhost_syncpt *,
+ unsigned int idx);
+};
+
+struct nvhost_intr_ops {
+ void (*init_host_sync)(struct nvhost_intr *);
+ void (*set_host_clocks_per_usec)(
+ struct nvhost_intr *, u32 clocks);
+ void (*set_syncpt_threshold)(
+ struct nvhost_intr *, u32 id, u32 thresh);
+ void (*enable_syncpt_intr)(struct nvhost_intr *, u32 id);
+ void (*disable_syncpt_intr)(struct nvhost_intr *, u32 id);
+ void (*disable_all_syncpt_intrs)(struct nvhost_intr *);
+ int (*request_host_general_irq)(struct nvhost_intr *);
+ void (*free_host_general_irq)(struct nvhost_intr *);
+ int (*request_syncpt_irq)(struct nvhost_intr_syncpt *syncpt);
+};
+
+struct nvhost_dev_ops {
+ struct nvhost_channel *(*alloc_nvhost_channel)(
+ struct nvhost_device *dev);
+ void (*free_nvhost_channel)(struct nvhost_channel *ch);
+};
+
+struct nvhost_mem_ops {
+ struct mem_mgr *(*alloc_mgr)(void);
+ void (*put_mgr)(struct mem_mgr *);
+ struct mem_mgr *(*get_mgr)(struct mem_mgr *);
+ struct mem_mgr *(*get_mgr_file)(int fd);
+ struct mem_handle *(*alloc)(struct mem_mgr *,
+ size_t size, size_t align,
+ int flags);
+ struct mem_handle *(*get)(struct mem_mgr *, u32 id);
+ void (*put)(struct mem_mgr *, struct mem_handle *);
+ phys_addr_t (*pin)(struct mem_mgr *, struct mem_handle *);
+ void (*unpin)(struct mem_mgr *, struct mem_handle *);
+ void *(*mmap)(struct mem_handle *);
+ void (*munmap)(struct mem_handle *, void *);
+};
+
+struct nvhost_chip_support {
+ struct nvhost_channel_ops channel;
+ struct nvhost_cdma_ops cdma;
+ struct nvhost_pushbuffer_ops push_buffer;
+ struct nvhost_debug_ops debug;
+ struct nvhost_syncpt_ops syncpt;
+ struct nvhost_intr_ops intr;
+ struct nvhost_dev_ops nvhost_dev;
+ struct nvhost_mem_ops mem;
+};
+
+struct nvhost_chip_support *nvhost_get_chip_ops(void);
+
+#define host_device_op() nvhost_get_chip_ops()->nvhost_dev
+#define channel_cdma_op() nvhost_get_chip_ops()->cdma
+#define channel_op() nvhost_get_chip_ops()->channel
+#define syncpt_op() nvhost_get_chip_ops()->syncpt
+#define intr_op() nvhost_get_chip_ops()->intr
+#define cdma_op() nvhost_get_chip_ops()->cdma
+#define cdma_pb_op() nvhost_get_chip_ops()->push_buffer
+#define mem_op() (nvhost_get_chip_ops()->mem)
+
+int nvhost_init_chip_support(struct nvhost_master *);
+
+#endif /* _NVHOST_CHIP_SUPPORT_H_ */
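
chip_support.h is the indirection point that keeps the common code free of per-chip register knowledge: callers always dispatch through the accessor macros, and nvhost_init_t20_support()/nvhost_init_t30_support() populate the table once at boot (see chip_support.c above). As a sketch, a backend fills in its callbacks and common code dispatches without knowing which chip it runs on; the t20_* names below are hypothetical stand-ins for the real implementations under t20/:

    /* backend side: install per-chip callbacks (names hypothetical) */
    int nvhost_init_t20_support(struct nvhost_master *host,
            struct nvhost_chip_support *op)
    {
        op->channel.init = t20_channel_init;
        op->syncpt.update_min = t20_syncpt_update_min;
        op->intr.enable_syncpt_intr = t20_intr_enable_syncpt_intr;
        return 0;
    }

    /* common-code side: chip-agnostic dispatch */
    u32 min = syncpt_op().update_min(&host->syncpt, id);
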
diff --git a/drivers/video/tegra/host/debug.c b/drivers/video/tegra/host/debug.c
new file mode 100644
index 000000000000..58f9348b84bd
--- /dev/null
+++ b/drivers/video/tegra/host/debug.c
@@ -0,0 +1,234 @@
+/*
+ * drivers/video/tegra/host/debug.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <linux/io.h>
+
+#include "bus.h"
+#include "dev.h"
+#include "debug.h"
+#include "nvhost_acm.h"
+#include "nvhost_channel.h"
+#include "chip_support.h"
+
+pid_t nvhost_debug_null_kickoff_pid;
+unsigned int nvhost_debug_trace_cmdbuf;
+
+pid_t nvhost_debug_force_timeout_pid;
+u32 nvhost_debug_force_timeout_val;
+u32 nvhost_debug_force_timeout_channel;
+
+void nvhost_debug_output(struct output *o, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+
+ va_start(args, fmt);
+ len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
+ va_end(args);
+ o->fn(o->ctx, o->buf, len);
+}
+
+static int show_channels(struct device *dev, void *data)
+{
+ struct nvhost_channel *ch;
+ struct nvhost_device *nvdev = to_nvhost_device(dev);
+ struct output *o = data;
+ struct nvhost_master *m;
+
+ if (nvdev == NULL)
+ return 0;
+
+ m = nvhost_get_host(nvdev);
+ ch = nvdev->channel;
+ if (ch) {
+ mutex_lock(&ch->reflock);
+ if (ch->refcount) {
+ mutex_lock(&ch->cdma.lock);
+ nvhost_get_chip_ops()->debug.show_channel_fifo(m, ch, o, nvdev->index);
+ nvhost_get_chip_ops()->debug.show_channel_cdma(m, ch, o, nvdev->index);
+ mutex_unlock(&ch->cdma.lock);
+ }
+ mutex_unlock(&ch->reflock);
+ }
+
+ return 0;
+}
+
+static void show_syncpts(struct nvhost_master *m, struct output *o)
+{
+ int i;
+ BUG_ON(!nvhost_get_chip_ops()->syncpt.name);
+ nvhost_debug_output(o, "---- syncpts ----\n");
+ for (i = 0; i < nvhost_syncpt_nb_pts(&m->syncpt); i++) {
+ u32 max = nvhost_syncpt_read_max(&m->syncpt, i);
+ u32 min = nvhost_syncpt_update_min(&m->syncpt, i);
+ if (!min && !max)
+ continue;
+ nvhost_debug_output(o, "id %d (%s) min %d max %d\n",
+ i, nvhost_get_chip_ops()->syncpt.name(&m->syncpt, i),
+ min, max);
+ }
+
+ for (i = 0; i < nvhost_syncpt_nb_pts(&m->syncpt); i++) {
+ u32 base_val;
+ base_val = nvhost_syncpt_read_wait_base(&m->syncpt, i);
+ if (base_val)
+ nvhost_debug_output(o, "waitbase id %d val %d\n",
+ i, base_val);
+ }
+
+ nvhost_debug_output(o, "\n");
+}
+
+static void show_all(struct nvhost_master *m, struct output *o)
+{
+ nvhost_module_busy(m->dev);
+
+ nvhost_get_chip_ops()->debug.show_mlocks(m, o);
+ show_syncpts(m, o);
+ nvhost_debug_output(o, "---- channels ----\n");
+ bus_for_each_dev(&(nvhost_bus_get())->nvhost_bus_type, NULL, o,
+ show_channels);
+
+ nvhost_module_idle(m->dev);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int show_channels_no_fifo(struct device *dev, void *data)
+{
+ struct nvhost_channel *ch;
+ struct nvhost_device *nvdev = to_nvhost_device(dev);
+ struct output *o = data;
+ struct nvhost_master *m;
+
+ if (nvdev == NULL)
+ return 0;
+
+ m = nvhost_get_host(nvdev);
+ ch = nvdev->channel;
+ if (ch) {
+ mutex_lock(&ch->reflock);
+ if (ch->refcount) {
+ mutex_lock(&ch->cdma.lock);
+ nvhost_get_chip_ops()->debug.show_channel_cdma(m,
+ ch, o, nvdev->index);
+ mutex_unlock(&ch->cdma.lock);
+ }
+ mutex_unlock(&ch->reflock);
+ }
+
+ return 0;
+}
+
+static void show_all_no_fifo(struct nvhost_master *m, struct output *o)
+{
+ nvhost_module_busy(m->dev);
+
+ nvhost_get_chip_ops()->debug.show_mlocks(m, o);
+ show_syncpts(m, o);
+ nvhost_debug_output(o, "---- channels ----\n");
+ bus_for_each_dev(&(nvhost_bus_get())->nvhost_bus_type, NULL, o,
+ show_channels_no_fifo);
+
+ nvhost_module_idle(m->dev);
+}
+
+static int nvhost_debug_show_all(struct seq_file *s, void *unused)
+{
+ struct output o = {
+ .fn = write_to_seqfile,
+ .ctx = s
+ };
+ show_all(s->private, &o);
+ return 0;
+}
+static int nvhost_debug_show(struct seq_file *s, void *unused)
+{
+ struct output o = {
+ .fn = write_to_seqfile,
+ .ctx = s
+ };
+ show_all_no_fifo(s->private, &o);
+ return 0;
+}
+
+static int nvhost_debug_open_all(struct inode *inode, struct file *file)
+{
+ return single_open(file, nvhost_debug_show_all, inode->i_private);
+}
+
+static const struct file_operations nvhost_debug_all_fops = {
+ .open = nvhost_debug_open_all,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int nvhost_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nvhost_debug_show, inode->i_private);
+}
+
+static const struct file_operations nvhost_debug_fops = {
+ .open = nvhost_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void nvhost_debug_init(struct nvhost_master *master)
+{
+ struct dentry *de = debugfs_create_dir("tegra_host", NULL);
+
+ debugfs_create_file("status", S_IRUGO, de,
+ master, &nvhost_debug_fops);
+ debugfs_create_file("status_all", S_IRUGO, de,
+ master, &nvhost_debug_all_fops);
+
+ debugfs_create_u32("null_kickoff_pid", S_IRUGO|S_IWUSR, de,
+ &nvhost_debug_null_kickoff_pid);
+ debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR, de,
+ &nvhost_debug_trace_cmdbuf);
+
+ if (nvhost_get_chip_ops()->debug.debug_init)
+ nvhost_get_chip_ops()->debug.debug_init(de);
+
+ debugfs_create_u32("force_timeout_pid", S_IRUGO|S_IWUSR, de,
+ &nvhost_debug_force_timeout_pid);
+ debugfs_create_u32("force_timeout_val", S_IRUGO|S_IWUSR, de,
+ &nvhost_debug_force_timeout_val);
+ debugfs_create_u32("force_timeout_channel", S_IRUGO|S_IWUSR, de,
+ &nvhost_debug_force_timeout_channel);
+}
+#else
+void nvhost_debug_init(struct nvhost_master *master)
+{
+}
+#endif
+
+void nvhost_debug_dump(struct nvhost_master *master)
+{
+ struct output o = {
+ .fn = write_to_printk
+ };
+ show_all(master, &o);
+}
diff --git a/drivers/video/tegra/host/debug.h b/drivers/video/tegra/host/debug.h
new file mode 100644
index 000000000000..3dc156ab4741
--- /dev/null
+++ b/drivers/video/tegra/host/debug.h
@@ -0,0 +1,50 @@
+/*
+ * drivers/video/tegra/host/debug.h
+ *
+ * Tegra Graphics Host Debug
+ *
+ * Copyright (c) 2011-2012 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __NVHOST_DEBUG_H
+#define __NVHOST_DEBUG_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+struct output {
+	void (*fn)(void *ctx, const char *str, size_t len);
+ void *ctx;
+ char buf[256];
+};
+
+static inline void write_to_seqfile(void *ctx, const char *str, size_t len)
+{
+ seq_write((struct seq_file *)ctx, str, len);
+}
+
+static inline void write_to_printk(void *ctx, const char *str, size_t len)
+{
+ printk(KERN_INFO "%s", str);
+}
+
+void nvhost_debug_output(struct output *o, const char *fmt, ...);
+
+extern pid_t nvhost_debug_null_kickoff_pid;
+extern pid_t nvhost_debug_force_timeout_pid;
+extern u32 nvhost_debug_force_timeout_val;
+extern u32 nvhost_debug_force_timeout_channel;
+extern unsigned int nvhost_debug_trace_cmdbuf;
+
+#endif /*__NVHOST_DEBUG_H */
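
struct output is a minimal sink abstraction: nvhost_debug_output() formats into o->buf and hands each chunk to o->fn, so the same dump routines serve both debugfs reads (write_to_seqfile) and kernel-log dumps (write_to_printk, used by nvhost_debug_dump()). Any function with the same signature plugs in; as a sketch, a hypothetical fixed-size capture buffer, not part of this patch, would look like this (min() from linux/kernel.h, memcpy() from linux/string.h assumed):

    static char capture[1024];
    static size_t captured;

    static void write_to_capture(void *ctx, const char *str, size_t len)
    {
        size_t space = sizeof(capture) - captured;

        len = min(len, space);
        memcpy(capture + captured, str, len);
        captured += len;
    }

    /* usage: struct output o = { .fn = write_to_capture }; */
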
diff --git a/drivers/video/tegra/host/dev.c b/drivers/video/tegra/host/dev.c
new file mode 100644
index 000000000000..6200507548f7
--- /dev/null
+++ b/drivers/video/tegra/host/dev.c
@@ -0,0 +1,31 @@
+/*
+ * drivers/video/tegra/host/dev.c
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/nvhost.h>
+
+#include <linux/nvhost.h>
+#include <mach/gpufuse.h>
+
+MODULE_AUTHOR("NVIDIA");
+MODULE_DESCRIPTION("Graphics host driver for Tegra products");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform-nvhost");
diff --git a/drivers/video/tegra/host/dev.h b/drivers/video/tegra/host/dev.h
new file mode 100644
index 000000000000..53ec2de13aa1
--- /dev/null
+++ b/drivers/video/tegra/host/dev.h
@@ -0,0 +1,24 @@
+/*
+ * drivers/video/tegra/host/dev.h
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef NVHOST_DEV_H
+#define NVHOST_DEV_H
+
+#include "host1x/host1x.h"
+
+#endif
diff --git a/drivers/video/tegra/host/gr2d/Makefile b/drivers/video/tegra/host/gr2d/Makefile
new file mode 100644
index 000000000000..a79a2101677b
--- /dev/null
+++ b/drivers/video/tegra/host/gr2d/Makefile
@@ -0,0 +1,7 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-gr2d-objs = \
+ gr2d.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-gr2d.o
diff --git a/drivers/video/tegra/host/gr2d/gr2d.c b/drivers/video/tegra/host/gr2d/gr2d.c
new file mode 100644
index 000000000000..56752eba5951
--- /dev/null
+++ b/drivers/video/tegra/host/gr2d/gr2d.c
@@ -0,0 +1,73 @@
+/*
+ * drivers/video/tegra/host/gr2d/gr2d.c
+ *
+ * Tegra Graphics 2D
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "dev.h"
+#include "bus_client.h"
+
+static int __devinit gr2d_probe(struct nvhost_device *dev,
+ struct nvhost_device_id *id_table)
+{
+ return nvhost_client_device_init(dev);
+}
+
+static int __exit gr2d_remove(struct nvhost_device *dev)
+{
+	/* TODO: add clean-up */
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int gr2d_suspend(struct nvhost_device *dev, pm_message_t state)
+{
+ return nvhost_client_device_suspend(dev);
+}
+
+static int gr2d_resume(struct nvhost_device *dev)
+{
+ dev_info(&dev->dev, "resuming\n");
+ return 0;
+}
+#endif
+
+static struct nvhost_driver gr2d_driver = {
+ .probe = gr2d_probe,
+ .remove = __exit_p(gr2d_remove),
+#ifdef CONFIG_PM
+ .suspend = gr2d_suspend,
+ .resume = gr2d_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "gr2d",
+ }
+};
+
+static int __init gr2d_init(void)
+{
+ return nvhost_driver_register(&gr2d_driver);
+}
+
+static void __exit gr2d_exit(void)
+{
+ nvhost_driver_unregister(&gr2d_driver);
+}
+
+module_init(gr2d_init);
+module_exit(gr2d_exit);
diff --git a/drivers/video/tegra/host/gr3d/Makefile b/drivers/video/tegra/host/gr3d/Makefile
new file mode 100644
index 000000000000..dfbd078ab423
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/Makefile
@@ -0,0 +1,10 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-gr3d-objs = \
+ gr3d.o \
+ gr3d_t20.o \
+ gr3d_t30.o \
+ scale3d.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-gr3d.o
diff --git a/drivers/video/tegra/host/gr3d/gr3d.c b/drivers/video/tegra/host/gr3d/gr3d.c
new file mode 100644
index 000000000000..775c77b0e88d
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/gr3d.c
@@ -0,0 +1,265 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d.c
+ *
+ * Tegra Graphics Host 3D
+ *
+ * Copyright (c) 2012 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <mach/gpufuse.h>
+
+#include "t20/t20.h"
+#include "host1x/host1x01_hardware.h"
+#include "nvhost_hwctx.h"
+#include "dev.h"
+#include "gr3d.h"
+#include "gr3d_t20.h"
+#include "gr3d_t30.h"
+#include "scale3d.h"
+#include "bus_client.h"
+#include "nvhost_channel.h"
+#include "nvhost_memmgr.h"
+#include "chip_support.h"
+
+void nvhost_3dctx_restore_begin(struct host1x_hwctx_handler *p, u32 *ptr)
+{
+ /* set class to host */
+ ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_incr_syncpt_base_r(), 1);
+ /* increment sync point base */
+ ptr[1] = nvhost_class_host_incr_syncpt_base(p->waitbase,
+ p->restore_incrs);
+ /* set class to 3D */
+ ptr[2] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ /* program PSEQ_QUAD_ID */
+ ptr[3] = nvhost_opcode_imm(AR3D_PSEQ_QUAD_ID, 0);
+}
+
+void nvhost_3dctx_restore_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_incr(start_reg, count);
+}
+
+void nvhost_3dctx_restore_indirect(u32 *ptr, u32 offset_reg, u32 offset,
+ u32 data_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_imm(offset_reg, offset);
+ ptr[1] = nvhost_opcode_nonincr(data_reg, count);
+}
+
+void nvhost_3dctx_restore_end(struct host1x_hwctx_handler *p, u32 *ptr)
+{
+ /* syncpt increment to track restore gather. */
+ ptr[0] = nvhost_opcode_imm_incr_syncpt(
+ host1x_uclass_incr_syncpt_cond_op_done_v(), p->syncpt);
+}
+
+/*** ctx3d ***/
+struct host1x_hwctx *nvhost_3dctx_alloc_common(struct host1x_hwctx_handler *p,
+ struct nvhost_channel *ch, bool map_restore)
+{
+ struct mem_mgr *memmgr = nvhost_get_host(ch->dev)->memmgr;
+ struct host1x_hwctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+ ctx->restore = mem_op().alloc(memmgr, p->restore_size * 4, 32,
+ map_restore ? mem_mgr_flag_write_combine
+ : mem_mgr_flag_uncacheable);
+ if (IS_ERR_OR_NULL(ctx->restore)) {
+ ctx->restore = NULL;
+ goto fail;
+ }
+
+ if (map_restore) {
+ ctx->restore_virt = mem_op().mmap(ctx->restore);
+ if (!ctx->restore_virt)
+ goto fail;
+ } else
+ ctx->restore_virt = NULL;
+
+ kref_init(&ctx->hwctx.ref);
+ ctx->hwctx.h = &p->h;
+ ctx->hwctx.channel = ch;
+ ctx->hwctx.valid = false;
+ ctx->save_incrs = p->save_incrs;
+ ctx->save_thresh = p->save_thresh;
+ ctx->save_slots = p->save_slots;
+ ctx->restore_phys = mem_op().pin(memmgr, ctx->restore);
+ if (IS_ERR_VALUE(ctx->restore_phys))
+ goto fail;
+
+ ctx->restore_size = p->restore_size;
+ ctx->restore_incrs = p->restore_incrs;
+ return ctx;
+
+fail:
+ if (map_restore && ctx->restore_virt) {
+ mem_op().munmap(ctx->restore, ctx->restore_virt);
+ ctx->restore_virt = NULL;
+ }
+	if (ctx->restore) {
+		mem_op().put(memmgr, ctx->restore);
+		ctx->restore = NULL;
+	}
+ kfree(ctx);
+ return NULL;
+}
+
+void nvhost_3dctx_get(struct nvhost_hwctx *ctx)
+{
+ kref_get(&ctx->ref);
+}
+
+void nvhost_3dctx_free(struct kref *ref)
+{
+ struct nvhost_hwctx *nctx = container_of(ref, struct nvhost_hwctx, ref);
+ struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+ struct mem_mgr *memmgr = nvhost_get_host(nctx->channel->dev)->memmgr;
+
+ if (ctx->restore_virt) {
+ mem_op().munmap(ctx->restore, ctx->restore_virt);
+ ctx->restore_virt = NULL;
+ }
+ mem_op().unpin(memmgr, ctx->restore);
+ ctx->restore_phys = 0;
+ mem_op().put(memmgr, ctx->restore);
+ ctx->restore = NULL;
+ kfree(ctx);
+}
+
+void nvhost_3dctx_put(struct nvhost_hwctx *ctx)
+{
+ kref_put(&ctx->ref, nvhost_3dctx_free);
+}
+
+int nvhost_gr3d_prepare_power_off(struct nvhost_device *dev)
+{
+ return nvhost_channel_save_context(dev->channel);
+}
+
+enum gr3d_ip_ver {
+ gr3d_01 = 1,
+ gr3d_02,
+};
+
+struct gr3d_desc {
+ void (*finalize_poweron)(struct nvhost_device *dev);
+ void (*busy)(struct nvhost_device *);
+ void (*idle)(struct nvhost_device *);
+ void (*suspend_ndev)(struct nvhost_device *);
+ void (*init)(struct nvhost_device *dev);
+ void (*deinit)(struct nvhost_device *dev);
+ int (*prepare_poweroff)(struct nvhost_device *dev);
+ struct nvhost_hwctx_handler *(*alloc_hwctx_handler)(u32 syncpt,
+ u32 waitbase, struct nvhost_channel *ch);
+};
+
+static const struct gr3d_desc gr3d[] = {
+ [gr3d_01] = {
+ .finalize_poweron = NULL,
+ .busy = NULL,
+ .idle = NULL,
+ .suspend_ndev = NULL,
+ .init = NULL,
+ .deinit = NULL,
+ .prepare_poweroff = nvhost_gr3d_prepare_power_off,
+ .alloc_hwctx_handler = nvhost_gr3d_t20_ctxhandler_init,
+ },
+ [gr3d_02] = {
+ .finalize_poweron = NULL,
+ .busy = nvhost_scale3d_notify_busy,
+ .idle = nvhost_scale3d_notify_idle,
+ .suspend_ndev = nvhost_scale3d_suspend,
+ .init = nvhost_scale3d_init,
+ .deinit = nvhost_scale3d_deinit,
+ .prepare_poweroff = nvhost_gr3d_prepare_power_off,
+ .alloc_hwctx_handler = nvhost_gr3d_t30_ctxhandler_init,
+ },
+};
+
+static struct nvhost_device_id gr3d_id[] = {
+ { "gr3d", gr3d_01 },
+ { "gr3d", gr3d_02 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(nvhost, gr3d_id);
+
+static int __devinit gr3d_probe(struct nvhost_device *dev,
+ struct nvhost_device_id *id_table)
+{
+ int index = 0;
+ struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+ index = id_table->version;
+
+ drv->finalize_poweron = gr3d[index].finalize_poweron;
+ drv->busy = gr3d[index].busy;
+ drv->idle = gr3d[index].idle;
+ drv->suspend_ndev = gr3d[index].suspend_ndev;
+ drv->init = gr3d[index].init;
+ drv->deinit = gr3d[index].deinit;
+ drv->prepare_poweroff = gr3d[index].prepare_poweroff;
+ drv->alloc_hwctx_handler = gr3d[index].alloc_hwctx_handler;
+
+ return nvhost_client_device_init(dev);
+}
+
+static int __exit gr3d_remove(struct nvhost_device *dev)
+{
+	/* TODO: add clean-up */
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int gr3d_suspend(struct nvhost_device *dev, pm_message_t state)
+{
+ return nvhost_client_device_suspend(dev);
+}
+
+static int gr3d_resume(struct nvhost_device *dev)
+{
+ dev_info(&dev->dev, "resuming\n");
+ return 0;
+}
+#endif
+
+static struct nvhost_driver gr3d_driver = {
+ .probe = gr3d_probe,
+ .remove = __exit_p(gr3d_remove),
+#ifdef CONFIG_PM
+ .suspend = gr3d_suspend,
+ .resume = gr3d_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "gr3d",
+ },
+ .id_table = gr3d_id,
+};
+
+static int __init gr3d_init(void)
+{
+ return nvhost_driver_register(&gr3d_driver);
+}
+
+static void __exit gr3d_exit(void)
+{
+ nvhost_driver_unregister(&gr3d_driver);
+}
+
+module_init(gr3d_init);
+module_exit(gr3d_exit);
diff --git a/drivers/video/tegra/host/gr3d/gr3d.h b/drivers/video/tegra/host/gr3d/gr3d.h
new file mode 100644
index 000000000000..61f708cea95c
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/gr3d.h
@@ -0,0 +1,57 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d.h
+ *
+ * Tegra Graphics Host 3D
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_GR3D_GR3D_H
+#define __NVHOST_GR3D_GR3D_H
+
+#include "host1x/host1x_hwctx.h"
+#include <linux/types.h>
+
+/* Registers of 3D unit */
+
+#define AR3D_PSEQ_QUAD_ID 0x545
+#define AR3D_DW_MEMORY_OUTPUT_ADDRESS 0x904
+#define AR3D_DW_MEMORY_OUTPUT_DATA 0x905
+#define AR3D_FDC_CONTROL_0 0xa00
+#define AR3D_FDC_CONTROL_0_RESET_VAL 0xe00
+#define AR3D_FDC_CONTROL_0_INVALIDATE 1
+#define AR3D_GSHIM_WRITE_MASK 0xb00
+#define AR3D_GSHIM_READ_SELECT 0xb01
+#define AR3D_GLOBAL_MEMORY_OUTPUT_READS 0xe40
+
+struct nvhost_hwctx;
+struct nvhost_channel;
+struct kref;
+
+/* Functions used commonly by all 3D context switch modules */
+void nvhost_3dctx_restore_begin(struct host1x_hwctx_handler *h, u32 *ptr);
+void nvhost_3dctx_restore_direct(u32 *ptr, u32 start_reg, u32 count);
+void nvhost_3dctx_restore_indirect(u32 *ptr, u32 offset_reg,
+ u32 offset, u32 data_reg, u32 count);
+void nvhost_3dctx_restore_end(struct host1x_hwctx_handler *h, u32 *ptr);
+struct host1x_hwctx *nvhost_3dctx_alloc_common(
+ struct host1x_hwctx_handler *p,
+ struct nvhost_channel *ch, bool map_restore);
+void nvhost_3dctx_get(struct nvhost_hwctx *ctx);
+void nvhost_3dctx_free(struct kref *ref);
+void nvhost_3dctx_put(struct nvhost_hwctx *ctx);
+int nvhost_gr3d_prepare_power_off(struct nvhost_device *dev);
+
+#endif
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t20.c b/drivers/video/tegra/host/gr3d/gr3d_t20.c
new file mode 100644
index 000000000000..694b00527790
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/gr3d_t20.c
@@ -0,0 +1,399 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d_t20.c
+ *
+ * Tegra Graphics Host 3D for Tegra2
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_hwctx.h"
+#include "nvhost_channel.h"
+#include "host1x/host1x.h"
+#include "host1x/host1x01_hardware.h"
+#include "gr3d.h"
+#include "chip_support.h"
+#include "nvhost_memmgr.h"
+
+#include <linux/slab.h>
+
+static const struct hwctx_reginfo ctxsave_regs_3d_global[] = {
+ HWCTX_REGINFO(0xe00, 4, DIRECT),
+ HWCTX_REGINFO(0xe05, 30, DIRECT),
+ HWCTX_REGINFO(0xe25, 2, DIRECT),
+ HWCTX_REGINFO(0xe28, 2, DIRECT),
+ HWCTX_REGINFO(0x001, 2, DIRECT),
+ HWCTX_REGINFO(0x00c, 10, DIRECT),
+ HWCTX_REGINFO(0x100, 34, DIRECT),
+ HWCTX_REGINFO(0x124, 2, DIRECT),
+ HWCTX_REGINFO(0x200, 5, DIRECT),
+ HWCTX_REGINFO(0x205, 1024, INDIRECT),
+ HWCTX_REGINFO(0x207, 1024, INDIRECT),
+ HWCTX_REGINFO(0x209, 1, DIRECT),
+ HWCTX_REGINFO(0x300, 64, DIRECT),
+ HWCTX_REGINFO(0x343, 25, DIRECT),
+ HWCTX_REGINFO(0x363, 2, DIRECT),
+ HWCTX_REGINFO(0x400, 16, DIRECT),
+ HWCTX_REGINFO(0x411, 1, DIRECT),
+ HWCTX_REGINFO(0x500, 4, DIRECT),
+ HWCTX_REGINFO(0x520, 32, DIRECT),
+ HWCTX_REGINFO(0x540, 64, INDIRECT),
+ HWCTX_REGINFO(0x600, 16, INDIRECT_4X),
+ HWCTX_REGINFO(0x603, 128, INDIRECT),
+ HWCTX_REGINFO(0x608, 4, DIRECT),
+ HWCTX_REGINFO(0x60e, 1, DIRECT),
+ HWCTX_REGINFO(0x700, 64, INDIRECT),
+ HWCTX_REGINFO(0x710, 50, DIRECT),
+ HWCTX_REGINFO(0x800, 16, INDIRECT_4X),
+ HWCTX_REGINFO(0x803, 512, INDIRECT),
+ HWCTX_REGINFO(0x805, 64, INDIRECT),
+ HWCTX_REGINFO(0x820, 32, DIRECT),
+ HWCTX_REGINFO(0x900, 64, INDIRECT),
+ HWCTX_REGINFO(0x902, 2, DIRECT),
+ HWCTX_REGINFO(0xa02, 10, DIRECT),
+ HWCTX_REGINFO(0xe04, 1, DIRECT),
+ HWCTX_REGINFO(0xe2a, 1, DIRECT),
+};
+
+/* the same context save command sequence is used for all contexts. */
+#define SAVE_BEGIN_V0_SIZE 5
+#define SAVE_DIRECT_V0_SIZE 3
+#define SAVE_INDIRECT_V0_SIZE 5
+#define SAVE_END_V0_SIZE 5
+#define SAVE_INCRS 3
+#define SAVE_THRESH_OFFSET 1
+#define RESTORE_BEGIN_SIZE 4
+#define RESTORE_DIRECT_SIZE 1
+#define RESTORE_INDIRECT_SIZE 2
+#define RESTORE_END_SIZE 1
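+
+/* Worked example (illustrative): a single DIRECT entry of count 4 would
+ * yield a save buffer of SAVE_BEGIN_V0_SIZE + SAVE_DIRECT_V0_SIZE + 4 +
+ * SAVE_END_V0_SIZE = 5 + 3 + 4 + 5 = 17 words, and a restore buffer of
+ * RESTORE_BEGIN_SIZE + RESTORE_DIRECT_SIZE + 4 + RESTORE_END_SIZE =
+ * 4 + 1 + 4 + 1 = 10 words; see setup_save() below for the actual sums.
+ */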
+
+struct save_info {
+ u32 *ptr;
+ unsigned int save_count;
+ unsigned int restore_count;
+ unsigned int save_incrs;
+ unsigned int restore_incrs;
+};
+
+static u32 *setup_restore_regs_v0(u32 *ptr,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+
+ for ( ; regs != rend; ++regs) {
+ u32 offset = regs->offset;
+ u32 count = regs->count;
+ u32 indoff = offset + 1;
+ switch (regs->type) {
+ case HWCTX_REGINFO_DIRECT:
+ nvhost_3dctx_restore_direct(ptr, offset, count);
+ ptr += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT_4X:
+ ++indoff;
+ /* fall through */
+ case HWCTX_REGINFO_INDIRECT:
+ nvhost_3dctx_restore_indirect(ptr,
+ offset, 0, indoff, count);
+ ptr += RESTORE_INDIRECT_SIZE;
+ break;
+ }
+ ptr += count;
+ }
+ return ptr;
+}
+
+static void setup_restore_v0(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+ nvhost_3dctx_restore_begin(h, ptr);
+ ptr += RESTORE_BEGIN_SIZE;
+
+ ptr = setup_restore_regs_v0(ptr,
+ ctxsave_regs_3d_global,
+ ARRAY_SIZE(ctxsave_regs_3d_global));
+
+ nvhost_3dctx_restore_end(h, ptr);
+
+ wmb();
+}
+
+/*** v0 saver ***/
+
+static void save_push_v0(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
+{
+ struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+ struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);
+
+ nvhost_cdma_push_gather(cdma,
+ nvhost_get_host(nctx->channel->dev)->memmgr,
+ p->save_buf,
+ 0,
+ nvhost_opcode_gather(p->save_size),
+ p->save_phys);
+}
+
+static void save_begin_v0(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+ /* 3d: when done, increment syncpt to base+1 */
+ ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ ptr[1] = nvhost_opcode_imm_incr_syncpt(
+ host1x_uclass_incr_syncpt_cond_op_done_v(),
+ h->syncpt); /* incr 1 */
+ /* host: wait for syncpt base+1 */
+ ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_wait_syncpt_base_r(), 1);
+ ptr[3] = nvhost_class_host_wait_syncpt_base(h->syncpt,
+ h->waitbase, 1);
+ /* host: signal context read thread to start reading */
+ ptr[4] = nvhost_opcode_imm_incr_syncpt(
+ host1x_uclass_incr_syncpt_cond_immediate_v(),
+ h->syncpt); /* incr 2 */
+}
+
+static void save_direct_v0(u32 *ptr, u32 start_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_nonincr(host1x_uclass_indoff_r(), 1);
+ ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ start_reg, true);
+ ptr[2] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
+}
+
+static void save_indirect_v0(u32 *ptr, u32 offset_reg, u32 offset,
+ u32 data_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
+ offset_reg, 1);
+ ptr[1] = offset;
+ ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_indoff_r(), 1);
+ ptr[3] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ data_reg, false);
+ ptr[4] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
+}
+
+static void save_end_v0(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+ /* Wait for context read service to finish (cpu incr 3) */
+ ptr[0] = nvhost_opcode_nonincr(host1x_uclass_wait_syncpt_base_r(), 1);
+ ptr[1] = nvhost_class_host_wait_syncpt_base(h->syncpt,
+ h->waitbase, h->save_incrs);
+ /* Advance syncpoint base */
+ ptr[2] = nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1);
+ ptr[3] = nvhost_class_host_incr_syncpt_base(h->waitbase,
+ h->save_incrs);
+ /* set class back to the unit */
+ ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+}
+
+static u32 *save_regs_v0(u32 *ptr, unsigned int *pending,
+ struct nvhost_channel *ch,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+ int drain_result = 0;
+
+ for ( ; regs != rend; ++regs) {
+ u32 count = regs->count;
+ switch (regs->type) {
+ case HWCTX_REGINFO_DIRECT:
+ ptr += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT:
+ case HWCTX_REGINFO_INDIRECT_4X:
+ ptr += RESTORE_INDIRECT_SIZE;
+ break;
+ }
+ drain_result = nvhost_channel_drain_read_fifo(ch,
+ ptr, count, pending);
+ BUG_ON(drain_result < 0);
+ ptr += count;
+ }
+ return ptr;
+}
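+
+/* Note: in the v0 scheme the 3D register values read back by the hardware
+ * land in the channel FIFO; save_regs_v0() drains them on the CPU straight
+ * into the restore buffer, skipping over the restore opcode slots that
+ * setup_restore_regs_v0() already emitted.
+ */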
+
+/*** save ***/
+
+static void setup_save_regs(struct save_info *info,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+ u32 *ptr = info->ptr;
+ unsigned int save_count = info->save_count;
+ unsigned int restore_count = info->restore_count;
+
+ for ( ; regs != rend; ++regs) {
+ u32 offset = regs->offset;
+ u32 count = regs->count;
+ u32 indoff = offset + 1;
+ switch (regs->type) {
+ case HWCTX_REGINFO_DIRECT:
+ if (ptr) {
+ save_direct_v0(ptr, offset, count);
+ ptr += SAVE_DIRECT_V0_SIZE;
+ }
+ save_count += SAVE_DIRECT_V0_SIZE;
+ restore_count += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT_4X:
+ ++indoff;
+ /* fall through */
+ case HWCTX_REGINFO_INDIRECT:
+ if (ptr) {
+ save_indirect_v0(ptr, offset, 0,
+ indoff, count);
+ ptr += SAVE_INDIRECT_V0_SIZE;
+ }
+ save_count += SAVE_INDIRECT_V0_SIZE;
+ restore_count += RESTORE_INDIRECT_SIZE;
+ break;
+ }
+ if (ptr) {
+ /* SAVE cases only: reserve room for incoming data */
+ u32 k = 0;
+ /*
+ * Create a signature pattern for indirect data (which
+ * will be overwritten by true incoming data) for
+ * better deducing where we are in a long command
+ * sequence, when given only a FIFO snapshot for debug
+ * purposes.
+ */
+ for (k = 0; k < count; k++)
+ *(ptr + k) = 0xd000d000 | (offset << 16) | k;
+ ptr += count;
+ }
+ save_count += count;
+ restore_count += count;
+ }
+
+ info->ptr = ptr;
+ info->save_count = save_count;
+ info->restore_count = restore_count;
+}
+
+static void setup_save(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+ struct save_info info = {
+ ptr,
+ SAVE_BEGIN_V0_SIZE,
+ RESTORE_BEGIN_SIZE,
+ SAVE_INCRS,
+ 1
+ };
+
+ if (info.ptr) {
+ save_begin_v0(h, info.ptr);
+ info.ptr += SAVE_BEGIN_V0_SIZE;
+ }
+
+ /* save regs */
+ setup_save_regs(&info,
+ ctxsave_regs_3d_global,
+ ARRAY_SIZE(ctxsave_regs_3d_global));
+
+ if (info.ptr) {
+ save_end_v0(h, info.ptr);
+ info.ptr += SAVE_END_V0_SIZE;
+ }
+
+ wmb();
+
+ h->save_size = info.save_count + SAVE_END_V0_SIZE;
+ h->restore_size = info.restore_count + RESTORE_END_SIZE;
+ h->save_incrs = info.save_incrs;
+ h->save_thresh = h->save_incrs - SAVE_THRESH_OFFSET;
+ h->restore_incrs = info.restore_incrs;
+}
+
+
+/*** ctx3d ***/
+
+static struct nvhost_hwctx *ctx3d_alloc_v0(struct nvhost_hwctx_handler *h,
+ struct nvhost_channel *ch)
+{
+ struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
+ struct host1x_hwctx *ctx =
+ nvhost_3dctx_alloc_common(p, ch, true);
+ if (ctx) {
+ setup_restore_v0(p, ctx->restore_virt);
+ return &ctx->hwctx;
+ } else
+ return NULL;
+}
+
+static void ctx3d_save_service(struct nvhost_hwctx *nctx)
+{
+ struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+
+ u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE;
+ unsigned int pending = 0;
+
+ ptr = save_regs_v0(ptr, &pending, nctx->channel,
+ ctxsave_regs_3d_global,
+ ARRAY_SIZE(ctxsave_regs_3d_global));
+
+ wmb();
+ nvhost_syncpt_cpu_incr(&nvhost_get_host(nctx->channel->dev)->syncpt,
+ host1x_hwctx_handler(ctx)->syncpt);
+}
+
+struct nvhost_hwctx_handler *nvhost_gr3d_t20_ctxhandler_init(
+ u32 syncpt, u32 waitbase,
+ struct nvhost_channel *ch)
+{
+ struct mem_mgr *memmgr;
+ u32 *save_ptr;
+ struct host1x_hwctx_handler *p;
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return NULL;
+ memmgr = nvhost_get_host(ch->dev)->memmgr;
+
+ p->syncpt = syncpt;
+ p->waitbase = waitbase;
+
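+	/* the first pass with a NULL pointer only computes save_size and
+	 * restore_size; the second pass below fills the allocated buffer */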
+ setup_save(p, NULL);
+
+ p->save_buf = mem_op().alloc(memmgr, p->save_size * sizeof(u32), 32,
+ mem_mgr_flag_write_combine);
+ if (IS_ERR_OR_NULL(p->save_buf)) {
+		kfree(p);
+ return NULL;
+ }
+
+ p->save_slots = 1;
+
+ save_ptr = mem_op().mmap(p->save_buf);
+ if (!save_ptr) {
+ mem_op().put(memmgr, p->save_buf);
+		kfree(p);
+ return NULL;
+ }
+
+ p->save_phys = mem_op().pin(memmgr, p->save_buf);
+
+ setup_save(p, save_ptr);
+
+ p->h.alloc = ctx3d_alloc_v0;
+ p->h.save_push = save_push_v0;
+ p->h.save_service = ctx3d_save_service;
+ p->h.get = nvhost_3dctx_get;
+ p->h.put = nvhost_3dctx_put;
+
+ return &p->h;
+}
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t20.h b/drivers/video/tegra/host/gr3d/gr3d_t20.h
new file mode 100644
index 000000000000..e6fb8fdf8aba
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/gr3d_t20.h
@@ -0,0 +1,33 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d_t20.h
+ *
+ * Tegra Graphics Host 3D for Tegra2
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_GR3D_GR3D_T20_H
+#define __NVHOST_GR3D_GR3D_T20_H
+
+#include <linux/types.h>
+
+struct nvhost_hwctx_handler;
+struct nvhost_channel;
+
+struct nvhost_hwctx_handler *nvhost_gr3d_t20_ctxhandler_init(
+ u32 syncpt, u32 waitbase,
+ struct nvhost_channel *ch);
+
+#endif
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t30.c b/drivers/video/tegra/host/gr3d/gr3d_t30.c
new file mode 100644
index 000000000000..664708c7fc80
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/gr3d_t30.c
@@ -0,0 +1,439 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d_t30.c
+ *
+ * Tegra Graphics Host 3D for Tegra3
+ *
+ * Copyright (c) 2011-2012 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_hwctx.h"
+#include "nvhost_channel.h"
+#include "nvhost_cdma.h"
+#include "dev.h"
+#include "host1x/host1x01_hardware.h"
+#include "gr3d.h"
+#include "chip_support.h"
+#include "nvhost_memmgr.h"
+
+#include <mach/gpufuse.h>
+#include <mach/hardware.h>
+#include <linux/slab.h>
+
+static const struct hwctx_reginfo ctxsave_regs_3d_global[] = {
+ HWCTX_REGINFO(0xe00, 4, DIRECT),
+ HWCTX_REGINFO(0xe05, 30, DIRECT),
+ HWCTX_REGINFO(0xe25, 2, DIRECT),
+ HWCTX_REGINFO(0xe28, 2, DIRECT),
+ HWCTX_REGINFO(0xe30, 16, DIRECT),
+ HWCTX_REGINFO(0x001, 2, DIRECT),
+ HWCTX_REGINFO(0x00c, 10, DIRECT),
+ HWCTX_REGINFO(0x100, 34, DIRECT),
+ HWCTX_REGINFO(0x124, 2, DIRECT),
+ HWCTX_REGINFO(0x200, 5, DIRECT),
+ HWCTX_REGINFO(0x205, 1024, INDIRECT),
+ HWCTX_REGINFO(0x207, 1024, INDIRECT),
+ HWCTX_REGINFO(0x209, 1, DIRECT),
+ HWCTX_REGINFO(0x300, 64, DIRECT),
+ HWCTX_REGINFO(0x343, 25, DIRECT),
+ HWCTX_REGINFO(0x363, 2, DIRECT),
+ HWCTX_REGINFO(0x400, 16, DIRECT),
+ HWCTX_REGINFO(0x411, 1, DIRECT),
+ HWCTX_REGINFO(0x412, 1, DIRECT),
+ HWCTX_REGINFO(0x500, 4, DIRECT),
+ HWCTX_REGINFO(0x520, 32, DIRECT),
+ HWCTX_REGINFO(0x540, 64, INDIRECT),
+ HWCTX_REGINFO(0x600, 16, INDIRECT_4X),
+ HWCTX_REGINFO(0x603, 128, INDIRECT),
+ HWCTX_REGINFO(0x608, 4, DIRECT),
+ HWCTX_REGINFO(0x60e, 1, DIRECT),
+ HWCTX_REGINFO(0x700, 64, INDIRECT),
+ HWCTX_REGINFO(0x710, 50, DIRECT),
+ HWCTX_REGINFO(0x750, 16, DIRECT),
+ HWCTX_REGINFO(0x800, 16, INDIRECT_4X),
+ HWCTX_REGINFO(0x803, 512, INDIRECT),
+ HWCTX_REGINFO(0x805, 64, INDIRECT),
+ HWCTX_REGINFO(0x820, 32, DIRECT),
+ HWCTX_REGINFO(0x900, 64, INDIRECT),
+ HWCTX_REGINFO(0x902, 2, DIRECT),
+ HWCTX_REGINFO(0x90a, 1, DIRECT),
+ HWCTX_REGINFO(0xa02, 10, DIRECT),
+ HWCTX_REGINFO(0xb04, 1, DIRECT),
+ HWCTX_REGINFO(0xb06, 13, DIRECT),
+};
+
+static const struct hwctx_reginfo ctxsave_regs_3d_perset[] = {
+ HWCTX_REGINFO(0xe04, 1, DIRECT),
+ HWCTX_REGINFO(0xe2a, 1, DIRECT),
+ HWCTX_REGINFO(0x413, 1, DIRECT),
+ HWCTX_REGINFO(0x90b, 1, DIRECT),
+ HWCTX_REGINFO(0xe41, 1, DIRECT),
+};
+
+static unsigned int restore_set1_offset;
+
+#define SAVE_BEGIN_V1_SIZE (1 + RESTORE_BEGIN_SIZE)
+#define SAVE_DIRECT_V1_SIZE (4 + RESTORE_DIRECT_SIZE)
+#define SAVE_INDIRECT_V1_SIZE (6 + RESTORE_INDIRECT_SIZE)
+#define SAVE_END_V1_SIZE (9 + RESTORE_END_SIZE)
+#define SAVE_INCRS 3
+#define SAVE_THRESH_OFFSET 0
+#define RESTORE_BEGIN_SIZE 4
+#define RESTORE_DIRECT_SIZE 1
+#define RESTORE_INDIRECT_SIZE 2
+#define RESTORE_END_SIZE 1
+
+struct save_info {
+ u32 *ptr;
+ unsigned int save_count;
+ unsigned int restore_count;
+ unsigned int save_incrs;
+ unsigned int restore_incrs;
+};
+
+/*** v1 saver ***/
+
+static void save_push_v1(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
+{
+ struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+ struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);
+
+ /* wait for 3d idle */
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
+ nvhost_opcode_imm_incr_syncpt(
+ host1x_uclass_incr_syncpt_cond_op_done_v(),
+ p->syncpt));
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_wait_syncpt_base_r(), 1),
+ nvhost_class_host_wait_syncpt_base(p->syncpt,
+ p->waitbase, 1));
+ /* back to 3d */
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
+ NVHOST_OPCODE_NOOP);
+
+	/*
+	 * Invalidate the FDC to prevent cache-coherency issues across GPUs.
+	 * Note that we assume FDC_CONTROL_0 is left in the reset state by
+	 * all contexts. The invalidate bit will clear itself, so the
+	 * register should be unchanged after this.
+	 */
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_imm(AR3D_FDC_CONTROL_0,
+ AR3D_FDC_CONTROL_0_RESET_VAL
+ | AR3D_FDC_CONTROL_0_INVALIDATE),
+ NVHOST_OPCODE_NOOP);
+
+	/* set register set 0 and 1 register read memory output addresses,
+	 * and send their reads to memory */
+
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 2),
+ nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
+ ctx->restore_phys + restore_set1_offset * 4);
+
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 1),
+ nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
+ ctx->restore_phys);
+ /* gather the save buffer */
+ nvhost_cdma_push_gather(cdma,
+ nvhost_get_host(nctx->channel->dev)->memmgr,
+ p->save_buf,
+ 0,
+ nvhost_opcode_gather(p->save_size),
+ p->save_phys);
+}
+
+static void save_begin_v1(struct host1x_hwctx_handler *p, u32 *ptr)
+{
+ ptr[0] = nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_DATA,
+ RESTORE_BEGIN_SIZE);
+ nvhost_3dctx_restore_begin(p, ptr + 1);
+ ptr += RESTORE_BEGIN_SIZE;
+}
+
+static void save_direct_v1(u32 *ptr, u32 start_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
+ AR3D_DW_MEMORY_OUTPUT_DATA, 1);
+ nvhost_3dctx_restore_direct(ptr + 1, start_reg, count);
+ ptr += RESTORE_DIRECT_SIZE;
+ ptr[1] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_indoff_r(), 1);
+ ptr[2] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ start_reg, true);
+ /* TODO could do this in the setclass if count < 6 */
+ ptr[3] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
+}
+
+static void save_indirect_v1(u32 *ptr, u32 offset_reg, u32 offset,
+ u32 data_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ ptr[1] = nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_DATA,
+ RESTORE_INDIRECT_SIZE);
+ nvhost_3dctx_restore_indirect(ptr + 2, offset_reg, offset, data_reg,
+ count);
+ ptr += RESTORE_INDIRECT_SIZE;
+ ptr[2] = nvhost_opcode_imm(offset_reg, offset);
+ ptr[3] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_indoff_r(), 1);
+ ptr[4] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ data_reg, false);
+ ptr[5] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
+}
+
+static void save_end_v1(struct host1x_hwctx_handler *p, u32 *ptr)
+{
+ /* write end of restore buffer */
+ ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
+ AR3D_DW_MEMORY_OUTPUT_DATA, 1);
+ nvhost_3dctx_restore_end(p, ptr + 1);
+ ptr += RESTORE_END_SIZE;
+ /* reset to dual reg if necessary */
+ ptr[1] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
+ (1 << 2) - 1);
+ /* op_done syncpt incr to flush FDC */
+ ptr[2] = nvhost_opcode_imm_incr_syncpt(
+ host1x_uclass_incr_syncpt_cond_op_done_v(), p->syncpt);
+ /* host wait for that syncpt incr, and advance the wait base */
+ ptr[3] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_wait_syncpt_base_r(),
+ nvhost_mask2(
+ host1x_uclass_wait_syncpt_base_r(),
+ host1x_uclass_incr_syncpt_base_r()));
+ ptr[4] = nvhost_class_host_wait_syncpt_base(p->syncpt,
+ p->waitbase, p->save_incrs - 1);
+ ptr[5] = nvhost_class_host_incr_syncpt_base(p->waitbase,
+ p->save_incrs);
+ /* set class back to 3d */
+ ptr[6] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ /* send reg reads back to host */
+ ptr[7] = nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 0);
+ /* final syncpt increment to release waiters */
+ ptr[8] = nvhost_opcode_imm(0, p->syncpt);
+}
+
+/*** save ***/
+
+static void setup_save_regs(struct save_info *info,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+ u32 *ptr = info->ptr;
+ unsigned int save_count = info->save_count;
+ unsigned int restore_count = info->restore_count;
+
+ for ( ; regs != rend; ++regs) {
+ u32 offset = regs->offset;
+ u32 count = regs->count;
+ u32 indoff = offset + 1;
+ switch (regs->type) {
+ case HWCTX_REGINFO_DIRECT:
+ if (ptr) {
+ save_direct_v1(ptr, offset, count);
+ ptr += SAVE_DIRECT_V1_SIZE;
+ }
+ save_count += SAVE_DIRECT_V1_SIZE;
+ restore_count += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT_4X:
+ ++indoff;
+ /* fall through */
+ case HWCTX_REGINFO_INDIRECT:
+ if (ptr) {
+ save_indirect_v1(ptr, offset, 0,
+ indoff, count);
+ ptr += SAVE_INDIRECT_V1_SIZE;
+ }
+ save_count += SAVE_INDIRECT_V1_SIZE;
+ restore_count += RESTORE_INDIRECT_SIZE;
+ break;
+ }
+ if (ptr) {
+ /* SAVE cases only: reserve room for incoming data */
+ u32 k = 0;
+ /*
+ * Create a signature pattern for indirect data (which
+ * will be overwritten by true incoming data) for
+ * better deducing where we are in a long command
+ * sequence, when given only a FIFO snapshot for debug
+ * purposes.
+ */
+ for (k = 0; k < count; k++)
+ *(ptr + k) = 0xd000d000 | (offset << 16) | k;
+ ptr += count;
+ }
+ save_count += count;
+ restore_count += count;
+ }
+
+ info->ptr = ptr;
+ info->save_count = save_count;
+ info->restore_count = restore_count;
+}
+
+static void switch_gpu(struct save_info *info,
+ unsigned int save_src_set,
+ u32 save_dest_sets,
+ u32 restore_dest_sets)
+{
+ if (info->ptr) {
+ info->ptr[0] = nvhost_opcode_setclass(
+ NV_GRAPHICS_3D_CLASS_ID,
+ AR3D_DW_MEMORY_OUTPUT_DATA, 1);
+ info->ptr[1] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
+ restore_dest_sets);
+ info->ptr[2] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
+ save_dest_sets);
+ info->ptr[3] = nvhost_opcode_imm(AR3D_GSHIM_READ_SELECT,
+ save_src_set);
+ info->ptr += 4;
+ }
+ info->save_count += 4;
+ info->restore_count += 1;
+}
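+
+/* The GSHIM write masks are per-register-set bit flags (1 = set0, 2 = set1,
+ * 3 = both), and save_src_set selects which set register reads come from;
+ * see the call sites in setup_save() below.
+ */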
+
+static void setup_save(struct host1x_hwctx_handler *p, u32 *ptr)
+{
+ struct save_info info = {
+ ptr,
+ SAVE_BEGIN_V1_SIZE,
+ RESTORE_BEGIN_SIZE,
+ SAVE_INCRS,
+ 1
+ };
+ int save_end_size = SAVE_END_V1_SIZE;
+
+ if (info.ptr) {
+ save_begin_v1(p, info.ptr);
+ info.ptr += SAVE_BEGIN_V1_SIZE;
+ }
+
+ /* read from set0, write cmds through set0, restore to set0 and 1 */
+ switch_gpu(&info, 0, 1, 3);
+
+ /* save regs that are common to both sets */
+ setup_save_regs(&info,
+ ctxsave_regs_3d_global,
+ ARRAY_SIZE(ctxsave_regs_3d_global));
+
+ /* read from set 0, write cmds through set0, restore to set0 */
+ switch_gpu(&info, 0, 1, 1);
+
+ /* save set 0 specific regs */
+ setup_save_regs(&info,
+ ctxsave_regs_3d_perset,
+ ARRAY_SIZE(ctxsave_regs_3d_perset));
+
+ /* read from set1, write cmds through set1, restore to set1 */
+ switch_gpu(&info, 1, 2, 2);
+ /* note offset at which set 1 restore starts */
+ restore_set1_offset = info.restore_count;
+ /* save set 1 specific regs */
+ setup_save_regs(&info,
+ ctxsave_regs_3d_perset,
+ ARRAY_SIZE(ctxsave_regs_3d_perset));
+
+ /* read from set0, write cmds through set1, restore to set0 and 1 */
+ switch_gpu(&info, 0, 2, 3);
+
+ if (info.ptr) {
+ save_end_v1(p, info.ptr);
+ info.ptr += SAVE_END_V1_SIZE;
+ }
+
+ wmb();
+
+ p->save_size = info.save_count + save_end_size;
+ p->restore_size = info.restore_count + RESTORE_END_SIZE;
+ p->save_incrs = info.save_incrs;
+ p->save_thresh = p->save_incrs - SAVE_THRESH_OFFSET;
+ p->restore_incrs = info.restore_incrs;
+}
+
+/*** ctx3d ***/
+
+static struct nvhost_hwctx *ctx3d_alloc_v1(struct nvhost_hwctx_handler *h,
+ struct nvhost_channel *ch)
+{
+ struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
+ struct host1x_hwctx *ctx = nvhost_3dctx_alloc_common(p, ch, false);
+
+ if (ctx)
+ return &ctx->hwctx;
+ else
+ return NULL;
+}
+
+struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
+ u32 syncpt, u32 waitbase,
+ struct nvhost_channel *ch)
+{
+ struct mem_mgr *memmgr;
+ u32 *save_ptr;
+ struct host1x_hwctx_handler *p;
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ memmgr = nvhost_get_host(ch->dev)->memmgr;
+
+ p->syncpt = syncpt;
+ p->waitbase = waitbase;
+
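+	/* the first pass with a NULL pointer only computes save_size and
+	 * restore_size; the second pass below fills the allocated buffer */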
+ setup_save(p, NULL);
+
+ p->save_buf = mem_op().alloc(memmgr, p->save_size * 4, 32,
+ mem_mgr_flag_write_combine);
+ if (IS_ERR_OR_NULL(p->save_buf)) {
+		kfree(p);
+ return NULL;
+ }
+
+ p->save_slots = 8;
+
+ save_ptr = mem_op().mmap(p->save_buf);
+ if (!save_ptr) {
+ mem_op().put(memmgr, p->save_buf);
+		kfree(p);
+ return NULL;
+ }
+
+ p->save_phys = mem_op().pin(memmgr, p->save_buf);
+
+ setup_save(p, save_ptr);
+
+ mem_op().munmap(p->save_buf, save_ptr);
+
+ p->h.alloc = ctx3d_alloc_v1;
+ p->h.save_push = save_push_v1;
+ p->h.save_service = NULL;
+ p->h.get = nvhost_3dctx_get;
+ p->h.put = nvhost_3dctx_put;
+
+ return &p->h;
+}
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t30.h b/drivers/video/tegra/host/gr3d/gr3d_t30.h
new file mode 100644
index 000000000000..94d5dc0f353b
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/gr3d_t30.h
@@ -0,0 +1,33 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d_t30.h
+ *
+ * Tegra Graphics Host 3D for Tegra3
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_GR3D_GR3D_T30_H
+#define __NVHOST_GR3D_GR3D_T30_H
+
+#include <linux/types.h>
+
+struct nvhost_hwctx_handler;
+struct nvhost_channel;
+
+struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
+ u32 syncpt, u32 waitbase,
+ struct nvhost_channel *ch);
+
+#endif
diff --git a/drivers/video/tegra/host/gr3d/scale3d.c b/drivers/video/tegra/host/gr3d/scale3d.c
new file mode 100644
index 000000000000..49147975a9e4
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/scale3d.c
@@ -0,0 +1,941 @@
+/*
+ * drivers/video/tegra/host/gr3d/scale3d.c
+ *
+ * Tegra Graphics Host 3D clock scaling
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * 3d clock scaling
+ *
+ * module3d_notify_busy() is called upon submit, module3d_notify_idle() is
+ * called when all outstanding submits are completed. Idle times are measured
+ * over a fixed time period (scale3d.p_estimation_window). If the 3d module
+ * idle time percentage goes over the limit (set in scale3d.p_idle_max), 3d
+ * clocks are scaled down. If the percentage goes under the minimum limit (set
+ * in scale3d.p_idle_min), 3d clocks are scaled up. An additional test is made
+ * for clocking up quickly in response to load peaks.
+ *
+ * 3d.emc clock is scaled proportionately to 3d clock, with a quadratic-
+ * bezier-like factor added to pull 3d.emc rate a bit lower.
+ */
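+
+/*
+ * Worked example (illustrative numbers): with idle_min = 100 and
+ * idle_max = 200 (both in 1/1000 units after adjustment), an idle estimate
+ * of 400/1000 exceeds idle_max, so scaling_state_check() schedules a scale
+ * to 100 - (400 - 100) / 10 = 70 percent of the current 3d rate, while an
+ * estimate below 100/1000 resets the clocks to the maximum rate.
+ */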
+
+#include <linux/debugfs.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <mach/clk.h>
+#include <mach/hardware.h>
+#include "scale3d.h"
+#include "dev.h"
+#include <media/tegra_camera.h>
+
+#define GR3D_PRINT_STATS BIT(1)
+#define GR3D_PRINT_BUSY BIT(2)
+#define GR3D_PRINT_IDLE BIT(3)
+#define GR3D_PRINT_HINT BIT(4)
+#define GR3D_PRINT_TARGET BIT(5)
+
+/* time frame for load and hint tracking - when events come in at a larger
+ * interval, this probably indicates the current estimates are stale
+ */
+#define GR3D_TIMEFRAME 1000000 /* 1 sec */
+
+/* the number of frames to use in the running average of load estimates and
+ * throughput hints. Choosing 6 frames targets a window of about 100 msec.
+ * Large flucutuations in frame times require a window that's large enough to
+ * prevent spiky scaling behavior, which in turn exacerbates frame rate
+ * instability.
+ */
+#define GR3D_FRAME_SPAN 6
+
+static int scale3d_is_enabled(void);
+static void scale3d_enable(int enable);
+
+#define POW2(x) ((x) * (x))
+
+/*
+ * 3D clock scaling must be treated differently when the camera is on in
+ * AP37: 3D in AP37 requires 1.3V, and combining it with MPE hits the EDP
+ * limit, so the 3D clock has to be capped at a lower frequency that can
+ * run at 1.0V. The same applies to the 3D EMC clock.
+ */
+#define CAMERA_3D_CLK 300000000
+#define CAMERA_3D_EMC_CLK 437000000
+
+/*
+ * debugfs parameters to control 3d clock scaling test
+ *
+ * estimation_window - time period for clock rate evaluation
+ * idle_min - if less than [idle_min / 10] percent idle over
+ * [estimation_window] microseconds, clock up.
+ * idle_max - if over [idle_max] percent idle over [estimation_window]
+ * microseconds, clock down.
+ * max_scale - limits rate changes to no less than (100 - max_scale)% or
+ * (100 + 2 * max_scale)% of current clock rate
+ * verbosity - bit flag to control debug printouts:
+ * 1 - stats
+ * 2 - busy
+ * 3 - idle
+ * 4 - hints
+ * 5 - target frequencies
+ */
+
+struct scale3d_info_rec {
+ struct mutex lock; /* lock for timestamps etc */
+ int enable;
+ int init;
+ ktime_t last_scale;
+ int is_idle;
+ ktime_t last_adjust;
+ int fast_up_count;
+ int slow_down_count;
+ int is_scaled;
+ long emc_slope;
+ long emc_offset;
+ long emc_dip_slope;
+ long emc_dip_offset;
+ long emc_xmid;
+ unsigned long max_rate_3d;
+ unsigned long min_rate_3d;
+ ktime_t last_throughput_hint;
+
+ struct work_struct work;
+ struct delayed_work idle_timer;
+
+ ktime_t last_estimation_window;
+ long last_total_idle;
+ long total_idle;
+ ktime_t estimation_window;
+ ktime_t last_notification;
+ long idle_estimate;
+
+ unsigned int scale;
+ unsigned int p_busy_cutoff;
+ unsigned int p_estimation_window;
+ unsigned int p_use_throughput_hint;
+ unsigned int p_throughput_lo_limit;
+ unsigned int p_throughput_lower_limit;
+ unsigned int p_throughput_hi_limit;
+ unsigned int p_scale_step;
+ unsigned int p_idle_min;
+ unsigned int idle_min;
+ unsigned int p_idle_max;
+ unsigned int idle_max;
+ unsigned int p_adjust;
+ unsigned int p_scale_emc;
+ unsigned int p_emc_dip;
+ unsigned int p_verbosity;
+ struct clk *clk_3d;
+ struct clk *clk_3d2;
+ struct clk *clk_3d_emc;
+ int *freqlist;
+ int freq_count;
+};
+
+static struct scale3d_info_rec scale3d;
+
+static void scale_to_freq(unsigned long hz)
+{
+ unsigned long curr;
+
+ if (!tegra_is_clk_enabled(scale3d.clk_3d))
+ return;
+
+ if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3)
+ if (!tegra_is_clk_enabled(scale3d.clk_3d2))
+ return;
+
+ curr = clk_get_rate(scale3d.clk_3d);
+ if (hz == curr)
+ return;
+
+ if (!(hz >= scale3d.max_rate_3d && curr == scale3d.max_rate_3d)) {
+ if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3)
+ clk_set_rate(scale3d.clk_3d2, 0);
+ clk_set_rate(scale3d.clk_3d, hz);
+
+ if (scale3d.p_scale_emc) {
+ long after = (long) clk_get_rate(scale3d.clk_3d);
+ hz = after * scale3d.emc_slope + scale3d.emc_offset;
+ if (scale3d.p_emc_dip)
+ hz -=
+ (scale3d.emc_dip_slope *
+ POW2(after / 1000 - scale3d.emc_xmid) +
+ scale3d.emc_dip_offset);
+ clk_set_rate(scale3d.clk_3d_emc, hz);
+ }
+ }
+}
+
+static void scale3d_clocks(unsigned long percent)
+{
+ unsigned long hz, curr;
+
+ curr = clk_get_rate(scale3d.clk_3d);
+ hz = percent * (curr / 100);
+
+ scale_to_freq(hz);
+}
+
+static void scale3d_clocks_handler(struct work_struct *work)
+{
+ unsigned int scale;
+
+ mutex_lock(&scale3d.lock);
+ scale = scale3d.scale;
+ mutex_unlock(&scale3d.lock);
+
+ if (scale != 0)
+ scale3d_clocks(scale);
+}
+
+void nvhost_scale3d_suspend(struct nvhost_device *dev)
+{
+ if (!scale3d.enable)
+ return;
+
+ cancel_work_sync(&scale3d.work);
+ cancel_delayed_work(&scale3d.idle_timer);
+}
+
+/* set 3d clocks to max */
+static void reset_3d_clocks(void)
+{
+ if (clk_get_rate(scale3d.clk_3d) != scale3d.max_rate_3d) {
+ if (is_tegra_camera_on())
+ clk_set_rate(scale3d.clk_3d, CAMERA_3D_CLK);
+ else
+ clk_set_rate(scale3d.clk_3d, scale3d.max_rate_3d);
+ if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) {
+ if (is_tegra_camera_on())
+ clk_set_rate(scale3d.clk_3d2, CAMERA_3D_CLK);
+ else
+ clk_set_rate(scale3d.clk_3d2,
+ scale3d.max_rate_3d);
+ }
+ if (scale3d.p_scale_emc) {
+ if (is_tegra_camera_on())
+ clk_set_rate(scale3d.clk_3d_emc,
+ CAMERA_3D_EMC_CLK);
+ else
+ clk_set_rate(scale3d.clk_3d_emc,
+ clk_round_rate(scale3d.clk_3d_emc,
+ UINT_MAX));
+ }
+ }
+}
+
+static int scale3d_is_enabled(void)
+{
+ int enable;
+
+ if (!scale3d.enable)
+ return 0;
+
+ mutex_lock(&scale3d.lock);
+ enable = scale3d.enable;
+ mutex_unlock(&scale3d.lock);
+
+ return enable;
+}
+
+static void scale3d_enable(int enable)
+{
+ int disable = 0;
+
+ mutex_lock(&scale3d.lock);
+
+ if (enable) {
+ if (scale3d.max_rate_3d != scale3d.min_rate_3d)
+ scale3d.enable = 1;
+ } else {
+ scale3d.enable = 0;
+ disable = 1;
+ }
+
+ mutex_unlock(&scale3d.lock);
+
+ if (disable)
+ reset_3d_clocks();
+}
+
+/* scaling_adjust - use scale up / scale down hint counts to adjust scaling
+ * parameters.
+ *
+ * hint_ratio is 100 x the ratio of scale up to scale down hints. Three cases
+ * are distinguished:
+ *
+ * hint_ratio < HINT_RATIO_MIN - set parameters to maximize scaling effect
+ * hint_ratio > HINT_RATIO_MAX - set parameters to minimize scaling effect
+ * hint_ratio between limits - scale parameters linearly
+ *
+ * the parameters adjusted are
+ *
+ * * idle_min percentage
+ * * idle_max percentage
+ */
+#define SCALING_ADJUST_PERIOD 1000000
+#define HINT_RATIO_MAX 400
+#define HINT_RATIO_MIN 100
+#define HINT_RATIO_MID ((HINT_RATIO_MAX + HINT_RATIO_MIN) / 2)
+#define HINT_RATIO_DIFF (HINT_RATIO_MAX - HINT_RATIO_MIN)
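+
+/* Worked example (illustrative): 3 scale-up and 1 scale-down hints give
+ * hint_ratio = 100 * (3 + 1) / (1 + 1) = 200; diff = 250 - 200 = 50, so
+ * factor = -50 and both idle_min and idle_max are lowered by 50/300, i.e.
+ * one sixth of their base values, for the next adjustment period.
+ */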
+
+static void scaling_adjust(ktime_t time)
+{
+ long hint_ratio;
+ int idle_min_adjustment;
+ int idle_max_adjustment;
+ unsigned long dt;
+
+ dt = (unsigned long) ktime_us_delta(time, scale3d.last_adjust);
+ if (dt < SCALING_ADJUST_PERIOD)
+ return;
+
+ hint_ratio = (100 * (scale3d.fast_up_count + 1)) /
+ (scale3d.slow_down_count + 1);
+
+ if (hint_ratio > HINT_RATIO_MAX) {
+ idle_min_adjustment = scale3d.p_idle_min;
+ idle_max_adjustment = scale3d.p_idle_max;
+ } else if (hint_ratio < HINT_RATIO_MIN) {
+ idle_min_adjustment = -((int) scale3d.p_idle_min) / 2;
+ idle_max_adjustment = -((int) scale3d.p_idle_max) / 2;
+ } else {
+ int diff;
+ int factor;
+
+ diff = HINT_RATIO_MID - hint_ratio;
+ if (diff < 0)
+ factor = -diff * 2;
+ else {
+ factor = -diff;
+ diff *= 2;
+ }
+
+ idle_min_adjustment =
+ (factor * (int) scale3d.p_idle_min) / HINT_RATIO_DIFF;
+ idle_max_adjustment =
+ (factor * (int) scale3d.p_idle_max) / HINT_RATIO_DIFF;
+ }
+
+ scale3d.idle_min = scale3d.p_idle_min + idle_min_adjustment;
+ scale3d.idle_max = scale3d.p_idle_max + idle_max_adjustment;
+
+ if (scale3d.p_verbosity & GR3D_PRINT_STATS)
+ pr_info("scale3d stats: + %d - %d min %u max %u\n",
+ scale3d.fast_up_count, scale3d.slow_down_count,
+ scale3d.idle_min, scale3d.idle_max);
+
+ scale3d.fast_up_count = 0;
+ scale3d.slow_down_count = 0;
+ scale3d.last_adjust = time;
+}
+
+#undef SCALING_ADJUST_PERIOD
+#undef HINT_RATIO_MAX
+#undef HINT_RATIO_MIN
+#undef HINT_RATIO_MID
+#undef HINT_RATIO_DIFF
+
+static void scaling_state_check(ktime_t time)
+{
+ unsigned long dt;
+
+ /* adjustment: set scale parameters (idle_min, idle_max) +/- 25%
+ * based on ratio of scale up to scale down hints
+ */
+ if (scale3d.p_adjust)
+ scaling_adjust(time);
+ else {
+ scale3d.idle_min = scale3d.p_idle_min;
+ scale3d.idle_max = scale3d.p_idle_max;
+ }
+
+ dt = (unsigned long) ktime_us_delta(time, scale3d.last_scale);
+ if (dt < scale3d.p_estimation_window)
+ return;
+
+ scale3d.last_scale = time;
+
+ /* if too busy, scale up */
+ if (scale3d.idle_estimate < scale3d.idle_min) {
+ scale3d.is_scaled = 0;
+ scale3d.fast_up_count++;
+ if (scale3d.p_verbosity & GR3D_PRINT_BUSY)
+ pr_info("scale3d: %ld/1000 busy\n",
+ 1000 - scale3d.idle_estimate);
+
+ reset_3d_clocks();
+ return;
+ }
+
+ if (scale3d.p_verbosity & GR3D_PRINT_IDLE)
+ pr_info("scale3d: idle %lu/1000\n",
+ scale3d.idle_estimate);
+
+ if (scale3d.idle_estimate > scale3d.idle_max) {
+ if (!scale3d.is_scaled)
+ scale3d.is_scaled = 1;
+
+ scale3d.slow_down_count++;
+ /* if idle time is high, clock down */
+ scale3d.scale =
+ 100 - (scale3d.idle_estimate - scale3d.idle_min) / 10;
+ schedule_work(&scale3d.work);
+ }
+}
+
+/* the idle estimate is done by keeping two timestamps, initially set to the
+ * same time. Once the estimation window has been exceeded, one timestamp is
+ * moved up to the current time. The idle estimate is calculated from the
+ * idle time percentage measured since the earlier timestamp. The next time
+ * an estimation window is exceeded, the previous idle time and estimate are
+ * moved up; this is intended to prevent abrupt changes to the idle
+ * estimate.
+ */
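+/* Worked example (illustrative): if 2500 us of the last 10000 us measured
+ * span were idle, idle_estimate = 1000 * 2500 / 10000 = 250, i.e. the
+ * module was idle 25% of the time.
+ */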
+static void update_load_estimate(int idle)
+{
+ unsigned long window;
+ unsigned long t;
+
+ ktime_t now = ktime_get();
+ t = ktime_us_delta(now, scale3d.last_notification);
+
+ /* if the last event was over GR3D_TIMEFRAME usec ago (1 sec), the
+ * current load tracking data is probably stale
+ */
+ if (t > GR3D_TIMEFRAME) {
+ scale3d.is_idle = idle;
+ scale3d.last_notification = now;
+ scale3d.estimation_window = now;
+ scale3d.last_estimation_window = now;
+ scale3d.total_idle = 0;
+ scale3d.last_total_idle = 0;
+ scale3d.idle_estimate = idle ? 1000 : 0;
+ return;
+ }
+
+ if (scale3d.is_idle) {
+ scale3d.total_idle += t;
+ scale3d.last_total_idle += t;
+ }
+
+ scale3d.is_idle = idle;
+ scale3d.last_notification = now;
+
+ window = ktime_us_delta(now, scale3d.last_estimation_window);
+ /* prevent division by 0 if events come in less than 1 usec apart */
+ if (window > 0)
+ scale3d.idle_estimate =
+ (1000 * scale3d.last_total_idle) / window;
+
+ /* move up to the last estimation window */
+ if (ktime_us_delta(now, scale3d.estimation_window) >
+ scale3d.p_estimation_window) {
+ scale3d.last_estimation_window = scale3d.estimation_window;
+ scale3d.last_total_idle = scale3d.total_idle;
+ scale3d.total_idle = 0;
+ scale3d.estimation_window = now;
+ }
+}
+
+void nvhost_scale3d_notify_idle(struct nvhost_device *dev)
+{
+ ktime_t t;
+ unsigned long dt;
+ int delay;
+
+ if (!scale3d.enable)
+ return;
+
+ update_load_estimate(1);
+
+ t = ktime_get();
+
+ /* if throughput hint enabled, and last hint is recent enough, return */
+ if (scale3d.p_use_throughput_hint) {
+ dt = ktime_us_delta(t, scale3d.last_throughput_hint);
+ if (dt < GR3D_TIMEFRAME)
+ return;
+ }
+
+ mutex_lock(&scale3d.lock);
+
+ scaling_state_check(t);
+
+	/* delay for idle_max/1000 of twice the estimation window,
+	 * converted from microseconds to milliseconds */
+ delay = (scale3d.idle_max * scale3d.p_estimation_window) / 500000;
+ schedule_delayed_work(&scale3d.idle_timer, msecs_to_jiffies(delay));
+
+ mutex_unlock(&scale3d.lock);
+}
+
+void nvhost_scale3d_notify_busy(struct nvhost_device *dev)
+{
+ ktime_t t;
+
+ if (!scale3d.enable)
+ return;
+
+ update_load_estimate(0);
+
+ t = ktime_get();
+
+ /* if throughput hint enabled, and last hint is recent enough, return */
+ if (scale3d.p_use_throughput_hint) {
+ unsigned long dt;
+ dt = ktime_us_delta(t, scale3d.last_throughput_hint);
+ if (dt < GR3D_TIMEFRAME)
+ return;
+ }
+
+ mutex_lock(&scale3d.lock);
+
+ cancel_delayed_work(&scale3d.idle_timer);
+ scaling_state_check(t);
+
+ mutex_unlock(&scale3d.lock);
+}
+
+struct score {
+ int size; /* number of elements */
+ int pos; /* position in ring buffer */
+ int count; /* actual item count */
+ unsigned int sum; /* running sum */
+ unsigned int prev; /* previous score after 'reset' operation */
+ unsigned int list[]; /* ring buffer */
+};
+
+static struct score *score_init(int capacity)
+{
+ struct score *s;
+
+ s = kzalloc(sizeof(struct score) + capacity * sizeof(int), GFP_KERNEL);
+ if (s == NULL)
+ return NULL;
+
+ s->size = capacity;
+
+ return s;
+}
+
+static void score_delete(struct score *s)
+{
+ kfree(s);
+}
+
+#define score_get_average(s) ((s)->count ? (s)->sum / (s)->count : 0)
+
+static void score_add(struct score *s, unsigned int reading)
+{
+ if (s->count < s->size) {
+ s->sum += reading;
+ s->count++;
+ } else
+ s->sum = s->sum - s->list[s->pos] + reading;
+
+ s->list[s->pos] = reading;
+ s->pos = (s->pos + 1) % s->size;
+}
+
+static unsigned int score_reset(struct score *s)
+{
+ s->prev = s->sum;
+
+ s->count = 0;
+ s->pos = 0;
+ s->sum = 0;
+
+ return s->prev;
+}
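+
+/* Usage note: busy_history and hint_history below are 'score' ring buffers
+ * (sized from GR3D_FRAME_SPAN at init); score_add() pushes a reading,
+ * evicting the oldest once full, and score_get_average() returns the
+ * running mean used by nvhost_scale3d_set_throughput_hint().
+ */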
+
+int freqlist_up(long target, int steps)
+{
+ int i, pos;
+
+ for (i = 0; i < scale3d.freq_count; i++)
+ if (scale3d.freqlist[i] >= target)
+ break;
+
+ pos = min(scale3d.freq_count - 1, i + steps);
+ return scale3d.freqlist[pos];
+}
+
+int freqlist_down(long target, int steps)
+{
+ int i, pos;
+
+ for (i = scale3d.freq_count - 1; i >= 0; i--)
+ if (scale3d.freqlist[i] <= target)
+ break;
+
+ pos = max(0, i - steps);
+ return scale3d.freqlist[pos];
+}
+
+static struct score *busy_history;
+static struct score *hint_history;
+
+/* When a throughput hint is given, perform scaling based on the hint and on
+ * the current idle estimate, as follows:
+ *
+ * 1. On moderate loads, force the min frequency if the throughput hint is
+ *    not too low.
+ * 2. Otherwise, calculate target-rate = max-rate * load-percentage,
+ * 3. unless the current or average throughput hint is below the minimum
+ *    limit, in which case choose a higher rate,
+ * 4. or the average throughput hint is above the maximum limit, in which
+ *    case choose a lower rate.
+ */
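+/* Worked example (illustrative, assuming avg_busy ends up above
+ * p_busy_cutoff): idle_estimate = 300 gives busy = 700, so the first-cut
+ * target is (max_rate_3d / 1000) * 700, i.e. 70% of the maximum rate; if
+ * the hint or its average is at or below p_throughput_lo_limit, the target
+ * is then bumped up by p_scale_step entries in the frequency table via
+ * freqlist_up().
+ */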
+void nvhost_scale3d_set_throughput_hint(int hint)
+{
+ ktime_t now;
+ long busy;
+ long curr;
+ long target;
+ long dt;
+ int avg_busy, avg_hint;
+
+ if (!scale3d.enable)
+ return;
+
+ if (!scale3d.p_use_throughput_hint)
+ return;
+
+ if (scale3d.p_verbosity & GR3D_PRINT_HINT)
+ pr_info("3fds: idle %ld, hint %d\n",
+ scale3d.idle_estimate, hint);
+
+ now = ktime_get();
+ dt = ktime_us_delta(now, scale3d.last_throughput_hint);
+ if (dt > GR3D_TIMEFRAME) {
+ score_reset(busy_history);
+ score_reset(hint_history);
+ }
+
+ scale3d.last_throughput_hint = now;
+
+ busy = 1000 - scale3d.idle_estimate;
+ curr = clk_get_rate(scale3d.clk_3d);
+ target = scale3d.min_rate_3d;
+
+ score_add(busy_history, busy);
+ score_add(hint_history, hint);
+
+ avg_busy = score_get_average(busy_history);
+ avg_hint = score_get_average(hint_history);
+
+ if (busy > 0)
+ target = (curr / 1000) * busy;
+
+	/* In practice, running the gpu at the min frequency is typically
+	 * sufficient to keep up performance at loads of up to 70% in most
+	 * cases, but the average hint value is checked so the rate can be
+	 * raised when that is not enough.
+	 */
+ if (avg_busy <= scale3d.p_busy_cutoff &&
+ avg_hint >= scale3d.p_throughput_lower_limit)
+ target = scale3d.min_rate_3d;
+ else {
+ target = (scale3d.max_rate_3d / 1000) * avg_busy;
+
+ /* Scale up if either the current hint or the running average
+ * are below the target to prevent performance drop.
+ */
+ if (hint <= scale3d.p_throughput_lo_limit ||
+ avg_hint <= scale3d.p_throughput_lo_limit) {
+ if (target < curr)
+ target = curr;
+ target = freqlist_up(target, scale3d.p_scale_step);
+ } else if (avg_hint >= scale3d.p_throughput_hi_limit) {
+ if (target > curr)
+ target = curr;
+ target = freqlist_down(target, scale3d.p_scale_step);
+ }
+ }
+
+ scale_to_freq(target);
+
+ if (scale3d.p_verbosity & GR3D_PRINT_TARGET)
+ pr_info("3dfs: busy %ld <%d>, curr %ld, t %ld, hint %d <%d>\n",
+ busy, avg_busy, curr / 1000000, target, hint, avg_hint);
+}
+EXPORT_SYMBOL(nvhost_scale3d_set_throughput_hint);
+
+static void scale3d_idle_handler(struct work_struct *work)
+{
+ int notify_idle = 0;
+
+ if (!scale3d.enable)
+ return;
+
+ mutex_lock(&scale3d.lock);
+
+ if (scale3d.is_idle && tegra_is_clk_enabled(scale3d.clk_3d)) {
+ unsigned long curr = clk_get_rate(scale3d.clk_3d);
+ if (curr > scale3d.min_rate_3d)
+ notify_idle = 1;
+ }
+
+ mutex_unlock(&scale3d.lock);
+
+ if (notify_idle)
+ nvhost_scale3d_notify_idle(NULL);
+}
+
+/*
+ * debugfs parameters to control 3d clock scaling
+ */
+
+void nvhost_scale3d_debug_init(struct dentry *de)
+{
+ struct dentry *d, *f;
+
+ d = debugfs_create_dir("scaling", de);
+ if (!d) {
+ pr_err("scale3d: can\'t create debugfs directory\n");
+ return;
+ }
+
+#define CREATE_SCALE3D_FILE(fname) \
+ do {\
+ f = debugfs_create_u32(#fname, S_IRUGO | S_IWUSR, d,\
+ &scale3d.p_##fname);\
+ if (NULL == f) {\
+ pr_err("scale3d: can\'t create file " #fname "\n");\
+ return;\
+ } \
+ } while (0)
+
+ CREATE_SCALE3D_FILE(estimation_window);
+ CREATE_SCALE3D_FILE(idle_min);
+ CREATE_SCALE3D_FILE(idle_max);
+ CREATE_SCALE3D_FILE(adjust);
+ CREATE_SCALE3D_FILE(scale_emc);
+ CREATE_SCALE3D_FILE(emc_dip);
+ CREATE_SCALE3D_FILE(use_throughput_hint);
+ CREATE_SCALE3D_FILE(throughput_hi_limit);
+ CREATE_SCALE3D_FILE(throughput_lo_limit);
+ CREATE_SCALE3D_FILE(throughput_lower_limit);
+ CREATE_SCALE3D_FILE(scale_step);
+ CREATE_SCALE3D_FILE(verbosity);
+#undef CREATE_SCALE3D_FILE
+}
+
+static ssize_t enable_3d_scaling_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t res;
+
+ res = snprintf(buf, PAGE_SIZE, "%d\n", scale3d_is_enabled());
+
+ return res;
+}
+
+static ssize_t enable_3d_scaling_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long val = 0;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ scale3d_enable(val);
+
+ return count;
+}
+
+static DEVICE_ATTR(enable_3d_scaling, S_IRUGO | S_IWUSR,
+ enable_3d_scaling_show, enable_3d_scaling_store);
+
+#define MAX_FREQ_COUNT 0x40 /* 64 frequencies should be enough for anyone */
+
+void nvhost_scale3d_init(struct nvhost_device *d)
+{
+ if (!scale3d.init) {
+ int error;
+ unsigned long max_emc, min_emc;
+ long correction;
+ long rate;
+ int freqs[MAX_FREQ_COUNT];
+
+ mutex_init(&scale3d.lock);
+
+ INIT_WORK(&scale3d.work, scale3d_clocks_handler);
+ INIT_DELAYED_WORK(&scale3d.idle_timer, scale3d_idle_handler);
+
+ scale3d.clk_3d = d->clk[0];
+ if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) {
+ scale3d.clk_3d2 = d->clk[1];
+ scale3d.clk_3d_emc = d->clk[2];
+ } else
+ scale3d.clk_3d_emc = d->clk[1];
+
+ scale3d.max_rate_3d = clk_round_rate(scale3d.clk_3d, UINT_MAX);
+ scale3d.min_rate_3d = clk_round_rate(scale3d.clk_3d, 0);
+
+ if (scale3d.max_rate_3d == scale3d.min_rate_3d) {
+ pr_warn("scale3d: 3d max rate = min rate (%lu), "
+ "disabling\n", scale3d.max_rate_3d);
+ scale3d.enable = 0;
+ return;
+ }
+
+ /* emc scaling:
+ *
+ * Remc = S * R3d + O - (Sd * (R3d - Rm)^2 + Od)
+ *
+ * Remc - 3d.emc rate
+ * R3d - 3d.cbus rate
+ * Rm - 3d.cbus 'middle' rate = (max + min)/2
+ * S - emc_slope
+ * O - emc_offset
+ * Sd - emc_dip_slope
+ * Od - emc_dip_offset
+ *
+ * This superposes a quadratic dip, centered on the middle 3d
+ * frequency, on the linear correlation of 3d.emc to 3d clock
+ * rates.
+ *
+ * S, O are chosen so that the maximum 3d rate produces the
+ * maximum 3d.emc rate exactly, and the minimum 3d rate produces
+ * at least the minimum 3d.emc rate.
+ *
+ * Sd and Od are chosen to produce the largest dip that still
+ * keeps 3d.emc frequencies monotonically increasing with 3d
+ * frequencies. To achieve this, the first derivative of Remc
+ * with respect to R3d should be zero at the minimal 3d rate:
+ *
+ * R'emc = S - 2 * Sd * (R3d - Rm)
+ * R'emc(R3d-min) = 0
+ * S = 2 * Sd * (R3d-min - Rm)
+ * = 2 * Sd * (R3d-min - R3d-max) / 2
+ * Sd = S / (R3d-min - R3d-max)
+ *
+ * +---------------------------------------------------+
+ * | Sd = -(emc-max - emc-min) / (R3d-min - R3d-max)^2 |
+ * +---------------------------------------------------+
+ *
+ * dip = Sd * (R3d - Rm)^2 + Od
+ *
+ * requiring dip(R3d-min) = 0 and dip(R3d-max) = 0 gives
+ *
+ * Sd * (R3d-min - Rm)^2 + Od = 0
+ * Od = -Sd * ((R3d-min - R3d-max) / 2)^2
+ * = -Sd * ((R3d-min - R3d-max)^2) / 4
+ *
+ * +------------------------------+
+ * | Od = (emc-max - emc-min) / 4 |
+ * +------------------------------+
+ */
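+
+ /* Worked example (assumed rates): with 3d rates of 200-520 MHz
+ * and emc rates of 200-667 MHz, S ~= 467/320 ~= 1.46 and
+ * Od ~= 467/4 ~= 117 MHz, so the emc target meets the linear fit
+ * at both rate endpoints and dips about 117 MHz below it at the
+ * midpoint rate Rm = 360 MHz.
+ */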
+
+ max_emc = clk_round_rate(scale3d.clk_3d_emc, UINT_MAX);
+ min_emc = clk_round_rate(scale3d.clk_3d_emc, 0);
+
+ scale3d.emc_slope = (max_emc - min_emc) /
+ (scale3d.max_rate_3d - scale3d.min_rate_3d);
+ scale3d.emc_offset = max_emc -
+ scale3d.emc_slope * scale3d.max_rate_3d;
+ /* guarantee max 3d rate maps to max emc rate */
+ scale3d.emc_offset += max_emc -
+ (scale3d.emc_slope * scale3d.max_rate_3d +
+ scale3d.emc_offset);
+
+ scale3d.emc_dip_offset = (max_emc - min_emc) / 4;
+ scale3d.emc_dip_slope =
+ -4 * (scale3d.emc_dip_offset /
+ (POW2(scale3d.max_rate_3d - scale3d.min_rate_3d)));
+ scale3d.emc_xmid =
+ (scale3d.max_rate_3d + scale3d.min_rate_3d) / 2;
+ correction =
+ scale3d.emc_dip_offset +
+ scale3d.emc_dip_slope *
+ POW2(scale3d.max_rate_3d - scale3d.emc_xmid);
+ scale3d.emc_dip_offset -= correction;
+
+ scale3d.is_idle = 1;
+
+ /* set scaling parameter defaults */
+ scale3d.enable = 1;
+ scale3d.idle_min = scale3d.p_idle_min = 100;
+ scale3d.idle_max = scale3d.p_idle_max = 150;
+ scale3d.p_scale_emc = 1;
+ scale3d.p_emc_dip = 1;
+ scale3d.p_verbosity = 0;
+ scale3d.p_adjust = 1;
+ scale3d.p_use_throughput_hint = 1;
+ scale3d.p_throughput_lower_limit = 940;
+ scale3d.p_throughput_lo_limit = 990;
+ scale3d.p_throughput_hi_limit = 1010;
+ scale3d.p_scale_step = 1;
+ scale3d.p_estimation_window = 8000;
+ scale3d.p_busy_cutoff = 750;
+
+ error = device_create_file(&d->dev,
+ &dev_attr_enable_3d_scaling);
+ if (error)
+ dev_err(&d->dev, "failed to create sysfs attributes");
+
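+ /* Enumerate the clock's supported rates: each clk_round_rate()
+ * call is seeded 2 kHz past the previous result so it lands on
+ * the next discrete step, until the maximum rate is passed. */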
+ rate = 0;
+ scale3d.freq_count = 0;
+ while (rate <= scale3d.max_rate_3d) {
+ long rounded_rate;
+ if (unlikely(scale3d.freq_count == MAX_FREQ_COUNT)) {
+ pr_err("%s: too many frequencies\n", __func__);
+ break;
+ }
+ rounded_rate =
+ clk_round_rate(scale3d.clk_3d, rate);
+ freqs[scale3d.freq_count++] = rounded_rate;
+ rate = rounded_rate + 2000;
+ }
+ scale3d.freqlist =
+ kmalloc(scale3d.freq_count * sizeof(int), GFP_KERNEL);
+ if (scale3d.freqlist == NULL) {
+ pr_err("%s: can't allocate freq table\n", __func__);
+ return;
+ }
+
+ memcpy(scale3d.freqlist, freqs,
+ scale3d.freq_count * sizeof(int));
+
+ busy_history = score_init(GR3D_FRAME_SPAN);
+ if (busy_history == NULL)
+ pr_err("%s: can\'t init load tracking array\n",
+ __func__);
+
+ hint_history = score_init(GR3D_FRAME_SPAN);
+ if (hint_history == NULL)
+ pr_err("%s: can\'t init throughput tracking array\n",
+ __func__);
+
+ scale3d.init = 1;
+ }
+}
+
+void nvhost_scale3d_deinit(struct nvhost_device *dev)
+{
+ device_remove_file(&dev->dev, &dev_attr_enable_3d_scaling);
+ scale3d.init = 0;
+ if (scale3d.freqlist != NULL) {
+ kfree(scale3d.freqlist);
+ scale3d.freq_count = 0;
+ scale3d.freqlist = NULL;
+ }
+
+ score_delete(busy_history);
+ score_delete(hint_history);
+}
diff --git a/drivers/video/tegra/host/gr3d/scale3d.h b/drivers/video/tegra/host/gr3d/scale3d.h
new file mode 100644
index 000000000000..f8aae1d591a6
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/scale3d.h
@@ -0,0 +1,47 @@
+/*
+ * drivers/video/tegra/host/gr3d/scale3d.h
+ *
+ * Tegra Graphics Host 3D Clock Scaling
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef NVHOST_T30_SCALE3D_H
+#define NVHOST_T30_SCALE3D_H
+
+struct nvhost_device;
+struct device;
+struct dentry;
+
+/* Initialization and de-initialization for module */
+void nvhost_scale3d_init(struct nvhost_device *);
+void nvhost_scale3d_deinit(struct nvhost_device *);
+
+/* Suspend is called when powering down module */
+void nvhost_scale3d_suspend(struct nvhost_device *);
+
+/* reset 3d module load counters, called on resume */
+void nvhost_scale3d_reset(void);
+
+/*
+ * call when performing submit to notify scaling mechanism that 3d module is
+ * in use
+ */
+void nvhost_scale3d_notify_busy(struct nvhost_device *);
+void nvhost_scale3d_notify_idle(struct nvhost_device *);
+
+void nvhost_scale3d_debug_init(struct dentry *de);
+
+#endif
diff --git a/drivers/video/tegra/host/host1x/Makefile b/drivers/video/tegra/host/host1x/Makefile
new file mode 100644
index 000000000000..76664945e12b
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/Makefile
@@ -0,0 +1,8 @@
+GCOV_PROFILE := y
+
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-host1x-objs = \
+ host1x.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-host1x.o
diff --git a/drivers/video/tegra/host/host1x/host1x.c b/drivers/video/tegra/host/host1x/host1x.c
new file mode 100644
index 000000000000..31899c78065b
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x.c
@@ -0,0 +1,552 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x.c
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+#include <linux/clk.h>
+
+#include "dev.h"
+#include "bus.h"
+#include <trace/events/nvhost.h>
+
+#include <linux/nvhost.h>
+#include <linux/nvhost_ioctl.h>
+
+#include "debug.h"
+#include "bus_client.h"
+#include "nvhost_acm.h"
+#include "nvhost_channel.h"
+#include "nvhost_job.h"
+
+#define DRIVER_NAME "host1x"
+
+struct nvhost_ctrl_userctx {
+ struct nvhost_master *dev;
+ u32 *mod_locks;
+};
+
+static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
+{
+ struct nvhost_ctrl_userctx *priv = filp->private_data;
+ int i;
+
+ trace_nvhost_ctrlrelease(priv->dev->dev->name);
+
+ filp->private_data = NULL;
+ if (priv->mod_locks[0])
+ nvhost_module_idle(priv->dev->dev);
+ for (i = 1; i < nvhost_syncpt_nb_mlocks(&priv->dev->syncpt); i++)
+ if (priv->mod_locks[i])
+ nvhost_mutex_unlock(&priv->dev->syncpt, i);
+ kfree(priv->mod_locks);
+ kfree(priv);
+ return 0;
+}
+
+static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
+{
+ struct nvhost_master *host =
+ container_of(inode->i_cdev, struct nvhost_master, cdev);
+ struct nvhost_ctrl_userctx *priv;
+ u32 *mod_locks;
+
+ trace_nvhost_ctrlopen(host->dev->name);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ mod_locks = kzalloc(sizeof(u32)
+ * nvhost_syncpt_nb_mlocks(&host->syncpt),
+ GFP_KERNEL);
+
+ if (!(priv && mod_locks)) {
+ kfree(priv);
+ kfree(mod_locks);
+ return -ENOMEM;
+ }
+
+ priv->dev = host;
+ priv->mod_locks = mod_locks;
+ filp->private_data = priv;
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_read(struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_syncpt_read_args *args)
+{
+ if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
+ return -EINVAL;
+ args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
+ trace_nvhost_ioctl_ctrl_syncpt_read(args->id, args->value);
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_incr(struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_syncpt_incr_args *args)
+{
+ if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
+ return -EINVAL;
+ trace_nvhost_ioctl_ctrl_syncpt_incr(args->id);
+ nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_waitex(struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_syncpt_waitex_args *args)
+{
+ u32 timeout;
+ int err;
+ if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
+ return -EINVAL;
+ if (args->timeout == NVHOST_NO_TIMEOUT)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = (u32)msecs_to_jiffies(args->timeout);
+
+ err = nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
+ args->thresh, timeout, &args->value);
+ trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
+ args->timeout, args->value, err);
+
+ return err;
+}
+
+static int nvhost_ioctl_ctrl_module_mutex(struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_module_mutex_args *args)
+{
+ int err = 0;
+ if (args->id >= nvhost_syncpt_nb_mlocks(&ctx->dev->syncpt) ||
+ args->lock > 1)
+ return -EINVAL;
+
+ trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
+ if (args->lock && !ctx->mod_locks[args->id]) {
+ if (args->id == 0)
+ nvhost_module_busy(ctx->dev->dev);
+ else
+ err = nvhost_mutex_try_lock(&ctx->dev->syncpt,
+ args->id);
+ if (!err)
+ ctx->mod_locks[args->id] = 1;
+ } else if (!args->lock && ctx->mod_locks[args->id]) {
+ if (args->id == 0)
+ nvhost_module_idle(ctx->dev->dev);
+ else
+ nvhost_mutex_unlock(&ctx->dev->syncpt, args->id);
+ ctx->mod_locks[args->id] = 0;
+ }
+ return err;
+}
+
+static int match_by_moduleid(struct device *dev, void *data)
+{
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ u32 id = (u32)data;
+
+ return id == ndev->moduleid;
+}
+
+static struct nvhost_device *get_ndev_by_moduleid(struct nvhost_master *host,
+ u32 id)
+{
+ struct device *dev = bus_find_device(&nvhost_bus_inst->nvhost_bus_type,
+ NULL, (void *)id, match_by_moduleid);
+
+ return dev ? to_nvhost_device(dev) : NULL;
+}
+
+static int nvhost_ioctl_ctrl_module_regrdwr(struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_module_regrdwr_args *args)
+{
+ u32 num_offsets = args->num_offsets;
+ u32 *offsets = args->offsets;
+ u32 *values = args->values;
+ u32 vals[64];
+ struct nvhost_device *ndev;
+
+ trace_nvhost_ioctl_ctrl_module_regrdwr(args->id,
+ args->num_offsets, args->write);
+ /* Check that there is something to read and that block size is
+ * u32 aligned */
+ if (num_offsets == 0 || args->block_size & 3)
+ return -EINVAL;
+
+ ndev = get_ndev_by_moduleid(ctx->dev, args->id);
+ if (!ndev)
+ return -EINVAL;
+
+ while (num_offsets--) {
+ int err;
+ int remaining = args->block_size >> 2;
+ u32 offs;
+ if (get_user(offs, offsets))
+ return -EFAULT;
+ offsets++;
+ while (remaining) {
+ int batch = min(remaining, 64);
+ if (args->write) {
+ if (copy_from_user(vals, values,
+ batch*sizeof(u32)))
+ return -EFAULT;
+ err = nvhost_write_module_regs(ndev,
+ offs, batch, vals);
+ if (err)
+ return err;
+ } else {
+ err = nvhost_read_module_regs(ndev,
+ offs, batch, vals);
+ if (err)
+ return err;
+ if (copy_to_user(values, vals,
+ batch*sizeof(u32)))
+ return -EFAULT;
+ }
+ remaining -= batch;
+ offs += batch*sizeof(u32);
+ values += batch;
+ }
+ }
+
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_get_version(struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_get_param_args *args)
+{
+ args->value = NVHOST_SUBMIT_VERSION_MAX_SUPPORTED;
+ return 0;
+}
+
+static long nvhost_ctrlctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvhost_ctrl_userctx *priv = filp->private_data;
+ u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
+ int err = 0;
+
+ if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+ (_IOC_NR(cmd) == 0) ||
+ (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST) ||
+ (_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE))
+ return -EFAULT;
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case NVHOST_IOCTL_CTRL_SYNCPT_READ:
+ err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
+ err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
+ err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
+ err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
+ err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_SYNCPT_WAITEX:
+ err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_GET_VERSION:
+ err = nvhost_ioctl_ctrl_get_version(priv, (void *)buf);
+ break;
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
+ err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
+
+ return err;
+}
+
+static const struct file_operations nvhost_ctrlops = {
+ .owner = THIS_MODULE,
+ .release = nvhost_ctrlrelease,
+ .open = nvhost_ctrlopen,
+ .unlocked_ioctl = nvhost_ctrlctl
+};
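+
+/* Userspace reaches these handlers through the character device created
+ * below from IFACE_NAME, e.g. (sketch, syncpt id chosen arbitrarily):
+ *
+ * struct nvhost_ctrl_syncpt_read_args args = { .id = 22 };
+ * int fd = open("/dev/nvhost-ctrl", O_RDWR);
+ * ioctl(fd, NVHOST_IOCTL_CTRL_SYNCPT_READ, &args);
+ * // args.value now holds the syncpt's current value
+ */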
+
+static void power_on_host(struct nvhost_device *dev)
+{
+ struct nvhost_master *host = nvhost_get_drvdata(dev);
+ nvhost_syncpt_reset(&host->syncpt);
+ nvhost_intr_start(&host->intr, clk_get_rate(dev->clk[0]));
+}
+
+static int power_off_host(struct nvhost_device *dev)
+{
+ struct nvhost_master *host = nvhost_get_drvdata(dev);
+ nvhost_syncpt_save(&host->syncpt);
+ nvhost_intr_stop(&host->intr);
+ return 0;
+}
+
+static void clock_on_host(struct nvhost_device *dev)
+{
+ struct nvhost_master *host = nvhost_get_drvdata(dev);
+ nvhost_intr_start(&host->intr, clk_get_rate(dev->clk[0]));
+}
+
+static int clock_off_host(struct nvhost_device *dev)
+{
+ struct nvhost_master *host = nvhost_get_drvdata(dev);
+ nvhost_intr_stop(&host->intr);
+ return 0;
+}
+
+static int __devinit nvhost_user_init(struct nvhost_master *host)
+{
+ int err;
+ dev_t devno;
+
+ host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
+ if (IS_ERR(host->nvhost_class)) {
+ err = PTR_ERR(host->nvhost_class);
+ dev_err(&host->dev->dev, "failed to create class\n");
+ goto fail;
+ }
+
+ err = alloc_chrdev_region(&devno, 0, 1, IFACE_NAME);
+ if (err < 0) {
+ dev_err(&host->dev->dev, "failed to reserve chrdev region\n");
+ goto fail;
+ }
+
+ cdev_init(&host->cdev, &nvhost_ctrlops);
+ host->cdev.owner = THIS_MODULE;
+ err = cdev_add(&host->cdev, devno, 1);
+ if (err < 0)
+ goto fail;
+ host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
+ IFACE_NAME "-ctrl");
+ if (IS_ERR(host->ctrl)) {
+ err = PTR_ERR(host->ctrl);
+ dev_err(&host->dev->dev, "failed to create ctrl device\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return err;
+}
+
+struct nvhost_channel *nvhost_alloc_channel(struct nvhost_device *dev)
+{
+ BUG_ON(!host_device_op().alloc_nvhost_channel);
+ return host_device_op().alloc_nvhost_channel(dev);
+}
+
+void nvhost_free_channel(struct nvhost_channel *ch)
+{
+ BUG_ON(!host_device_op().free_nvhost_channel);
+ host_device_op().free_nvhost_channel(ch);
+}
+
+static void nvhost_free_resources(struct nvhost_master *host)
+{
+ kfree(host->intr.syncpt);
+ host->intr.syncpt = 0;
+}
+
+static int __devinit nvhost_alloc_resources(struct nvhost_master *host)
+{
+ int err;
+
+ err = nvhost_init_chip_support(host);
+ if (err)
+ return err;
+
+ host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) *
+ nvhost_syncpt_nb_pts(&host->syncpt),
+ GFP_KERNEL);
+
+ if (!host->intr.syncpt) {
+ /* frees happen in the support removal phase */
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int __devinit nvhost_probe(struct nvhost_device *dev,
+ struct nvhost_device_id *id_table)
+{
+ struct nvhost_master *host;
+ struct resource *regs, *intr0, *intr1;
+ int i, err;
+
+ regs = nvhost_get_resource(dev, IORESOURCE_MEM, 0);
+ intr0 = nvhost_get_resource(dev, IORESOURCE_IRQ, 0);
+ intr1 = nvhost_get_resource(dev, IORESOURCE_IRQ, 1);
+
+ if (!regs || !intr0 || !intr1) {
+ dev_err(&dev->dev, "missing required platform resources\n");
+ return -ENXIO;
+ }
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ /* Register host1x device as bus master */
+ host->dev = dev;
+
+ /* Copy host1x parameters */
+ memcpy(&host->info, dev->dev.platform_data,
+ sizeof(struct host1x_device_info));
+
+ host->reg_mem = request_mem_region(regs->start,
+ resource_size(regs), dev->name);
+ if (!host->reg_mem) {
+ dev_err(&dev->dev, "failed to get host register memory\n");
+ err = -ENXIO;
+ goto fail;
+ }
+
+ host->aperture = ioremap(regs->start, resource_size(regs));
+ if (!host->aperture) {
+ dev_err(&dev->dev, "failed to remap host registers\n");
+ err = -ENXIO;
+ goto fail;
+ }
+
+ err = nvhost_alloc_resources(host);
+ if (err) {
+ dev_err(&dev->dev, "failed to init chip support\n");
+ goto fail;
+ }
+
+ host->memmgr = mem_op().alloc_mgr();
+ if (!host->memmgr) {
+ dev_err(&dev->dev, "unable to create nvmap client\n");
+ err = -EIO;
+ goto fail;
+ }
+
+ /* Give pointer to host1x via driver */
+ nvhost_set_drvdata(dev, host);
+
+ nvhost_bus_add_host(host);
+
+ err = nvhost_syncpt_init(dev, &host->syncpt);
+ if (err)
+ goto fail;
+
+ err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
+ if (err)
+ goto fail;
+
+ err = nvhost_user_init(host);
+ if (err)
+ goto fail;
+
+ err = nvhost_module_init(dev);
+ if (err)
+ goto fail;
+
+ for (i = 0; i < host->dev->num_clks; i++)
+ clk_enable(host->dev->clk[i]);
+ nvhost_syncpt_reset(&host->syncpt);
+ for (i = 0; i < host->dev->num_clks; i++)
+ clk_disable(host->dev->clk[i]);
+
+ nvhost_debug_init(host);
+
+ dev_info(&dev->dev, "initialized\n");
+ return 0;
+
+fail:
+ nvhost_free_resources(host);
+ if (host->memmgr)
+ mem_op().put_mgr(host->memmgr);
+ kfree(host);
+ return err;
+}
+
+static int __exit nvhost_remove(struct nvhost_device *dev)
+{
+ struct nvhost_master *host = nvhost_get_drvdata(dev);
+ nvhost_intr_deinit(&host->intr);
+ nvhost_syncpt_deinit(&host->syncpt);
+ nvhost_free_resources(host);
+ return 0;
+}
+
+static int nvhost_suspend(struct nvhost_device *dev, pm_message_t state)
+{
+ struct nvhost_master *host = nvhost_get_drvdata(dev);
+ int ret = 0;
+
+ ret = nvhost_module_suspend(host->dev);
+ dev_info(&dev->dev, "suspend status: %d\n", ret);
+
+ return ret;
+}
+
+static int nvhost_resume(struct nvhost_device *dev)
+{
+ dev_info(&dev->dev, "resuming\n");
+ return 0;
+}
+
+static struct nvhost_driver nvhost_driver = {
+ .probe = nvhost_probe,
+ .remove = __exit_p(nvhost_remove),
+ .suspend = nvhost_suspend,
+ .resume = nvhost_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME
+ },
+ .finalize_poweron = power_on_host,
+ .prepare_poweroff = power_off_host,
+ .finalize_clockon = clock_on_host,
+ .prepare_clockoff = clock_off_host,
+};
+
+static int __init nvhost_mod_init(void)
+{
+ return nvhost_driver_register(&nvhost_driver);
+}
+
+static void __exit nvhost_mod_exit(void)
+{
+ nvhost_driver_unregister(&nvhost_driver);
+}
+
+/* host1x master device needs nvmap to be instantiated first.
+ * nvmap is instantiated via fs_initcall.
+ * Hence instantiate host1x master device using rootfs_initcall
+ * which is one level after fs_initcall. */
+rootfs_initcall(nvhost_mod_init);
+module_exit(nvhost_mod_exit);
+
diff --git a/drivers/video/tegra/host/host1x/host1x.h b/drivers/video/tegra/host/host1x/host1x.h
new file mode 100644
index 000000000000..49916b0757d4
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x.h
@@ -0,0 +1,78 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x.h
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_HOST1X_H
+#define __NVHOST_HOST1X_H
+
+#include <linux/cdev.h>
+#include <linux/nvhost.h>
+
+#include "nvhost_syncpt.h"
+#include "nvhost_intr.h"
+
+#define TRACE_MAX_LENGTH 128U
+#define IFACE_NAME "nvhost"
+
+struct nvhost_channel;
+struct mem_mgr;
+
+struct host1x_device_info {
+ int nb_channels; /* host1x: num channels supported */
+ int nb_pts; /* host1x: num syncpoints supported */
+ int nb_bases; /* host1x: num syncpt wait bases supported */
+ u32 client_managed; /* host1x: client managed syncpts */
+ int nb_mlocks; /* host1x: number of mlocks */
+ const char **syncpt_names; /* names of sync points */
+};
+
+struct nvhost_master {
+ void __iomem *aperture;
+ void __iomem *sync_aperture;
+ struct resource *reg_mem;
+ struct class *nvhost_class;
+ struct cdev cdev;
+ struct device *ctrl;
+ struct nvhost_syncpt syncpt;
+ struct mem_mgr *memmgr;
+ struct nvhost_intr intr;
+ struct nvhost_device *dev;
+ atomic_t clientid;
+
+ struct host1x_device_info info;
+};
+
+extern struct nvhost_master *nvhost;
+
+void nvhost_debug_init(struct nvhost_master *master);
+void nvhost_debug_dump(struct nvhost_master *master);
+
+struct nvhost_channel *nvhost_alloc_channel(struct nvhost_device *dev);
+void nvhost_free_channel(struct nvhost_channel *ch);
+
+extern pid_t nvhost_debug_null_kickoff_pid;
+
+static inline struct nvhost_master *nvhost_get_host(struct nvhost_device *_dev)
+{
+ return (_dev->dev.parent) ?
+ ((struct nvhost_master *) dev_get_drvdata(_dev->dev.parent)) :
+ ((struct nvhost_master *) dev_get_drvdata(&(_dev->dev)));
+}
+
+#endif
diff --git a/drivers/video/tegra/host/host1x/host1x01_hardware.h b/drivers/video/tegra/host/host1x/host1x01_hardware.h
new file mode 100644
index 000000000000..1d30cc74266a
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x01_hardware.h
@@ -0,0 +1,170 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x01_hardware.h
+ *
+ * Tegra Graphics Host Register Offsets for T20/T30
+ *
+ * Copyright (c) 2010-2012 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_HOST1X01_HARDWARE_H
+#define __NVHOST_HOST1X01_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include "hw_host1x01_channel.h"
+#include "hw_host1x01_sync.h"
+#include "hw_host1x01_uclass.h"
+
+/* class ids */
+enum {
+ NV_HOST1X_CLASS_ID = 0x1,
+ NV_VIDEO_ENCODE_MPEG_CLASS_ID = 0x20,
+ NV_GRAPHICS_3D_CLASS_ID = 0x60
+};
+
+
+/* channel registers */
+#define NV_HOST1X_CHANNEL_MAP_SIZE_BYTES 16384
+#define NV_HOST1X_SYNC_MLOCK_NUM 16
+
+/* sync registers */
+#define HOST1X_CHANNEL_SYNC_REG_BASE 0x3000
+#define NV_HOST1X_NB_MLOCKS 16
+
+static inline u32 nvhost_class_host_wait_syncpt(
+ unsigned indx, unsigned threshold)
+{
+ return (indx << 24) | (threshold & 0xffffff);
+}
+
+static inline u32 nvhost_class_host_load_syncpt_base(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+ | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 nvhost_class_host_wait_syncpt_base(
+ unsigned indx, unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_wait_syncpt_base_indx_f(indx)
+ | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 nvhost_class_host_incr_syncpt_base(
+ unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 nvhost_class_host_incr_syncpt(
+ unsigned cond, unsigned indx)
+{
+ return host1x_uclass_incr_syncpt_cond_f(cond)
+ | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+enum {
+ NV_HOST_MODULE_HOST1X = 0,
+ NV_HOST_MODULE_MPE = 1,
+ NV_HOST_MODULE_GR3D = 6
+};
+
+static inline u32 nvhost_class_host_indoff_reg_write(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indbe_f(0xf)
+ | host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset);
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+static inline u32 nvhost_class_host_indoff_reg_read(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset)
+ | host1x_uclass_indoff_rwn_read_v();
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+
+/* cdma opcodes */
+static inline u32 nvhost_opcode_setclass(
+ unsigned class_id, unsigned offset, unsigned mask)
+{
+ return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 nvhost_opcode_incr(unsigned offset, unsigned count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask)
+{
+ return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value)
+{
+ return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 nvhost_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+ return nvhost_opcode_imm(host1x_uclass_incr_syncpt_r(),
+ nvhost_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 nvhost_opcode_restart(unsigned address)
+{
+ return (5 << 28) | (address >> 4);
+}
+
+static inline u32 nvhost_opcode_gather(unsigned count)
+{
+ return (6 << 28) | count;
+}
+
+static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0)
+
+static inline u32 nvhost_mask2(unsigned x, unsigned y)
+{
+ return 1 | (1 << (y - x));
+}
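+
+/* Encoding examples: nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0)
+ * yields 0x00001800 (opcode 0, class 0x60 at bits 6..13);
+ * nvhost_opcode_incr(0x500, 2) yields 0x15000002 (opcode 1, offset 0x500,
+ * count 2). The top nibble always selects the opcode.
+ */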
+
+#endif
diff --git a/drivers/video/tegra/host/host1x/host1x_cdma.c b/drivers/video/tegra/host/host1x/host1x_cdma.c
new file mode 100644
index 000000000000..5a29ff652efe
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_cdma.c
@@ -0,0 +1,517 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_cdma.c
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include "nvhost_acm.h"
+#include "nvhost_cdma.h"
+#include "nvhost_channel.h"
+#include "dev.h"
+#include "chip_support.h"
+#include "nvhost_memmgr.h"
+
+#include "host1x_cdma.h"
+#include "host1x_hwctx.h"
+
+static inline u32 host1x_channel_dmactrl(int stop, int get_rst, int init_get)
+{
+ return host1x_channel_dmactrl_dmastop_f(stop)
+ | host1x_channel_dmactrl_dmagetrst_f(get_rst)
+ | host1x_channel_dmactrl_dmainitget_f(init_get);
+}
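+
+/* Example: host1x_channel_dmactrl(true, true, true) asserts DMASTOP,
+ * DMAGETRST and DMAINITGET together, i.e. "hold DMA stopped and reset
+ * GET", which is the step used below when (re)starting a channel.
+ */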
+
+static void cdma_timeout_handler(struct work_struct *work);
+
+/*
+ * push_buffer
+ *
+ * The push buffer is a circular array of words to be fetched by command DMA.
+ * Note that it works slightly differently to the sync queue; fence == cur
+ * means that the push buffer is full, not empty.
+ */
+
+
+/**
+ * Reset to empty push buffer
+ */
+static void push_buffer_reset(struct push_buffer *pb)
+{
+ pb->fence = PUSH_BUFFER_SIZE - 8;
+ pb->cur = 0;
+}
+
+/**
+ * Init push buffer resources
+ */
+static int push_buffer_init(struct push_buffer *pb)
+{
+ struct nvhost_cdma *cdma = pb_to_cdma(pb);
+ struct mem_mgr *mgr = cdma_to_memmgr(cdma);
+ pb->mem = NULL;
+ pb->mapped = NULL;
+ pb->phys = 0;
+ pb->client_handle = NULL;
+
+ BUG_ON(!cdma_pb_op().reset);
+ cdma_pb_op().reset(pb);
+
+ /* allocate and map pushbuffer memory */
+ pb->mem = mem_op().alloc(mgr, PUSH_BUFFER_SIZE + 4, 32,
+ mem_mgr_flag_write_combine);
+ if (IS_ERR_OR_NULL(pb->mem)) {
+ pb->mem = NULL;
+ goto fail;
+ }
+ pb->mapped = mem_op().mmap(pb->mem);
+ if (pb->mapped == NULL)
+ goto fail;
+
+ /* pin pushbuffer and get physical address */
+ pb->phys = mem_op().pin(mgr, pb->mem);
+ if (pb->phys >= 0xfffff000) {
+ pb->phys = 0;
+ goto fail;
+ }
+
+ /* memory for storing nvmap client and handles for each opcode pair */
+ pb->client_handle = kzalloc(NVHOST_GATHER_QUEUE_SIZE *
+ sizeof(struct mem_mgr_handle),
+ GFP_KERNEL);
+ if (!pb->client_handle)
+ goto fail;
+
+ /* put the restart at the end of pushbuffer memory */
+ *(pb->mapped + (PUSH_BUFFER_SIZE >> 2)) =
+ nvhost_opcode_restart(pb->phys);
+
+ return 0;
+
+fail:
+ cdma_pb_op().destroy(pb);
+ return -ENOMEM;
+}
+
+/**
+ * Clean up push buffer resources
+ */
+static void push_buffer_destroy(struct push_buffer *pb)
+{
+ struct nvhost_cdma *cdma = pb_to_cdma(pb);
+ struct mem_mgr *mgr = cdma_to_memmgr(cdma);
+ if (pb->mapped)
+ mem_op().munmap(pb->mem, pb->mapped);
+
+ if (pb->phys != 0)
+ mem_op().unpin(mgr, pb->mem);
+
+ if (pb->mem)
+ mem_op().put(mgr, pb->mem);
+
+ kfree(pb->client_handle);
+
+ pb->mem = NULL;
+ pb->mapped = NULL;
+ pb->phys = 0;
+ pb->client_handle = 0;
+}
+
+/**
+ * Push two words to the push buffer
+ * Caller must ensure push buffer is not full
+ */
+static void push_buffer_push_to(struct push_buffer *pb,
+ struct mem_mgr *client, struct mem_handle *handle,
+ u32 op1, u32 op2)
+{
+ u32 cur = pb->cur;
+ u32 *p = (u32 *)((u32)pb->mapped + cur);
+ u32 cur_nvmap = (cur/8) & (NVHOST_GATHER_QUEUE_SIZE - 1);
+ BUG_ON(cur == pb->fence);
+ *(p++) = op1;
+ *(p++) = op2;
+ pb->client_handle[cur_nvmap].client = client;
+ pb->client_handle[cur_nvmap].handle = handle;
+ pb->cur = (cur + 8) & (PUSH_BUFFER_SIZE - 1);
+}
+
+/**
+ * Pop a number of two word slots from the push buffer
+ * Caller must ensure push buffer is not empty
+ */
+static void push_buffer_pop_from(struct push_buffer *pb,
+ unsigned int slots)
+{
+ /* Clear the nvmap references for old items from pb */
+ unsigned int i;
+ u32 fence_nvmap = pb->fence/8;
+ for (i = 0; i < slots; i++) {
+ int cur_fence_nvmap = (fence_nvmap+i)
+ & (NVHOST_GATHER_QUEUE_SIZE - 1);
+ struct mem_mgr_handle *h = &pb->client_handle[cur_fence_nvmap];
+ h->client = NULL;
+ h->handle = NULL;
+ }
+ /* Advance the next write position */
+ pb->fence = (pb->fence + slots * 8) & (PUSH_BUFFER_SIZE - 1);
+}
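+
+/* Example: with PUSH_BUFFER_SIZE = 4096, popping 3 slots at fence = 4064
+ * clears client/handle entries 508..510 and advances fence to
+ * (4064 + 24) & (PUSH_BUFFER_SIZE - 1) = 4088.
+ */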
+
+/**
+ * Return the number of two word slots free in the push buffer
+ */
+static u32 push_buffer_space(struct push_buffer *pb)
+{
+ return ((pb->fence - pb->cur) & (PUSH_BUFFER_SIZE - 1)) / 8;
+}
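+
+/* Example: after push_buffer_reset(), cur = 0 and fence =
+ * PUSH_BUFFER_SIZE - 8 = 4088, so space() = 4088 / 8 = 511 slots; one
+ * slot is sacrificed so that fence == cur unambiguously means full.
+ */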
+
+static u32 push_buffer_putptr(struct push_buffer *pb)
+{
+ return pb->phys + pb->cur;
+}
+
+/*
+ * The syncpt incr buffer is filled with methods to increment syncpts and is
+ * later GATHER-ed into the mainline PB. It is used when a timed-out context
+ * is interleaved with other work: the syncpt increments must be inlined to
+ * maintain the count (but otherwise no work is done).
+ */
+
+/**
+ * Init timeout resources
+ */
+static int cdma_timeout_init(struct nvhost_cdma *cdma,
+ u32 syncpt_id)
+{
+ if (syncpt_id == NVSYNCPT_INVALID)
+ return -EINVAL;
+
+ INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
+ cdma->timeout.initialized = true;
+
+ return 0;
+}
+
+/**
+ * Clean up timeout resources
+ */
+static void cdma_timeout_destroy(struct nvhost_cdma *cdma)
+{
+ if (cdma->timeout.initialized)
+ cancel_delayed_work(&cdma->timeout.wq);
+ cdma->timeout.initialized = false;
+}
+
+/**
+ * Increment timedout buffer's syncpt via CPU.
+ */
+static void cdma_timeout_cpu_incr(struct nvhost_cdma *cdma, u32 getptr,
+ u32 syncpt_incrs, u32 syncval, u32 nr_slots,
+ u32 waitbases)
+{
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+ struct push_buffer *pb = &cdma->push_buffer;
+ u32 i, getidx;
+
+ for (i = 0; i < syncpt_incrs; i++)
+ nvhost_syncpt_cpu_incr(&dev->syncpt, cdma->timeout.syncpt_id);
+
+ /* after CPU incr, ensure shadow is up to date */
+ nvhost_syncpt_update_min(&dev->syncpt, cdma->timeout.syncpt_id);
+
+ /* Synchronize wait bases. 2D wait bases are synchronized with
+ * syncpoint 19. Hence wait bases are not updated when syncptid=18. */
+
+ if (cdma->timeout.syncpt_id != NVSYNCPT_2D_0 && waitbases) {
+ void __iomem *p;
+ p = dev->sync_aperture + host1x_sync_syncpt_base_0_r() +
+ (__ffs(waitbases) * sizeof(u32));
+ writel(syncval, p);
+ dev->syncpt.base_val[__ffs(waitbases)] = syncval;
+ }
+
+ /* NOP all the PB slots */
+ getidx = getptr - pb->phys;
+ while (nr_slots--) {
+ u32 *p = (u32 *)((u32)pb->mapped + getidx);
+ *(p++) = NVHOST_OPCODE_NOOP;
+ *(p++) = NVHOST_OPCODE_NOOP;
+ dev_dbg(&dev->dev->dev, "%s: NOP at 0x%x\n",
+ __func__, pb->phys + getidx);
+ getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1);
+ }
+ wmb();
+}
+
+/**
+ * Start channel DMA
+ */
+static void cdma_start(struct nvhost_cdma *cdma)
+{
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+ if (cdma->running)
+ return;
+
+ BUG_ON(!cdma_pb_op().putptr);
+ cdma->last_put = cdma_pb_op().putptr(&cdma->push_buffer);
+
+ writel(host1x_channel_dmactrl(true, false, false),
+ chan_regs + host1x_channel_dmactrl_r());
+
+ /* set base, put, end pointer (all of memory) */
+ writel(0, chan_regs + host1x_channel_dmastart_r());
+ writel(cdma->last_put, chan_regs + host1x_channel_dmaput_r());
+ writel(0xFFFFFFFF, chan_regs + host1x_channel_dmaend_r());
+
+ /* reset GET */
+ writel(host1x_channel_dmactrl(true, true, true),
+ chan_regs + host1x_channel_dmactrl_r());
+
+ /* start the command DMA */
+ writel(host1x_channel_dmactrl(false, false, false),
+ chan_regs + host1x_channel_dmactrl_r());
+
+ cdma->running = true;
+}
+
+/**
+ * Similar to cdma_start(), but rather than starting from an idle
+ * state (where DMA GET is set to DMA PUT), on a timeout we restore
+ * DMA GET from an explicit value (so DMA may again be pending).
+ */
+static void cdma_timeout_restart(struct nvhost_cdma *cdma, u32 getptr)
+{
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+ if (cdma->running)
+ return;
+
+ BUG_ON(!cdma_pb_op().putptr);
+ cdma->last_put = cdma_pb_op().putptr(&cdma->push_buffer);
+
+ writel(host1x_channel_dmactrl(true, false, false),
+ chan_regs + host1x_channel_dmactrl_r());
+
+ /* set base, end pointer (all of memory) */
+ writel(0, chan_regs + host1x_channel_dmastart_r());
+ writel(0xFFFFFFFF, chan_regs + host1x_channel_dmaend_r());
+
+ /* set GET, by loading the value in PUT (then reset GET) */
+ writel(getptr, chan_regs + host1x_channel_dmaput_r());
+ writel(host1x_channel_dmactrl(true, true, true),
+ chan_regs + host1x_channel_dmactrl_r());
+
+ dev_dbg(&dev->dev->dev,
+ "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
+ __func__,
+ readl(chan_regs + host1x_channel_dmaget_r()),
+ readl(chan_regs + host1x_channel_dmaput_r()),
+ cdma->last_put);
+
+ /* deassert GET reset and set PUT */
+ writel(host1x_channel_dmactrl(true, false, false),
+ chan_regs + host1x_channel_dmactrl_r());
+ writel(cdma->last_put, chan_regs + host1x_channel_dmaput_r());
+
+ /* start the command DMA */
+ writel(host1x_channel_dmactrl(false, false, false),
+ chan_regs + host1x_channel_dmactrl_r());
+
+ cdma->running = true;
+}
+
+/**
+ * Kick channel DMA into action by writing its PUT offset (if it has changed)
+ */
+static void cdma_kick(struct nvhost_cdma *cdma)
+{
+ u32 put;
+ BUG_ON(!cdma_pb_op().putptr);
+
+ put = cdma_pb_op().putptr(&cdma->push_buffer);
+
+ if (put != cdma->last_put) {
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+ wmb();
+ writel(put, chan_regs + host1x_channel_dmaput_r());
+ cdma->last_put = put;
+ }
+}
+
+static void cdma_stop(struct nvhost_cdma *cdma)
+{
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+ mutex_lock(&cdma->lock);
+ if (cdma->running) {
+ nvhost_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
+ writel(host1x_channel_dmactrl(true, false, false),
+ chan_regs + host1x_channel_dmactrl_r());
+ cdma->running = false;
+ }
+ mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Stops both channel's command processor and CDMA immediately.
+ * Also, tears down the channel and resets corresponding module.
+ */
+static void cdma_timeout_teardown_begin(struct nvhost_cdma *cdma)
+{
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+ struct nvhost_channel *ch = cdma_to_channel(cdma);
+ u32 cmdproc_stop;
+
+ BUG_ON(cdma->torndown);
+
+ dev_dbg(&dev->dev->dev,
+ "begin channel teardown (channel id %d)\n", ch->chid);
+
+ cmdproc_stop = readl(dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+ cmdproc_stop |= BIT(ch->chid);
+ writel(cmdproc_stop, dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+
+ dev_dbg(&dev->dev->dev,
+ "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
+ __func__,
+ readl(ch->aperture + host1x_channel_dmaget_r()),
+ readl(ch->aperture + host1x_channel_dmaput_r()),
+ cdma->last_put);
+
+ writel(host1x_channel_dmactrl(true, false, false),
+ ch->aperture + host1x_channel_dmactrl_r());
+
+ writel(BIT(ch->chid), dev->sync_aperture + host1x_sync_ch_teardown_r());
+ nvhost_module_reset(ch->dev);
+
+ cdma->running = false;
+ cdma->torndown = true;
+}
+
+static void cdma_timeout_teardown_end(struct nvhost_cdma *cdma, u32 getptr)
+{
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+ struct nvhost_channel *ch = cdma_to_channel(cdma);
+ u32 cmdproc_stop;
+
+ BUG_ON(!cdma->torndown || cdma->running);
+
+ dev_dbg(&dev->dev->dev,
+ "end channel teardown (id %d, DMAGET restart = 0x%x)\n",
+ ch->chid, getptr);
+
+ cmdproc_stop = readl(dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+ cmdproc_stop &= ~(BIT(ch->chid));
+ writel(cmdproc_stop, dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+
+ cdma->torndown = false;
+ cdma_timeout_restart(cdma, getptr);
+}
+
+/**
+ * If this timeout fires, the current sync_queue entry has exceeded its TTL:
+ * the userctx is timed out, submits already issued are cleaned up, and
+ * future submits return an error.
+ */
+static void cdma_timeout_handler(struct work_struct *work)
+{
+ struct nvhost_cdma *cdma;
+ struct nvhost_master *dev;
+ struct nvhost_syncpt *sp;
+ struct nvhost_channel *ch;
+
+ u32 syncpt_val;
+
+ u32 prev_cmdproc, cmdproc_stop;
+
+ cdma = container_of(to_delayed_work(work), struct nvhost_cdma,
+ timeout.wq);
+ dev = cdma_to_dev(cdma);
+ sp = &dev->syncpt;
+ ch = cdma_to_channel(cdma);
+
+ mutex_lock(&cdma->lock);
+
+ if (!cdma->timeout.clientid) {
+ dev_dbg(&dev->dev->dev,
+ "cdma_timeout: expired, but has no clientid\n");
+ mutex_unlock(&cdma->lock);
+ return;
+ }
+
+ /* stop processing to get a clean snapshot */
+ prev_cmdproc = readl(dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+ cmdproc_stop = prev_cmdproc | BIT(ch->chid);
+ writel(cmdproc_stop, dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+
+ dev_dbg(&dev->dev->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
+ prev_cmdproc, cmdproc_stop);
+
+ syncpt_val = nvhost_syncpt_update_min(&dev->syncpt,
+ cdma->timeout.syncpt_id);
+
+ /* has buffer actually completed? */
+ if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) {
+ dev_dbg(&dev->dev->dev,
+ "cdma_timeout: expired, but buffer had completed\n");
+ /* restore */
+ cmdproc_stop = prev_cmdproc & ~(BIT(ch->chid));
+ writel(cmdproc_stop,
+ dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+ mutex_unlock(&cdma->lock);
+ return;
+ }
+
+ dev_warn(&dev->dev->dev,
+ "%s: timeout: %d (%s) ctx 0x%p, HW thresh %d, done %d\n",
+ __func__,
+ cdma->timeout.syncpt_id,
+ syncpt_op().name(sp, cdma->timeout.syncpt_id),
+ cdma->timeout.ctx,
+ syncpt_val, cdma->timeout.syncpt_val);
+
+ /* stop HW, resetting channel/module */
+ cdma_op().timeout_teardown_begin(cdma);
+
+ nvhost_cdma_update_sync_queue(cdma, sp, ch->dev);
+ mutex_unlock(&cdma->lock);
+}
+
+static const struct nvhost_cdma_ops host1x_cdma_ops = {
+ .start = cdma_start,
+ .stop = cdma_stop,
+ .kick = cdma_kick,
+
+ .timeout_init = cdma_timeout_init,
+ .timeout_destroy = cdma_timeout_destroy,
+ .timeout_teardown_begin = cdma_timeout_teardown_begin,
+ .timeout_teardown_end = cdma_timeout_teardown_end,
+ .timeout_cpu_incr = cdma_timeout_cpu_incr,
+};
+
+static const struct nvhost_pushbuffer_ops host1x_pushbuffer_ops = {
+ .reset = push_buffer_reset,
+ .init = push_buffer_init,
+ .destroy = push_buffer_destroy,
+ .push_to = push_buffer_push_to,
+ .pop_from = push_buffer_pop_from,
+ .space = push_buffer_space,
+ .putptr = push_buffer_putptr,
+};
+
diff --git a/drivers/video/tegra/host/host1x/host1x_cdma.h b/drivers/video/tegra/host/host1x/host1x_cdma.h
new file mode 100644
index 000000000000..94bfc092c8c9
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_cdma.h
@@ -0,0 +1,39 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_cdma.h
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_HOST1X_HOST1X_CDMA_H
+#define __NVHOST_HOST1X_HOST1X_CDMA_H
+
+/* Size of the sync queue. If it is too small, we won't be able to queue up
+ * many command buffers. If it is too large, we waste memory. */
+#define NVHOST_SYNC_QUEUE_SIZE 512
+
+/* Number of gathers we allow to be queued up per channel. Must be a
+ * power of two. Currently sized such that pushbuffer is 4KB (512*8B). */
+#define NVHOST_GATHER_QUEUE_SIZE 512
+
+/* 8 bytes per slot. (This number does not include the final RESTART.) */
+#define PUSH_BUFFER_SIZE (NVHOST_GATHER_QUEUE_SIZE * 8)
+
+/* 4K page containing GATHERed methods to increment channel syncpts
+ * and replaces the original timed out contexts GATHER slots */
+#define SYNCPT_INCR_BUFFER_SIZE_WORDS (4096 / sizeof(u32))
+
+#endif
diff --git a/drivers/video/tegra/host/host1x/host1x_channel.c b/drivers/video/tegra/host/host1x/host1x_channel.c
new file mode 100644
index 000000000000..0274413ff698
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_channel.c
@@ -0,0 +1,681 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_channel.c
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_channel.h"
+#include "dev.h"
+#include "nvhost_acm.h"
+#include "nvhost_job.h"
+#include "nvhost_hwctx.h"
+#include <trace/events/nvhost.h>
+#include <linux/slab.h>
+
+#include "host1x_hwctx.h"
+#include "nvhost_intr.h"
+
+#define NV_FIFO_READ_TIMEOUT 200000
+
+static int host1x_drain_read_fifo(struct nvhost_channel *ch,
+ u32 *ptr, unsigned int count, unsigned int *pending);
+
+static void sync_waitbases(struct nvhost_channel *ch, u32 syncpt_val)
+{
+ unsigned long waitbase;
+ unsigned long int waitbase_mask = ch->dev->waitbases;
+ if (ch->dev->waitbasesync) {
+ waitbase = find_first_bit(&waitbase_mask, BITS_PER_LONG);
+ nvhost_cdma_push(&ch->cdma,
+ nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_load_syncpt_base_r(),
+ 1),
+ nvhost_class_host_load_syncpt_base(waitbase,
+ syncpt_val));
+ }
+}
+
+static void *pre_submit_ctxsave(struct nvhost_job *job,
+ struct nvhost_hwctx *cur_ctx)
+{
+ struct nvhost_channel *ch = job->ch;
+ void *ctxsave_waiter = NULL;
+
+ /* Is a save needed? */
+ if (!cur_ctx || cur_ctx == job->hwctx)
+ return NULL;
+
+ if (cur_ctx->has_timedout) {
+ dev_dbg(&ch->dev->dev,
+ "%s: skip save of timed out context (0x%p)\n",
+ __func__, cur_ctx);
+
+ return NULL;
+ }
+
+ /* Allocate save waiter if needed */
+ if (ch->ctxhandler->save_service) {
+ ctxsave_waiter = nvhost_intr_alloc_waiter();
+ if (!ctxsave_waiter)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return ctxsave_waiter;
+}
+
+static void submit_ctxsave(struct nvhost_job *job, void *ctxsave_waiter,
+ struct nvhost_hwctx *cur_ctx)
+{
+ struct nvhost_master *host = nvhost_get_host(job->ch->dev);
+ struct nvhost_channel *ch = job->ch;
+ u32 syncval;
+ int err;
+ u32 save_thresh = 0;
+
+ /* Is a save needed? */
+ if (!cur_ctx || cur_ctx == job->hwctx || cur_ctx->has_timedout)
+ return;
+
+ /* Retrieve save threshold if we have a waiter */
+ if (ctxsave_waiter)
+ save_thresh =
+ nvhost_syncpt_read_max(&host->syncpt, job->syncpt_id)
+ + to_host1x_hwctx(cur_ctx)->save_thresh;
+
+ /* Adjust the syncpoint max */
+ job->syncpt_incrs += to_host1x_hwctx(cur_ctx)->save_incrs;
+ syncval = nvhost_syncpt_incr_max(&host->syncpt,
+ job->syncpt_id,
+ to_host1x_hwctx(cur_ctx)->save_incrs);
+
+ /* Send the save to channel */
+ cur_ctx->valid = true;
+ ch->ctxhandler->save_push(cur_ctx, &ch->cdma);
+ nvhost_job_get_hwctx(job, cur_ctx);
+
+ /* Notify save service */
+ if (ctxsave_waiter) {
+ err = nvhost_intr_add_action(&host->intr,
+ job->syncpt_id,
+ save_thresh,
+ NVHOST_INTR_ACTION_CTXSAVE, cur_ctx,
+ ctxsave_waiter,
+ NULL);
+ ctxsave_waiter = NULL;
+ WARN(err, "Failed to set ctx save interrupt");
+ }
+
+ trace_nvhost_channel_context_save(ch->dev->name, cur_ctx);
+}
+
+static void submit_ctxrestore(struct nvhost_job *job)
+{
+ struct nvhost_master *host = nvhost_get_host(job->ch->dev);
+ struct nvhost_channel *ch = job->ch;
+ u32 syncval;
+ struct host1x_hwctx *ctx =
+ job->hwctx ? to_host1x_hwctx(job->hwctx) : NULL;
+
+ /* First check if we have a valid context to restore */
+ if (ch->cur_ctx == job->hwctx || !job->hwctx || !job->hwctx->valid)
+ return;
+
+ /* Increment syncpt max */
+ job->syncpt_incrs += ctx->restore_incrs;
+ syncval = nvhost_syncpt_incr_max(&host->syncpt,
+ job->syncpt_id,
+ ctx->restore_incrs);
+
+ /* Send restore buffer to channel */
+ nvhost_cdma_push_gather(&ch->cdma,
+ host->memmgr,
+ ctx->restore,
+ 0,
+ nvhost_opcode_gather(ctx->restore_size),
+ ctx->restore_phys);
+
+ trace_nvhost_channel_context_restore(ch->dev->name, &ctx->hwctx);
+}
+
+static void submit_nullkickoff(struct nvhost_job *job, int user_syncpt_incrs)
+{
+ struct nvhost_channel *ch = job->ch;
+ int incr;
+ u32 op_incr;
+
+ /* push increments that correspond to nulled out commands */
+ op_incr = nvhost_opcode_imm_incr_syncpt(
+ host1x_uclass_incr_syncpt_cond_op_done_v(),
+ job->syncpt_id);
+ for (incr = 0; incr < (user_syncpt_incrs >> 1); incr++)
+ nvhost_cdma_push(&ch->cdma, op_incr, op_incr);
+ if (user_syncpt_incrs & 1)
+ nvhost_cdma_push(&ch->cdma, op_incr, NVHOST_OPCODE_NOOP);
+
+ /* for 3d, waitbase needs to be incremented after each submit */
+ if (ch->dev->class == NV_GRAPHICS_3D_CLASS_ID) {
+ u32 waitbase = to_host1x_hwctx_handler(job->hwctx->h)->waitbase;
+ nvhost_cdma_push(&ch->cdma,
+ nvhost_opcode_setclass(
+ NV_HOST1X_CLASS_ID,
+ host1x_uclass_incr_syncpt_base_r(),
+ 1),
+ nvhost_class_host_incr_syncpt_base(
+ waitbase,
+ user_syncpt_incrs));
+ }
+}
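+
+/* Example: with user_syncpt_incrs = 5, the loop pushes two slots of
+ * paired increments (4 incrs) and the odd increment is paired with a
+ * NOOP, so the syncpoint still advances by exactly 5 with no gathers
+ * executed.
+ */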
+
+static void submit_gathers(struct nvhost_job *job)
+{
+ /* push user gathers */
+ int i;
+ for (i = 0 ; i < job->num_gathers; i++) {
+ u32 op1 = nvhost_opcode_gather(job->gathers[i].words);
+ u32 op2 = job->gathers[i].mem;
+ nvhost_cdma_push_gather(&job->ch->cdma,
+ job->memmgr,
+ job->gathers[i].ref,
+ job->gathers[i].offset,
+ op1, op2);
+ }
+}
+
+static int host1x_channel_submit(struct nvhost_job *job)
+{
+ struct nvhost_channel *ch = job->ch;
+ struct nvhost_syncpt *sp = &nvhost_get_host(job->ch->dev)->syncpt;
+ u32 user_syncpt_incrs = job->syncpt_incrs;
+ u32 prev_max = 0;
+ u32 syncval;
+ int err;
+ void *completed_waiter = NULL, *ctxsave_waiter = NULL;
+ struct nvhost_driver *drv = to_nvhost_driver(ch->dev->dev.driver);
+
+ /* Bail out on timed out contexts */
+ if (job->hwctx && job->hwctx->has_timedout)
+ return -ETIMEDOUT;
+
+ /* Turn on the client module and host1x */
+ nvhost_module_busy(ch->dev);
+ if (drv->busy)
+ drv->busy(ch->dev);
+
+ /* before error checks, return current max */
+ prev_max = job->syncpt_end =
+ nvhost_syncpt_read_max(sp, job->syncpt_id);
+
+ /* get submit lock */
+ err = mutex_lock_interruptible(&ch->submitlock);
+ if (err) {
+ nvhost_module_idle(ch->dev);
+ goto error;
+ }
+
+ /* Do the needed allocations */
+ ctxsave_waiter = pre_submit_ctxsave(job, ch->cur_ctx);
+ if (IS_ERR(ctxsave_waiter)) {
+ err = PTR_ERR(ctxsave_waiter);
+ nvhost_module_idle(ch->dev);
+ mutex_unlock(&ch->submitlock);
+ goto error;
+ }
+
+ completed_waiter = nvhost_intr_alloc_waiter();
+ if (!completed_waiter) {
+ nvhost_module_idle(ch->dev);
+ mutex_unlock(&ch->submitlock);
+ err = -ENOMEM;
+ goto error;
+ }
+
+ /* begin a CDMA submit */
+ err = nvhost_cdma_begin(&ch->cdma, job);
+ if (err) {
+ mutex_unlock(&ch->submitlock);
+ nvhost_module_idle(ch->dev);
+ goto error;
+ }
+
+ if (ch->dev->serialize) {
+ /* Force serialization by inserting a host wait for the
+ * previous job to finish before this one can commence. */
+ nvhost_cdma_push(&ch->cdma,
+ nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_wait_syncpt_r(),
+ 1),
+ nvhost_class_host_wait_syncpt(job->syncpt_id,
+ nvhost_syncpt_read_max(sp,
+ job->syncpt_id)));
+ }
+
+ submit_ctxsave(job, ctxsave_waiter, ch->cur_ctx);
+ submit_ctxrestore(job);
+ ch->cur_ctx = job->hwctx;
+
+ syncval = nvhost_syncpt_incr_max(sp,
+ job->syncpt_id, user_syncpt_incrs);
+
+ job->syncpt_end = syncval;
+
+ /* add a setclass for modules that require it */
+ if (ch->dev->class)
+ nvhost_cdma_push(&ch->cdma,
+ nvhost_opcode_setclass(ch->dev->class, 0, 0),
+ NVHOST_OPCODE_NOOP);
+
+ if (job->null_kickoff)
+ submit_nullkickoff(job, user_syncpt_incrs);
+ else
+ submit_gathers(job);
+
+ sync_waitbases(ch, job->syncpt_end);
+
+ /* end CDMA submit & stash pinned hMems into sync queue */
+ nvhost_cdma_end(&ch->cdma, job);
+
+ trace_nvhost_channel_submitted(ch->dev->name,
+ prev_max, syncval);
+
+ /* schedule a submit complete interrupt */
+ err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr,
+ job->syncpt_id, syncval,
+ NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ch,
+ completed_waiter,
+ NULL);
+ completed_waiter = NULL;
+ WARN(err, "Failed to set submit complete interrupt");
+
+ mutex_unlock(&ch->submitlock);
+
+ return 0;
+
+error:
+ kfree(ctxsave_waiter);
+ kfree(completed_waiter);
+ return err;
+}
+
+static int host1x_channel_read_3d_reg(
+ struct nvhost_channel *channel,
+ struct nvhost_hwctx *hwctx,
+ u32 offset,
+ u32 *value)
+{
+ struct host1x_hwctx *hwctx_to_save = NULL;
+ struct nvhost_hwctx_handler *h;
+ struct host1x_hwctx_handler *p;
+ bool need_restore = false;
+ u32 syncpt_incrs = 4;
+ unsigned int pending = 0;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ void *ref;
+ void *ctx_waiter, *read_waiter, *completed_waiter;
+ struct nvhost_job *job;
+ u32 syncval;
+ int err;
+
+ /* check hwctx before it is dereferenced */
+ if (!hwctx || hwctx->has_timedout)
+ return -ETIMEDOUT;
+
+ h = hwctx->h;
+ p = to_host1x_hwctx_handler(h);
+
+ ctx_waiter = nvhost_intr_alloc_waiter();
+ read_waiter = nvhost_intr_alloc_waiter();
+ completed_waiter = nvhost_intr_alloc_waiter();
+ if (!ctx_waiter || !read_waiter || !completed_waiter) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ job = nvhost_job_alloc(channel, hwctx,
+ NULL,
+ nvhost_get_host(channel->dev)->memmgr, 0, 0);
+ if (!job) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ /* keep module powered */
+ nvhost_module_busy(channel->dev);
+
+ /* get submit lock */
+ err = mutex_lock_interruptible(&channel->submitlock);
+ if (err) {
+ nvhost_module_idle(channel->dev);
+ return err;
+ }
+
+ /* context switch */
+ if (channel->cur_ctx != hwctx) {
+ hwctx_to_save = channel->cur_ctx ?
+ to_host1x_hwctx(channel->cur_ctx) : NULL;
+ if (hwctx_to_save) {
+ syncpt_incrs += hwctx_to_save->save_incrs;
+ hwctx_to_save->hwctx.valid = true;
+ nvhost_job_get_hwctx(job, &hwctx_to_save->hwctx);
+ }
+ channel->cur_ctx = hwctx;
+ if (channel->cur_ctx && channel->cur_ctx->valid) {
+ need_restore = true;
+ syncpt_incrs += to_host1x_hwctx(channel->cur_ctx)
+ ->restore_incrs;
+ }
+ }
+
+ syncval = nvhost_syncpt_incr_max(&nvhost_get_host(channel->dev)->syncpt,
+ p->syncpt, syncpt_incrs);
+
+ job->syncpt_id = p->syncpt;
+ job->syncpt_incrs = syncpt_incrs;
+ job->syncpt_end = syncval;
+
+ /* begin a CDMA submit */
+ err = nvhost_cdma_begin(&channel->cdma, job);
+ if (err) {
+ nvhost_module_idle(channel->dev);
+ nvhost_job_put(job);
+ mutex_unlock(&channel->submitlock);
+ goto done;
+ }
+
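+ /*
+ * The pushes below account for the four fixed increments in
+ * syncpt_incrs:
+ * 1) an op_done increment once 3D drains (syncval - 3, awaited
+ * via waitbase offset 1),
+ * 2) an immediate increment once the register value is in the
+ * output FIFO (syncval - 2, which the CPU waits for below),
+ * 3) a CPU increment after the FIFO has been read (syncval - 1,
+ * awaited via waitbase offset 3),
+ * 4) a final immediate increment at syncval marking the submit
+ * complete; the waitbase itself is moved forward by 4 to match.
+ */
+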
+ /* push save buffer (pre-gather setup depends on unit) */
+ if (hwctx_to_save)
+ h->save_push(&hwctx_to_save->hwctx, &channel->cdma);
+
+ /* gather restore buffer */
+ if (need_restore)
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_gather(to_host1x_hwctx(channel->cur_ctx)
+ ->restore_size),
+ to_host1x_hwctx(channel->cur_ctx)->restore_phys);
+
+ /* Switch to 3D - wait for it to complete what it was doing */
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
+ nvhost_opcode_imm_incr_syncpt(
+ host1x_uclass_incr_syncpt_cond_op_done_v(),
+ p->syncpt));
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_wait_syncpt_base_r(), 1),
+ nvhost_class_host_wait_syncpt_base(p->syncpt,
+ p->waitbase, 1));
+ /* Tell 3D to send register value to FIFO */
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_nonincr(host1x_uclass_indoff_r(), 1),
+ nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ offset, false));
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_imm(host1x_uclass_inddata_r(), 0),
+ NVHOST_OPCODE_NOOP);
+ /* Increment syncpt to indicate that FIFO can be read */
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_imm_incr_syncpt(
+ host1x_uclass_incr_syncpt_cond_immediate_v(),
+ p->syncpt),
+ NVHOST_OPCODE_NOOP);
+ /* Wait for value to be read from FIFO */
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_nonincr(host1x_uclass_wait_syncpt_base_r(), 1),
+ nvhost_class_host_wait_syncpt_base(p->syncpt,
+ p->waitbase, 3));
+ /* Indicate submit complete */
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1),
+ nvhost_class_host_incr_syncpt_base(p->waitbase, 4));
+ nvhost_cdma_push(&channel->cdma,
+ NVHOST_OPCODE_NOOP,
+ nvhost_opcode_imm_incr_syncpt(
+ host1x_uclass_incr_syncpt_cond_immediate_v(),
+ p->syncpt));
+
+ /* end CDMA submit */
+ nvhost_cdma_end(&channel->cdma, job);
+ nvhost_job_put(job);
+ job = NULL;
+
+ /*
+ * schedule a context save interrupt (to drain the host FIFO
+ * if necessary, and to release the restore buffer)
+ */
+ if (hwctx_to_save) {
+ err = nvhost_intr_add_action(
+ &nvhost_get_host(channel->dev)->intr,
+ p->syncpt,
+ syncval - syncpt_incrs
+ + hwctx_to_save->save_incrs
+ - 1,
+ NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
+ ctx_waiter,
+ NULL);
+ ctx_waiter = NULL;
+ WARN(err, "Failed to set context save interrupt");
+ }
+
+ /* Wait for FIFO to be ready */
+ err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr,
+ p->syncpt, syncval - 2,
+ NVHOST_INTR_ACTION_WAKEUP, &wq,
+ read_waiter,
+ &ref);
+ read_waiter = NULL;
+ WARN(err, "Failed to set wakeup interrupt");
+ wait_event(wq,
+ nvhost_syncpt_is_expired(&nvhost_get_host(channel->dev)->syncpt,
+ p->syncpt, syncval - 2));
+ nvhost_intr_put_ref(&nvhost_get_host(channel->dev)->intr, p->syncpt,
+ ref);
+
+ /* Read the register value from FIFO */
+ err = host1x_drain_read_fifo(channel, value, 1, &pending);
+
+ /* Indicate we've read the value */
+ nvhost_syncpt_cpu_incr(&nvhost_get_host(channel->dev)->syncpt,
+ p->syncpt);
+
+ /* Schedule a submit complete interrupt */
+ err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr,
+ p->syncpt, syncval,
+ NVHOST_INTR_ACTION_SUBMIT_COMPLETE, channel,
+ completed_waiter, NULL);
+ completed_waiter = NULL;
+ WARN(err, "Failed to set submit complete interrupt");
+
+ mutex_unlock(&channel->submitlock);
+
+done:
+ kfree(ctx_waiter);
+ kfree(read_waiter);
+ kfree(completed_waiter);
+ return err;
+}
+
+static int host1x_drain_read_fifo(struct nvhost_channel *ch,
+ u32 *ptr, unsigned int count, unsigned int *pending)
+{
+ unsigned int entries = *pending;
+ unsigned long timeout = jiffies + NV_FIFO_READ_TIMEOUT;
+ void __iomem *chan_regs = ch->aperture;
+
+ while (count) {
+ unsigned int num;
+
+ while (!entries && time_before(jiffies, timeout)) {
+ /* query host for number of entries in fifo */
+ entries = host1x_channel_fifostat_outfentries_v(
+ readl(chan_regs + host1x_channel_fifostat_r()));
+ if (!entries)
+ cpu_relax();
+ }
+
+ /* timeout -> return error */
+ if (!entries)
+ return -EIO;
+
+ num = min(entries, count);
+ entries -= num;
+ count -= num;
+
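+ /* drain up to four words at a time */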
+ while (num & ~0x3) {
+ u32 arr[4];
+ arr[0] = readl(chan_regs + host1x_channel_inddata_r());
+ arr[1] = readl(chan_regs + host1x_channel_inddata_r());
+ arr[2] = readl(chan_regs + host1x_channel_inddata_r());
+ arr[3] = readl(chan_regs + host1x_channel_inddata_r());
+ memcpy(ptr, arr, 4*sizeof(u32));
+ ptr += 4;
+ num -= 4;
+ }
+ while (num--)
+ *ptr++ = readl(chan_regs + host1x_channel_inddata_r());
+ }
+ *pending = entries;
+
+ return 0;
+}
+
+static int host1x_save_context(struct nvhost_channel *ch)
+{
+ struct nvhost_device *dev = ch->dev;
+ struct nvhost_hwctx *hwctx_to_save;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ u32 syncpt_incrs, syncpt_val;
+ int err = 0;
+ void *ref;
+ void *ctx_waiter = NULL, *wakeup_waiter = NULL;
+ struct nvhost_job *job;
+ struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+ u32 syncpt_id;
+
+ ctx_waiter = nvhost_intr_alloc_waiter();
+ wakeup_waiter = nvhost_intr_alloc_waiter();
+ if (!ctx_waiter || !wakeup_waiter) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ if (drv->busy)
+ drv->busy(dev);
+
+ mutex_lock(&ch->submitlock);
+ hwctx_to_save = ch->cur_ctx;
+ if (!hwctx_to_save) {
+ mutex_unlock(&ch->submitlock);
+ goto done;
+ }
+
+ job = nvhost_job_alloc(ch, hwctx_to_save,
+ NULL,
+ nvhost_get_host(ch->dev)->memmgr, 0, 0);
+ if (IS_ERR_OR_NULL(job)) {
+ /* nvhost_job_alloc() returns NULL on failure */
+ err = job ? PTR_ERR(job) : -ENOMEM;
+ mutex_unlock(&ch->submitlock);
+ goto done;
+ }
+
+ hwctx_to_save->valid = true;
+ ch->cur_ctx = NULL;
+ syncpt_id = to_host1x_hwctx_handler(hwctx_to_save->h)->syncpt;
+
+ syncpt_incrs = to_host1x_hwctx(hwctx_to_save)->save_incrs;
+ syncpt_val = nvhost_syncpt_incr_max(&nvhost_get_host(ch->dev)->syncpt,
+ syncpt_id, syncpt_incrs);
+
+ job->syncpt_id = syncpt_id;
+ job->syncpt_incrs = syncpt_incrs;
+ job->syncpt_end = syncpt_val;
+
+ err = nvhost_cdma_begin(&ch->cdma, job);
+ if (err) {
+ mutex_unlock(&ch->submitlock);
+ goto done;
+ }
+
+ ch->ctxhandler->save_push(hwctx_to_save, &ch->cdma);
+ nvhost_cdma_end(&ch->cdma, job);
+ nvhost_job_put(job);
+ job = NULL;
+
+ err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr, syncpt_id,
+ syncpt_val - syncpt_incrs +
+ to_host1x_hwctx(hwctx_to_save)->save_thresh,
+ NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
+ ctx_waiter,
+ NULL);
+ ctx_waiter = NULL;
+ WARN(err, "Failed to set context save interrupt");
+
+ err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr,
+ syncpt_id, syncpt_val,
+ NVHOST_INTR_ACTION_WAKEUP, &wq,
+ wakeup_waiter,
+ &ref);
+ wakeup_waiter = NULL;
+ WARN(err, "Failed to set wakeup interrupt");
+ wait_event(wq,
+ nvhost_syncpt_is_expired(&nvhost_get_host(ch->dev)->syncpt,
+ syncpt_id, syncpt_val));
+
+ nvhost_intr_put_ref(&nvhost_get_host(ch->dev)->intr, syncpt_id, ref);
+
+ nvhost_cdma_update(&ch->cdma);
+
+ mutex_unlock(&ch->submitlock);
+
+done:
+ kfree(ctx_waiter);
+ kfree(wakeup_waiter);
+ return err;
+}
+
+static inline void __iomem *host1x_channel_aperture(void __iomem *p, int ndx)
+{
+ p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES;
+ return p;
+}
+
+static inline int host1x_hwctx_handler_init(struct nvhost_channel *ch)
+{
+ int err = 0;
+ unsigned long syncpts = ch->dev->syncpts;
+ unsigned long waitbases = ch->dev->waitbases;
+ u32 syncpt = find_first_bit(&syncpts, BITS_PER_LONG);
+ u32 waitbase = find_first_bit(&waitbases, BITS_PER_LONG);
+ struct nvhost_driver *drv = to_nvhost_driver(ch->dev->dev.driver);
+
+ if (drv->alloc_hwctx_handler) {
+ ch->ctxhandler = drv->alloc_hwctx_handler(syncpt,
+ waitbase, ch);
+ if (!ch->ctxhandler)
+ err = -ENOMEM;
+ }
+
+ return err;
+}
+
+static int host1x_channel_init(struct nvhost_channel *ch,
+ struct nvhost_master *dev, int index)
+{
+ ch->chid = index;
+ mutex_init(&ch->reflock);
+ mutex_init(&ch->submitlock);
+
+ ch->aperture = host1x_channel_aperture(dev->aperture, index);
+
+ return host1x_hwctx_handler_init(ch);
+}
+
+static const struct nvhost_channel_ops host1x_channel_ops = {
+ .init = host1x_channel_init,
+ .submit = host1x_channel_submit,
+ .read3dreg = host1x_channel_read_3d_reg,
+ .save_context = host1x_save_context,
+ .drain_read_fifo = host1x_drain_read_fifo,
+};
diff --git a/drivers/video/tegra/host/host1x/host1x_debug.c b/drivers/video/tegra/host/host1x/host1x_debug.c
new file mode 100644
index 000000000000..1c4ed684dd84
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_debug.c
@@ -0,0 +1,405 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_debug.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/mm.h>
+
+#include <linux/io.h>
+
+#include "dev.h"
+#include "debug.h"
+#include "nvhost_cdma.h"
+#include "nvhost_channel.h"
+#include "nvhost_job.h"
+#include "chip_support.h"
+#include "nvhost_memmgr.h"
+
+#define NVHOST_DEBUG_MAX_PAGE_OFFSET 102400
+
+enum {
+ NVHOST_DBG_STATE_CMD = 0,
+ NVHOST_DBG_STATE_DATA = 1,
+ NVHOST_DBG_STATE_GATHER = 2
+};
+
+static int show_channel_command(struct output *o, u32 addr, u32 val, int *count)
+{
+ unsigned mask;
+ unsigned subop;
+
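+ /* the top nibble of a command word selects the host1x opcode */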
+ switch (val >> 28) {
+ case 0x0:
+ mask = val & 0x3f;
+ if (mask) {
+ nvhost_debug_output(o,
+ "SETCL(class=%03x, offset=%03x, mask=%02x, [",
+ val >> 6 & 0x3ff, val >> 16 & 0xfff, mask);
+ *count = hweight8(mask);
+ return NVHOST_DBG_STATE_DATA;
+ } else {
+ nvhost_debug_output(o, "SETCL(class=%03x)\n",
+ val >> 6 & 0x3ff);
+ return NVHOST_DBG_STATE_CMD;
+ }
+
+ case 0x1:
+ nvhost_debug_output(o, "INCR(offset=%03x, [",
+ val >> 16 & 0xfff);
+ *count = val & 0xffff;
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0x2:
+ nvhost_debug_output(o, "NONINCR(offset=%03x, [",
+ val >> 16 & 0xfff);
+ *count = val & 0xffff;
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0x3:
+ mask = val & 0xffff;
+ nvhost_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
+ val >> 16 & 0xfff, mask);
+ *count = hweight16(mask);
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0x4:
+ nvhost_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
+ val >> 16 & 0xfff, val & 0xffff);
+ return NVHOST_DBG_STATE_CMD;
+
+ case 0x5:
+ nvhost_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
+ return NVHOST_DBG_STATE_CMD;
+
+ case 0x6:
+ nvhost_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
+ val >> 16 & 0xfff, val >> 15 & 0x1, val >> 14 & 0x1,
+ val & 0x3fff);
+ *count = val & 0x3fff; /* TODO: insert */
+ return NVHOST_DBG_STATE_GATHER;
+
+ case 0xe:
+ subop = val >> 24 & 0xf;
+ if (subop == 0)
+ nvhost_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
+ val & 0xff);
+ else if (subop == 1)
+ nvhost_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
+ val & 0xff);
+ else
+ nvhost_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
+ return NVHOST_DBG_STATE_CMD;
+
+ default:
+ return NVHOST_DBG_STATE_CMD;
+ }
+}
+
+static void show_channel_gather(struct output *o, u32 addr,
+ phys_addr_t phys_addr, u32 words, struct nvhost_cdma *cdma);
+
+static void show_channel_word(struct output *o, int *state, int *count,
+ u32 addr, u32 val, struct nvhost_cdma *cdma)
+{
+ static int start_count, dont_print;
+
+ switch (*state) {
+ case NVHOST_DBG_STATE_CMD:
+ if (addr)
+ nvhost_debug_output(o, "%08x: %08x:", addr, val);
+ else
+ nvhost_debug_output(o, "%08x:", val);
+
+ *state = show_channel_command(o, addr, val, count);
+ dont_print = 0;
+ start_count = *count;
+ if (*state == NVHOST_DBG_STATE_DATA && *count == 0) {
+ *state = NVHOST_DBG_STATE_CMD;
+ nvhost_debug_output(o, "])\n");
+ }
+ break;
+
+ case NVHOST_DBG_STATE_DATA:
+ (*count)--;
+ if (start_count - *count < 64)
+ nvhost_debug_output(o, "%08x%s",
+ val, *count > 0 ? ", " : "])\n");
+ else if (!dont_print && (*count > 0)) {
+ nvhost_debug_output(o, "[truncated; %d more words]\n",
+ *count);
+ dont_print = 1;
+ }
+ if (*count == 0)
+ *state = NVHOST_DBG_STATE_CMD;
+ break;
+
+ case NVHOST_DBG_STATE_GATHER:
+ *state = NVHOST_DBG_STATE_CMD;
+ nvhost_debug_output(o, "%08x]):\n", val);
+ if (cdma) {
+ show_channel_gather(o, addr, val,
+ *count, cdma);
+ }
+ break;
+ }
+}
+
+static void do_show_channel_gather(struct output *o,
+ phys_addr_t phys_addr,
+ u32 words, struct nvhost_cdma *cdma,
+ phys_addr_t pin_addr, u32 *map_addr)
+{
+ /* Locate the gather inside the mapped buffer and decode its words */
+ u32 offset;
+ int state, count, i;
+
+ offset = phys_addr - pin_addr;
+ /*
+ * Sometimes we are given a different hardware address for the
+ * same page - in that case the offset becomes invalid and we
+ * just have to bail out.
+ */
+ if (offset > NVHOST_DEBUG_MAX_PAGE_OFFSET) {
+ nvhost_debug_output(o, "[address mismatch]\n");
+ } else {
+ /* a GATHER buffer always starts with commands */
+ state = NVHOST_DBG_STATE_CMD;
+ for (i = 0; i < words; i++)
+ show_channel_word(o, &state, &count,
+ phys_addr + i * 4,
+ *(map_addr + offset/4 + i),
+ cdma);
+ }
+}
+
+static void show_channel_gather(struct output *o, u32 addr,
+ phys_addr_t phys_addr,
+ u32 words, struct nvhost_cdma *cdma)
+{
+#if defined(CONFIG_TEGRA_NVMAP)
+ /* Map dmaget cursor to corresponding nvmap_handle */
+ struct push_buffer *pb = &cdma->push_buffer;
+ u32 cur = addr - pb->phys;
+ struct mem_mgr_handle *nvmap = &pb->client_handle[cur/8];
+ u32 *map_addr;
+ phys_addr_t pin_addr;
+
+ if (!nvmap || !nvmap->handle || !nvmap->client) {
+ nvhost_debug_output(o, "[already deallocated]\n");
+ return;
+ }
+
+ map_addr = mem_op().mmap(nvmap->handle);
+ if (!map_addr) {
+ nvhost_debug_output(o, "[could not mmap]\n");
+ return;
+ }
+
+ /* Get base address from nvmap */
+ pin_addr = mem_op().pin(nvmap->client, nvmap->handle);
+ if (IS_ERR_VALUE(pin_addr)) {
+ nvhost_debug_output(o, "[couldn't pin]\n");
+ mem_op().munmap(nvmap->handle, map_addr);
+ return;
+ }
+
+ do_show_channel_gather(o, phys_addr, words, cdma,
+ pin_addr, map_addr);
+ mem_op().unpin(nvmap->client, nvmap->handle);
+ mem_op().munmap(nvmap->handle, map_addr);
+#endif
+}
+
+static void show_channel_gathers(struct output *o, struct nvhost_cdma *cdma)
+{
+ struct nvhost_job *job;
+
+ list_for_each_entry(job, &cdma->sync_queue, list) {
+ int i;
+ nvhost_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d,"
+ " first_get=%08x, timeout=%d, ctx=%p,"
+ " num_slots=%d, num_handles=%d\n",
+ job,
+ job->syncpt_id,
+ job->syncpt_end,
+ job->first_get,
+ job->timeout,
+ job->hwctx,
+ job->num_slots,
+ job->num_unpins);
+
+ for (i = 0; i < job->num_gathers; i++) {
+ struct nvhost_job_gather *g = &job->gathers[i];
+ u32 *mapped = mem_op().mmap(g->ref);
+ if (!mapped) {
+ nvhost_debug_output(o, "[could not mmap]\n");
+ continue;
+ }
+
+ nvhost_debug_output(o, " GATHER at %08x, %d words\n",
+ g->mem, g->words);
+
+ do_show_channel_gather(o, g->mem + g->offset,
+ g->words, cdma, g->mem, mapped);
+ mem_op().munmap(g->ref, mapped);
+ }
+ }
+}
+
+static void t20_debug_show_channel_cdma(struct nvhost_master *m,
+ struct nvhost_channel *ch, struct output *o, int chid)
+{
+ struct nvhost_channel *channel = ch;
+ struct nvhost_cdma *cdma = &channel->cdma;
+ u32 dmaput, dmaget, dmactrl;
+ u32 cbstat, cbread;
+ u32 val, base, baseval;
+
+ dmaput = readl(channel->aperture + host1x_channel_dmaput_r());
+ dmaget = readl(channel->aperture + host1x_channel_dmaget_r());
+ dmactrl = readl(channel->aperture + host1x_channel_dmactrl_r());
+ cbread = readl(m->sync_aperture + host1x_sync_cbread0_r() + 4 * chid);
+ cbstat = readl(m->sync_aperture + host1x_sync_cbstat_0_r() + 4 * chid);
+
+ nvhost_debug_output(o, "%d-%s (%d): ", chid,
+ channel->dev->name,
+ channel->dev->refcount);
+
+ if (host1x_channel_dmactrl_dmastop_v(dmactrl)
+ || !channel->cdma.push_buffer.mapped) {
+ nvhost_debug_output(o, "inactive\n\n");
+ return;
+ }
+
+ switch (cbstat) {
+ case 0x00010008:
+ nvhost_debug_output(o, "waiting on syncpt %d val %d\n",
+ cbread >> 24, cbread & 0xffffff);
+ break;
+
+ case 0x00010009:
+ base = (cbread >> 16) & 0xff;
+ baseval = readl(m->sync_aperture +
+ host1x_sync_syncpt_base_0_r() + 4 * base);
+ val = cbread & 0xffff;
+ nvhost_debug_output(o, "waiting on syncpt %d val %d "
+ "(base %d = %d; offset = %d)\n",
+ cbread >> 24, baseval + val,
+ base, baseval, val);
+ break;
+
+ default:
+ nvhost_debug_output(o,
+ "active class %02x, offset %04x, val %08x\n",
+ host1x_sync_cbstat_0_cbclass0_v(cbstat),
+ host1x_sync_cbstat_0_cboffset0_v(cbstat),
+ cbread);
+ break;
+ }
+
+ nvhost_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+ dmaput, dmaget, dmactrl);
+ nvhost_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
+
+ show_channel_gathers(o, cdma);
+ nvhost_debug_output(o, "\n");
+}
+
+static void t20_debug_show_channel_fifo(struct nvhost_master *m,
+ struct nvhost_channel *ch, struct output *o, int chid)
+{
+ u32 val, rd_ptr, wr_ptr, start, end;
+ struct nvhost_channel *channel = ch;
+ int state, count;
+
+ nvhost_debug_output(o, "%d: fifo:\n", chid);
+
+ val = readl(channel->aperture + host1x_channel_fifostat_r());
+ nvhost_debug_output(o, "FIFOSTAT %08x\n", val);
+ if (host1x_channel_fifostat_cfempty_v(val)) {
+ nvhost_debug_output(o, "[empty]\n");
+ return;
+ }
+
+ writel(0x0, m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+ writel(host1x_sync_cfpeek_ctrl_cfpeek_ena_f(1)
+ | host1x_sync_cfpeek_ctrl_cfpeek_channr_f(chid),
+ m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+
+ val = readl(m->sync_aperture + host1x_sync_cfpeek_ptrs_r());
+ rd_ptr = host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(val);
+ wr_ptr = host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(val);
+
+ val = readl(m->sync_aperture + host1x_sync_cf0_setup_r() + 4 * chid);
+ start = host1x_sync_cf0_setup_cf0_base_v(val);
+ end = host1x_sync_cf0_setup_cf0_limit_v(val);
+
+ state = NVHOST_DBG_STATE_CMD;
+
+ do {
+ writel(0x0, m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+ writel(host1x_sync_cfpeek_ctrl_cfpeek_ena_f(1)
+ | host1x_sync_cfpeek_ctrl_cfpeek_channr_f(chid)
+ | host1x_sync_cfpeek_ctrl_cfpeek_addr_f(rd_ptr),
+ m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+ val = readl(m->sync_aperture + host1x_sync_cfpeek_read_r());
+
+ show_channel_word(o, &state, &count, 0, val, NULL);
+
+ if (rd_ptr == end)
+ rd_ptr = start;
+ else
+ rd_ptr++;
+ } while (rd_ptr != wr_ptr);
+
+ if (state == NVHOST_DBG_STATE_DATA)
+ nvhost_debug_output(o, ", ...])\n");
+ nvhost_debug_output(o, "\n");
+
+ writel(0x0, m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+}
+
+static void t20_debug_show_mlocks(struct nvhost_master *m, struct output *o)
+{
+ u32 __iomem *mlo_regs = m->sync_aperture +
+ host1x_sync_mlock_owner_0_r();
+ int i;
+
+ nvhost_debug_output(o, "---- mlocks ----\n");
+ for (i = 0; i < NV_HOST1X_NB_MLOCKS; i++) {
+ u32 owner = readl(mlo_regs + i);
+ if (host1x_sync_mlock_owner_0_mlock_ch_owns_0_v(owner))
+ nvhost_debug_output(o, "%d: locked by channel %d\n",
+ i,
+ host1x_sync_mlock_owner_0_mlock_owner_chid_0_f(
+ owner));
+ else if (host1x_sync_mlock_owner_0_mlock_cpu_owns_0_v(owner))
+ nvhost_debug_output(o, "%d: locked by cpu\n", i);
+ else
+ nvhost_debug_output(o, "%d: unlocked\n", i);
+ }
+ nvhost_debug_output(o, "\n");
+}
+
+static const struct nvhost_debug_ops host1x_debug_ops = {
+ .show_channel_cdma = t20_debug_show_channel_cdma,
+ .show_channel_fifo = t20_debug_show_channel_fifo,
+ .show_mlocks = t20_debug_show_mlocks,
+};
diff --git a/drivers/video/tegra/host/host1x/host1x_hwctx.h b/drivers/video/tegra/host/host1x/host1x_hwctx.h
new file mode 100644
index 000000000000..13f0071d1e33
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_hwctx.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_hwctx.h
+ *
+ * Tegra Graphics Host HOST1X Hardware Context Interface
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HOST1X_HWCTX_H
+#define __NVHOST_HOST1X_HWCTX_H
+
+#include <linux/kref.h>
+#include "nvhost_hwctx.h"
+
+struct nvhost_hwctx_handler;
+struct nvhost_channel;
+
+#define to_host1x_hwctx_handler(handler) \
+ container_of((handler), struct host1x_hwctx_handler, h)
+#define to_host1x_hwctx(h) container_of((h), struct host1x_hwctx, hwctx)
+#define host1x_hwctx_handler(_hwctx) to_host1x_hwctx_handler((_hwctx)->hwctx.h)
+
+struct host1x_hwctx {
+ struct nvhost_hwctx hwctx;
+
+ u32 save_incrs;
+ u32 save_thresh;
+ u32 save_slots;
+
+ struct mem_handle *restore;
+ u32 *restore_virt;
+ phys_addr_t restore_phys;
+ u32 restore_size;
+ u32 restore_incrs;
+};
+
+struct host1x_hwctx_handler {
+ struct nvhost_hwctx_handler h;
+
+ u32 syncpt;
+ u32 waitbase;
+ u32 restore_size;
+ u32 restore_incrs;
+ struct mem_handle *save_buf;
+ u32 save_incrs;
+ u32 save_thresh;
+ u32 save_slots;
+ phys_addr_t save_phys;
+ u32 save_size;
+};
+
+#endif
diff --git a/drivers/video/tegra/host/host1x/host1x_intr.c b/drivers/video/tegra/host/host1x/host1x_intr.c
new file mode 100644
index 000000000000..facb818a0c24
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_intr.c
@@ -0,0 +1,294 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_intr.c
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/mach/irq.h>
+
+#include "nvhost_intr.h"
+#include "dev.h"
+
+/* Spacing between sync registers */
+#define REGISTER_STRIDE 4
+
+/*** HW host sync management ***/
+
+static void syncpt_thresh_mask(struct irq_data *data)
+{
+ (void)data;
+}
+
+static void syncpt_thresh_unmask(struct irq_data *data)
+{
+ (void)data;
+}
+
+static void syncpt_thresh_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ struct nvhost_master *dev = irq_desc_get_handler_data(desc);
+ void __iomem *sync_regs = dev->sync_aperture;
+ unsigned long reg;
+ int i, id;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
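+ /* each status word covers BITS_PER_LONG syncpoints; forward every
+ * pending bit to its per-syncpoint interrupt */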
+ for (i = 0; i < dev->info.nb_pts / BITS_PER_LONG; i++) {
+ reg = readl(sync_regs +
+ host1x_sync_syncpt_thresh_cpu0_int_status_r() +
+ i * REGISTER_STRIDE);
+ for_each_set_bit(id, &reg, BITS_PER_LONG)
+ generic_handle_irq(id +
+ dev->intr.host_syncpt_irq_base +
+ i * BITS_PER_LONG);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip syncpt_thresh_irq = {
+ .name = "syncpt",
+ .irq_mask = syncpt_thresh_mask,
+ .irq_unmask = syncpt_thresh_unmask
+};
+
+static void t20_intr_init_host_sync(struct nvhost_intr *intr)
+{
+ struct nvhost_master *dev = intr_to_dev(intr);
+ void __iomem *sync_regs = dev->sync_aperture;
+ int i, irq;
+
+ writel(0xffffffffUL,
+ sync_regs + host1x_sync_syncpt_thresh_int_disable_r());
+ writel(0xffffffffUL,
+ sync_regs + host1x_sync_syncpt_thresh_cpu0_int_status_r());
+
+ for (i = 0; i < dev->info.nb_pts; i++) {
+ irq = intr->host_syncpt_irq_base + i;
+ irq_set_chip_and_handler(irq, &syncpt_thresh_irq,
+ handle_simple_irq);
+ irq_set_chip_data(irq, sync_regs);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+ irq_set_chained_handler(INT_HOST1X_MPCORE_SYNCPT,
+ syncpt_thresh_cascade);
+ irq_set_handler_data(INT_HOST1X_MPCORE_SYNCPT, dev);
+ /* disable the ip_busy_timeout. this prevents write drops, etc.
+ * there's no real way to recover from a hung client anyway.
+ */
+ writel(0, sync_regs + host1x_sync_ip_busy_timeout_r());
+
+ /* increase the auto-ack timeout to the maximum value. 2d will hang
+ * otherwise on ap20.
+ */
+ writel(0xff, sync_regs + host1x_sync_ctxsw_timeout_cfg_r());
+}
+
+static void t20_intr_set_host_clocks_per_usec(struct nvhost_intr *intr, u32 cpm)
+{
+ struct nvhost_master *dev = intr_to_dev(intr);
+ void __iomem *sync_regs = dev->sync_aperture;
+ /* write microsecond clock register */
+ writel(cpm, sync_regs + host1x_sync_usec_clk_r());
+}
+
+static void t20_intr_set_syncpt_threshold(struct nvhost_intr *intr,
+ u32 id, u32 thresh)
+{
+ struct nvhost_master *dev = intr_to_dev(intr);
+ void __iomem *sync_regs = dev->sync_aperture;
+ thresh &= 0xffff;
+ writel(thresh, sync_regs +
+ (host1x_sync_syncpt_int_thresh_0_r() + id * REGISTER_STRIDE));
+}
+
+static void t20_intr_enable_syncpt_intr(struct nvhost_intr *intr, u32 id)
+{
+ struct nvhost_master *dev = intr_to_dev(intr);
+ void __iomem *sync_regs = dev->sync_aperture;
+
+ writel(BIT_MASK(id), sync_regs +
+ host1x_sync_syncpt_thresh_int_enable_cpu0_r() +
+ BIT_WORD(id) * REGISTER_STRIDE);
+}
+
+static void t20_intr_disable_syncpt_intr(struct nvhost_intr *intr, u32 id)
+{
+ struct nvhost_master *dev = intr_to_dev(intr);
+ void __iomem *sync_regs = dev->sync_aperture;
+
+ writel(BIT_MASK(id), sync_regs +
+ host1x_sync_syncpt_thresh_int_disable_r() +
+ BIT_WORD(id) * REGISTER_STRIDE);
+}
+
+static void t20_intr_disable_all_syncpt_intrs(struct nvhost_intr *intr)
+{
+ struct nvhost_master *dev = intr_to_dev(intr);
+ void __iomem *sync_regs = dev->sync_aperture;
+ u32 reg;
+
+ for (reg = 0; reg <= BIT_WORD(dev->info.nb_pts) * REGISTER_STRIDE;
+ reg += REGISTER_STRIDE) {
+ /* disable interrupts for both cpu's */
+ writel(0xffffffffu, sync_regs +
+ host1x_sync_syncpt_thresh_int_disable_r() +
+ reg);
+
+ /* clear status for both cpu's */
+ writel(0xffffffffu, sync_regs +
+ host1x_sync_syncpt_thresh_cpu0_int_status_r() + reg);
+ writel(0xffffffffu, sync_regs +
+ host1x_sync_syncpt_thresh_cpu1_int_status_r() + reg);
+ }
+}
+
+/**
+ * Sync point threshold interrupt service function
+ * Handles sync point threshold triggers, in interrupt context
+ */
+static irqreturn_t t20_intr_syncpt_thresh_isr(int irq, void *dev_id)
+{
+ struct nvhost_intr_syncpt *syncpt = dev_id;
+ unsigned int id = syncpt->id;
+ struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
+
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+
+ u32 reg = BIT_WORD(id) * REGISTER_STRIDE;
+
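+ /* mask and ack this syncpoint's interrupt; the threaded handler
+ * (nvhost_syncpt_thresh_fn) then runs the registered actions */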
+ writel(BIT_MASK(id), sync_regs +
+ host1x_sync_syncpt_thresh_int_disable_r() + reg);
+ writel(BIT_MASK(id), sync_regs +
+ host1x_sync_syncpt_thresh_cpu0_int_status_r() + reg);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * Host general interrupt service function
+ * Handles read / write failures
+ */
+static irqreturn_t t20_intr_host1x_isr(int irq, void *dev_id)
+{
+ struct nvhost_intr *intr = dev_id;
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+ u32 stat;
+ u32 ext_stat;
+ u32 addr;
+
+ stat = readl(sync_regs + host1x_sync_hintstatus_r());
+ ext_stat = readl(sync_regs + host1x_sync_hintstatus_ext_r());
+
+ if (host1x_sync_hintstatus_ext_ip_read_int_v(ext_stat)) {
+ addr = readl(sync_regs + host1x_sync_ip_read_timeout_addr_r());
+ pr_err("Host read timeout at address %x\n", addr);
+ }
+
+ if (host1x_sync_hintstatus_ext_ip_write_int_v(ext_stat)) {
+ addr = readl(sync_regs + host1x_sync_ip_write_timeout_addr_r());
+ pr_err("Host write timeout at address %x\n", addr);
+ }
+
+ writel(ext_stat, sync_regs + host1x_sync_hintstatus_ext_r());
+ writel(stat, sync_regs + host1x_sync_hintstatus_r());
+
+ return IRQ_HANDLED;
+}
+
+static int t20_intr_request_host_general_irq(struct nvhost_intr *intr)
+{
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+ int err;
+
+ if (intr->host_general_irq_requested)
+ return 0;
+
+ /* master disable for general (not syncpt) host interrupts */
+ writel(0, sync_regs + host1x_sync_intmask_r());
+
+ /* clear status & extstatus */
+ writel(0xfffffffful, sync_regs + host1x_sync_hintstatus_ext_r());
+ writel(0xfffffffful, sync_regs + host1x_sync_hintstatus_r());
+
+ err = request_irq(intr->host_general_irq, t20_intr_host1x_isr, 0,
+ "host_status", intr);
+ if (err)
+ return err;
+
+ /* enable extra interrupt sources IP_READ_INT and IP_WRITE_INT */
+ writel(BIT(30) | BIT(31), sync_regs + host1x_sync_hintmask_ext_r());
+
+ /* enable extra interrupt sources */
+ writel(BIT(31), sync_regs + host1x_sync_hintmask_r());
+
+ /* enable host module interrupt to CPU0 */
+ writel(BIT(0), sync_regs + host1x_sync_intc0mask_r());
+
+ /* master enable for general (not syncpt) host interrupts */
+ writel(BIT(0), sync_regs + host1x_sync_intmask_r());
+
+ intr->host_general_irq_requested = true;
+
+ return err;
+}
+
+static void t20_intr_free_host_general_irq(struct nvhost_intr *intr)
+{
+ if (intr->host_general_irq_requested) {
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+
+ /* master disable for general (not syncpt) host interrupts */
+ writel(0, sync_regs + host1x_sync_intmask_r());
+
+ free_irq(intr->host_general_irq, intr);
+ intr->host_general_irq_requested = false;
+ }
+}
+
+static int t20_request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
+{
+ int err;
+
+ if (syncpt->irq_requested)
+ return 0;
+
+ err = request_threaded_irq(syncpt->irq,
+ t20_intr_syncpt_thresh_isr,
+ nvhost_syncpt_thresh_fn,
+ 0, syncpt->thresh_irq_name, syncpt);
+ if (err)
+ return err;
+
+ syncpt->irq_requested = 1;
+ return 0;
+}
+
+static const struct nvhost_intr_ops host1x_intr_ops = {
+ .init_host_sync = t20_intr_init_host_sync,
+ .set_host_clocks_per_usec = t20_intr_set_host_clocks_per_usec,
+ .set_syncpt_threshold = t20_intr_set_syncpt_threshold,
+ .enable_syncpt_intr = t20_intr_enable_syncpt_intr,
+ .disable_syncpt_intr = t20_intr_disable_syncpt_intr,
+ .disable_all_syncpt_intrs = t20_intr_disable_all_syncpt_intrs,
+ .request_host_general_irq = t20_intr_request_host_general_irq,
+ .free_host_general_irq = t20_intr_free_host_general_irq,
+ .request_syncpt_irq = t20_request_syncpt_irq,
+};
diff --git a/drivers/video/tegra/host/host1x/host1x_syncpt.c b/drivers/video/tegra/host/host1x/host1x_syncpt.c
new file mode 100644
index 000000000000..8cca9dbbbc08
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_syncpt.c
@@ -0,0 +1,180 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_syncpt.c
+ *
+ * Tegra Graphics Host Syncpoints for HOST1X
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/nvhost_ioctl.h>
+#include <linux/io.h>
+#include <trace/events/nvhost.h>
+#include "nvhost_syncpt.h"
+#include "nvhost_acm.h"
+#include "host1x.h"
+#include "chip_support.h"
+
+/**
+ * Write the current syncpoint value back to hw.
+ */
+static void t20_syncpt_reset(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ int min = nvhost_syncpt_read_min(sp, id);
+ writel(min, dev->sync_aperture + (host1x_sync_syncpt_0_r() + id * 4));
+}
+
+/**
+ * Write the current waitbase value back to hw.
+ */
+static void t20_syncpt_reset_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ writel(sp->base_val[id],
+ dev->sync_aperture + (host1x_sync_syncpt_base_0_r() + id * 4));
+}
+
+/**
+ * Read waitbase value from hw.
+ */
+static void t20_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ sp->base_val[id] = readl(dev->sync_aperture +
+ (host1x_sync_syncpt_base_0_r() + id * 4));
+}
+
+/**
+ * Updates the last value read from hardware.
+ * (was nvhost_syncpt_update_min)
+ */
+static u32 t20_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ void __iomem *sync_regs = dev->sync_aperture;
+ u32 old, live;
+
+ do {
+ old = nvhost_syncpt_read_min(sp, id);
+ live = readl(sync_regs + (host1x_sync_syncpt_0_r() + id * 4));
+ } while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);
+
+ if (!nvhost_syncpt_check_max(sp, id, live))
+ dev_err(&syncpt_to_dev(sp)->dev->dev,
+ "%s failed: id=%u, min=%d, max=%d\n",
+ __func__,
+ id,
+ nvhost_syncpt_read_min(sp, id),
+ nvhost_syncpt_read_max(sp, id));
+
+ return live;
+}
+
+/**
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache. Caller is responsible for host being powered.
+ */
+static void t20_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ u32 reg_offset = id / 32;
+
+ BUG_ON(!nvhost_module_powered(dev->dev));
+ if (!nvhost_syncpt_client_managed(sp, id)
+ && nvhost_syncpt_min_eq_max(sp, id)) {
+ dev_err(&syncpt_to_dev(sp)->dev->dev,
+ "Trying to increment syncpoint id %d beyond max\n",
+ id);
+ nvhost_debug_dump(syncpt_to_dev(sp));
+ return;
+ }
+ writel(BIT_MASK(id), dev->sync_aperture +
+ host1x_sync_syncpt_cpu_incr_r() + reg_offset * 4);
+ wmb();
+}
+
+/* remove a wait pointed to by patch_addr */
+static int host1x_syncpt_patch_wait(struct nvhost_syncpt *sp,
+ void *patch_addr)
+{
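+ /* a wait on the reserved host syncpoint with a threshold of 0 is
+ * always satisfied, which effectively removes the wait */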
+ u32 override = nvhost_class_host_wait_syncpt(
+ NVSYNCPT_GRAPHICS_HOST, 0);
+ __raw_writel(override, patch_addr);
+ return 0;
+}
+
+static const char *t20_syncpt_name(struct nvhost_syncpt *sp, u32 id)
+{
+ struct host1x_device_info *info = &syncpt_to_dev(sp)->info;
+ return (id >= info->nb_pts) ? NULL : info->syncpt_names[id];
+}
+
+static void t20_syncpt_debug(struct nvhost_syncpt *sp)
+{
+ u32 i;
+ for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
+ u32 max = nvhost_syncpt_read_max(sp, i);
+ u32 min = nvhost_syncpt_update_min(sp, i);
+ if (!max && !min)
+ continue;
+ dev_info(&syncpt_to_dev(sp)->dev->dev,
+ "id %d (%s) min %d max %d\n",
+ i, syncpt_op().name(sp, i),
+ min, max);
+
+ }
+
+ for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++) {
+ u32 base_val;
+ t20_syncpt_read_wait_base(sp, i);
+ base_val = sp->base_val[i];
+ if (base_val)
+ dev_info(&syncpt_to_dev(sp)->dev->dev,
+ "waitbase id %d val %d\n",
+ i, base_val);
+
+ }
+}
+
+static int syncpt_mutex_try_lock(struct nvhost_syncpt *sp,
+ unsigned int idx)
+{
+ void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;
+ /* the mlock register reads 0 when the lock is acquired.
+ * writing 0 releases the lock. */
+ return !!readl(sync_regs + (host1x_sync_mlock_0_r() + idx * 4));
+}
+
+static void syncpt_mutex_unlock(struct nvhost_syncpt *sp,
+ unsigned int idx)
+{
+ void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;
+
+ writel(0, sync_regs + (host1x_sync_mlock_0_r() + idx * 4));
+}
+
+static const struct nvhost_syncpt_ops host1x_syncpt_ops = {
+ .reset = t20_syncpt_reset,
+ .reset_wait_base = t20_syncpt_reset_wait_base,
+ .read_wait_base = t20_syncpt_read_wait_base,
+ .update_min = t20_syncpt_update_min,
+ .cpu_incr = t20_syncpt_cpu_incr,
+ .patch_wait = host1x_syncpt_patch_wait,
+ .debug = t20_syncpt_debug,
+ .name = t20_syncpt_name,
+ .mutex_try_lock = syncpt_mutex_try_lock,
+ .mutex_unlock = syncpt_mutex_unlock,
+};
diff --git a/drivers/video/tegra/host/host1x/host1x_syncpt.h b/drivers/video/tegra/host/host1x/host1x_syncpt.h
new file mode 100644
index 000000000000..a971db8b1d94
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_syncpt.h
@@ -0,0 +1,62 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_syncpt.h
+ *
+ * Tegra Graphics Host Syncpoints for HOST1X
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_HOST1X_HOST1X_SYNCPT_H
+#define __NVHOST_HOST1X_HOST1X_SYNCPT_H
+
+/* FIXME:
+ * Sync point ids are now split across two files:
+ * this one and include/linux/nvhost.h.
+ * If someone adds a new sync point in the future,
+ * please check both header files.
+ */
+#define NVSYNCPT_CSI_VI_0 (11)
+#define NVSYNCPT_CSI_VI_1 (12)
+#define NVSYNCPT_VI_ISP_0 (13)
+#define NVSYNCPT_VI_ISP_1 (14)
+#define NVSYNCPT_VI_ISP_2 (15)
+#define NVSYNCPT_VI_ISP_3 (16)
+#define NVSYNCPT_VI_ISP_4 (17)
+#define NVSYNCPT_2D_0 (18)
+#define NVSYNCPT_2D_1 (19)
+#define NVSYNCPT_3D (22)
+#define NVSYNCPT_MPE (23)
+#define NVSYNCPT_MPE_EBM_EOF (28)
+#define NVSYNCPT_MPE_WR_SAFE (29)
+
+/* sync points that are wholly managed by the client */
+#define NVSYNCPTS_CLIENT_MANAGED ( \
+ BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) | \
+ BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) | \
+ BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) | \
+ BIT(NVSYNCPT_DSI) | \
+ BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1) | \
+ BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) | \
+ BIT(NVSYNCPT_VI_ISP_1) | BIT(NVSYNCPT_VI_ISP_2) | \
+ BIT(NVSYNCPT_VI_ISP_3) | BIT(NVSYNCPT_VI_ISP_4) | \
+ BIT(NVSYNCPT_MPE_EBM_EOF) | BIT(NVSYNCPT_MPE_WR_SAFE) | \
+ BIT(NVSYNCPT_2D_1) | BIT(NVSYNCPT_AVP_0))
+
+#define NVWAITBASE_2D_0 (1)
+#define NVWAITBASE_2D_1 (2)
+#define NVWAITBASE_3D (3)
+#define NVWAITBASE_MPE (4)
+
+#endif
diff --git a/drivers/video/tegra/host/host1x/hw_host1x01_channel.h b/drivers/video/tegra/host/host1x/hw_host1x01_channel.h
new file mode 100644
index 000000000000..ca2f9a0778cd
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/hw_host1x01_channel.h
@@ -0,0 +1,182 @@
+/*
+ * drivers/video/tegra/host/host1x/hw_host1x01_channel.h
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
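+ /*
+ * Illustrative example (not part of the generated accessors): the
+ * _v() helpers extract a field from a full register value. Here
+ * "regs" and handle_empty_fifo() are placeholders:
+ *
+ * u32 stat = readl(regs + host1x_channel_fifostat_r());
+ * if (host1x_channel_fifostat_cfempty_v(stat))
+ * handle_empty_fifo();
+ */
+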
+#ifndef __hw_host1x_channel_host1x_h__
+#define __hw_host1x_channel_host1x_h__
+/* This file is autogenerated. Do not edit. */
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+ return 0x0;
+}
+static inline u32 host1x_channel_fifostat_cfempty_s(void)
+{
+ return 1;
+}
+static inline u32 host1x_channel_fifostat_cfempty_f(u32 v)
+{
+ return (v & 0x1) << 10;
+}
+static inline u32 host1x_channel_fifostat_cfempty_m(void)
+{
+ return 0x1 << 10;
+}
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+ return (r >> 10) & 0x1;
+}
+static inline u32 host1x_channel_fifostat_cfempty_notempty_v(void)
+{
+ return 0;
+}
+static inline u32 host1x_channel_fifostat_cfempty_empty_v(void)
+{
+ return 1;
+}
+static inline u32 host1x_channel_fifostat_outfentries_s(void)
+{
+ return 5;
+}
+static inline u32 host1x_channel_fifostat_outfentries_f(u32 v)
+{
+ return (v & 0x1f) << 24;
+}
+static inline u32 host1x_channel_fifostat_outfentries_m(void)
+{
+ return 0x1f << 24;
+}
+static inline u32 host1x_channel_fifostat_outfentries_v(u32 r)
+{
+ return (r >> 24) & 0x1f;
+}
+static inline u32 host1x_channel_inddata_r(void)
+{
+ return 0xc;
+}
+static inline u32 host1x_channel_dmastart_r(void)
+{
+ return 0x14;
+}
+static inline u32 host1x_channel_dmaput_r(void)
+{
+ return 0x18;
+}
+static inline u32 host1x_channel_dmaget_r(void)
+{
+ return 0x1c;
+}
+static inline u32 host1x_channel_dmaend_r(void)
+{
+ return 0x20;
+}
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+ return 0x24;
+}
+static inline u32 host1x_channel_dmactrl_dmastop_s(void)
+{
+ return 1;
+}
+static inline u32 host1x_channel_dmactrl_dmastop_f(u32 v)
+{
+ return (v & 0x1) << 0;
+}
+static inline u32 host1x_channel_dmactrl_dmastop_m(void)
+{
+ return 0x1 << 0;
+}
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+ return (r >> 0) & 0x1;
+}
+static inline u32 host1x_channel_dmactrl_dmastop_run_v(void)
+{
+ return 0;
+}
+static inline u32 host1x_channel_dmactrl_dmastop_stop_v(void)
+{
+ return 1;
+}
+static inline u32 host1x_channel_dmactrl_dmagetrst_s(void)
+{
+ return 1;
+}
+static inline u32 host1x_channel_dmactrl_dmagetrst_f(u32 v)
+{
+ return (v & 0x1) << 1;
+}
+static inline u32 host1x_channel_dmactrl_dmagetrst_m(void)
+{
+ return 0x1 << 1;
+}
+static inline u32 host1x_channel_dmactrl_dmagetrst_v(u32 r)
+{
+ return (r >> 1) & 0x1;
+}
+static inline u32 host1x_channel_dmactrl_dmainitget_s(void)
+{
+ return 1;
+}
+static inline u32 host1x_channel_dmactrl_dmainitget_f(u32 v)
+{
+ return (v & 0x1) << 2;
+}
+static inline u32 host1x_channel_dmactrl_dmainitget_m(void)
+{
+ return 0x1 << 2;
+}
+static inline u32 host1x_channel_dmactrl_dmainitget_v(u32 r)
+{
+ return (r >> 2) & 0x1;
+}
+
+#endif /* __hw_host1x_channel_host1x_h__ */
diff --git a/drivers/video/tegra/host/host1x/hw_host1x01_sync.h b/drivers/video/tegra/host/host1x/hw_host1x01_sync.h
new file mode 100644
index 000000000000..67f0cbfb85b9
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/hw_host1x01_sync.h
@@ -0,0 +1,398 @@
+/*
+ * drivers/video/tegra/host/host1x/hw_host1x01_sync.h
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
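+ /*
+ * Illustrative example (not part of the generated accessors): _f()
+ * values are or'd together to form a full register value, as the
+ * debug code in this series does ("sync_regs" and "chid" are
+ * placeholders):
+ *
+ * writel(host1x_sync_cfpeek_ctrl_cfpeek_ena_f(1)
+ * | host1x_sync_cfpeek_ctrl_cfpeek_channr_f(chid),
+ * sync_regs + host1x_sync_cfpeek_ctrl_r());
+ */
+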
+#ifndef __hw_host1x_sync_host1x_h__
+#define __hw_host1x_sync_host1x_h__
+/* This file is autogenerated. Do not edit. */
+
+static inline u32 host1x_sync_intmask_r(void)
+{
+ return 0x4;
+}
+static inline u32 host1x_sync_intc0mask_r(void)
+{
+ return 0x8;
+}
+static inline u32 host1x_sync_hintstatus_r(void)
+{
+ return 0x20;
+}
+static inline u32 host1x_sync_hintmask_r(void)
+{
+ return 0x24;
+}
+static inline u32 host1x_sync_hintstatus_ext_r(void)
+{
+ return 0x28;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_read_int_s(void)
+{
+ return 1;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_read_int_f(u32 v)
+{
+ return (v & 0x1) << 30;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_read_int_m(void)
+{
+ return 0x1 << 30;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_read_int_v(u32 r)
+{
+ return (r >> 30) & 0x1;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_write_int_s(void)
+{
+ return 1;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_write_int_f(u32 v)
+{
+ return (v & 0x1) << 31;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_write_int_m(void)
+{
+ return 0x1 << 31;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_write_int_v(u32 r)
+{
+ return (r >> 31) & 0x1;
+}
+static inline u32 host1x_sync_hintmask_ext_r(void)
+{
+ return 0x2c;
+}
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(void)
+{
+ return 0x40;
+}
+static inline u32 host1x_sync_syncpt_thresh_cpu1_int_status_r(void)
+{
+ return 0x48;
+}
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(void)
+{
+ return 0x60;
+}
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(void)
+{
+ return 0x68;
+}
+static inline u32 host1x_sync_cf0_setup_r(void)
+{
+ return 0x80;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_base_s(void)
+{
+ return 9;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_base_f(u32 v)
+{
+ return (v & 0x1ff) << 0;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_base_m(void)
+{
+ return 0x1ff << 0;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_base_v(u32 r)
+{
+ return (r >> 0) & 0x1ff;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_limit_s(void)
+{
+ return 9;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_limit_f(u32 v)
+{
+ return (v & 0x1ff) << 16;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_limit_m(void)
+{
+ return 0x1ff << 16;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_limit_v(u32 r)
+{
+ return (r >> 16) & 0x1ff;
+}
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+ return 0xac;
+}
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+ return 0xb0;
+}
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+ return 0x1a4;
+}
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+ return 0x1a8;
+}
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+ return 0x1bc;
+}
+static inline u32 host1x_sync_ip_read_timeout_addr_r(void)
+{
+ return 0x1c0;
+}
+static inline u32 host1x_sync_ip_write_timeout_addr_r(void)
+{
+ return 0x1c4;
+}
+static inline u32 host1x_sync_mlock_0_r(void)
+{
+ return 0x2c0;
+}
+static inline u32 host1x_sync_mlock_owner_0_r(void)
+{
+ return 0x340;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_owner_chid_0_s(void)
+{
+ return 4;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_owner_chid_0_f(u32 v)
+{
+ return (v & 0xf) << 8;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_owner_chid_0_m(void)
+{
+ return 0xf << 8;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_owner_chid_0_v(u32 r)
+{
+ return (r >> 8) & 0xf;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_cpu_owns_0_s(void)
+{
+ return 1;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_cpu_owns_0_f(u32 v)
+{
+ return (v & 0x1) << 1;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_cpu_owns_0_m(void)
+{
+ return 0x1 << 1;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_cpu_owns_0_v(u32 r)
+{
+ return (r >> 1) & 0x1;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_ch_owns_0_s(void)
+{
+ return 1;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_ch_owns_0_f(u32 v)
+{
+ return (v & 0x1) << 0;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_ch_owns_0_m(void)
+{
+ return 0x1 << 0;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_ch_owns_0_v(u32 r)
+{
+ return (r >> 0) & 0x1;
+}
+static inline u32 host1x_sync_syncpt_0_r(void)
+{
+ return 0x400;
+}
+static inline u32 host1x_sync_syncpt_int_thresh_0_r(void)
+{
+ return 0x500;
+}
+static inline u32 host1x_sync_syncpt_base_0_r(void)
+{
+ return 0x600;
+}
+static inline u32 host1x_sync_syncpt_cpu_incr_r(void)
+{
+ return 0x700;
+}
+static inline u32 host1x_sync_cbread0_r(void)
+{
+ return 0x720;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+ return 0x74c;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_addr_s(void)
+{
+ return 9;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_addr_f(u32 v)
+{
+ return (v & 0x1ff) << 0;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_addr_m(void)
+{
+ return 0x1ff << 0;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_addr_v(u32 r)
+{
+ return (r >> 0) & 0x1ff;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_channr_s(void)
+{
+ return 3;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_channr_f(u32 v)
+{
+ return (v & 0x7) << 16;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_channr_m(void)
+{
+ return 0x7 << 16;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_channr_v(u32 r)
+{
+ return (r >> 16) & 0x7;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_ena_s(void)
+{
+ return 1;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_ena_f(u32 v)
+{
+ return (v & 0x1) << 31;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_ena_m(void)
+{
+ return 0x1 << 31;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_ena_v(u32 r)
+{
+ return (r >> 31) & 0x1;
+}
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+ return 0x750;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+ return 0x754;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_s(void)
+{
+ return 9;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_f(u32 v)
+{
+ return (v & 0x1ff) << 0;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_m(void)
+{
+ return 0x1ff << 0;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+ return (r >> 0) & 0x1ff;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_s(void)
+{
+ return 9;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_f(u32 v)
+{
+ return (v & 0x1ff) << 16;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_m(void)
+{
+ return 0x1ff << 16;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+ return (r >> 16) & 0x1ff;
+}
+static inline u32 host1x_sync_cbstat_0_r(void)
+{
+ return 0x758;
+}
+static inline u32 host1x_sync_cbstat_0_cboffset0_s(void)
+{
+ return 16;
+}
+static inline u32 host1x_sync_cbstat_0_cboffset0_f(u32 v)
+{
+ return (v & 0xffff) << 0;
+}
+static inline u32 host1x_sync_cbstat_0_cboffset0_m(void)
+{
+ return 0xffff << 0;
+}
+static inline u32 host1x_sync_cbstat_0_cboffset0_v(u32 r)
+{
+ return (r >> 0) & 0xffff;
+}
+static inline u32 host1x_sync_cbstat_0_cbclass0_s(void)
+{
+ return 10;
+}
+static inline u32 host1x_sync_cbstat_0_cbclass0_f(u32 v)
+{
+ return (v & 0x3ff) << 16;
+}
+static inline u32 host1x_sync_cbstat_0_cbclass0_m(void)
+{
+ return 0x3ff << 16;
+}
+static inline u32 host1x_sync_cbstat_0_cbclass0_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+
+#endif /* __hw_host1x_sync_host1x_h__ */
diff --git a/drivers/video/tegra/host/host1x/hw_host1x01_uclass.h b/drivers/video/tegra/host/host1x/hw_host1x01_uclass.h
new file mode 100644
index 000000000000..ed6e4b706ab9
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/hw_host1x01_uclass.h
@@ -0,0 +1,474 @@
+/*
+ * drivers/video/tegra/host/host1x/hw_host1x01_uclass.h
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
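+
+ /*
+ * Example composition (illustrative only, using the conventions above):
+ * to replace field <y> of register <x> in a previously read value 'r':
+ *
+ *	r = (r & ~<x>_<y>_m()) | <x>_<y>_f(new_val);
+ *
+ * and to test a field against a named constant value <z>:
+ *
+ *	if (<x>_<y>_v(r) == <x>_<y>_<z>_v())
+ *		handle_z();
+ */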
+
+#ifndef __hw_host1x_uclass_host1x_h__
+#define __hw_host1x_uclass_host1x_h__
+/* This file is autogenerated. Do not edit. */
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+ return 0x0;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_s(void)
+{
+ return 8;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+ return (v & 0xff) << 8;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_m(void)
+{
+ return 0xff << 8;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_v(u32 r)
+{
+ return (r >> 8) & 0xff;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_immediate_v(void)
+{
+ return 0;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_op_done_v(void)
+{
+ return 1;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_rd_done_v(void)
+{
+ return 2;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_reg_wr_safe_v(void)
+{
+ return 3;
+}
+static inline u32 host1x_uclass_incr_syncpt_indx_s(void)
+{
+ return 8;
+}
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 0;
+}
+static inline u32 host1x_uclass_incr_syncpt_indx_m(void)
+{
+ return 0xff << 0;
+}
+static inline u32 host1x_uclass_incr_syncpt_indx_v(u32 r)
+{
+ return (r >> 0) & 0xff;
+}
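+/*
+ * Illustrative sketch, not generated code: a complete incr_syncpt
+ * method value combines the condition and index fields, e.g.
+ * "increment syncpt 'id' once the unit's current operation is done":
+ *
+ *	u32 v = host1x_uclass_incr_syncpt_cond_f(
+ *			host1x_uclass_incr_syncpt_cond_op_done_v())
+ *		| host1x_uclass_incr_syncpt_indx_f(id);
+ */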
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+ return 0x8;
+}
+static inline u32 host1x_uclass_wait_syncpt_indx_s(void)
+{
+ return 8;
+}
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+static inline u32 host1x_uclass_wait_syncpt_indx_m(void)
+{
+ return 0xff << 24;
+}
+static inline u32 host1x_uclass_wait_syncpt_indx_v(u32 r)
+{
+ return (r >> 24) & 0xff;
+}
+static inline u32 host1x_uclass_wait_syncpt_thresh_s(void)
+{
+ return 24;
+}
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+static inline u32 host1x_uclass_wait_syncpt_thresh_m(void)
+{
+ return 0xffffff << 0;
+}
+static inline u32 host1x_uclass_wait_syncpt_thresh_v(u32 r)
+{
+ return (r >> 0) & 0xffffff;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+ return 0x9;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_indx_s(void)
+{
+ return 8;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_indx_m(void)
+{
+ return 0xff << 24;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_indx_v(u32 r)
+{
+ return (r >> 24) & 0xff;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_s(void)
+{
+ return 8;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 16;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_m(void)
+{
+ return 0xff << 16;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_v(u32 r)
+{
+ return (r >> 16) & 0xff;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_offset_s(void)
+{
+ return 16;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffff) << 0;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_offset_m(void)
+{
+ return 0xffff << 0;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_offset_v(u32 r)
+{
+ return (r >> 0) & 0xffff;
+}
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+ return 0xb;
+}
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_s(void)
+{
+ return 8;
+}
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_m(void)
+{
+ return 0xff << 24;
+}
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_v(u32 r)
+{
+ return (r >> 24) & 0xff;
+}
+static inline u32 host1x_uclass_load_syncpt_base_value_s(void)
+{
+ return 24;
+}
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+static inline u32 host1x_uclass_load_syncpt_base_value_m(void)
+{
+ return 0xffffff << 0;
+}
+static inline u32 host1x_uclass_load_syncpt_base_value_v(u32 r)
+{
+ return (r >> 0) & 0xffffff;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_r(void)
+{
+ return 0xc;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_s(void)
+{
+ return 8;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_m(void)
+{
+ return 0xff << 24;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_v(u32 r)
+{
+ return (r >> 24) & 0xff;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_offset_s(void)
+{
+ return 24;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_offset_m(void)
+{
+ return 0xffffff << 0;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_offset_v(u32 r)
+{
+ return (r >> 0) & 0xffffff;
+}
+static inline u32 host1x_uclass_indoff_r(void)
+{
+ return 0x2d;
+}
+static inline u32 host1x_uclass_indoff_indbe_s(void)
+{
+ return 4;
+}
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+ return (v & 0xf) << 28;
+}
+static inline u32 host1x_uclass_indoff_indbe_m(void)
+{
+ return 0xf << 28;
+}
+static inline u32 host1x_uclass_indoff_indbe_v(u32 r)
+{
+ return (r >> 28) & 0xf;
+}
+static inline u32 host1x_uclass_indoff_autoinc_s(void)
+{
+ return 1;
+}
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+ return (v & 0x1) << 27;
+}
+static inline u32 host1x_uclass_indoff_autoinc_m(void)
+{
+ return 0x1 << 27;
+}
+static inline u32 host1x_uclass_indoff_autoinc_v(u32 r)
+{
+ return (r >> 27) & 0x1;
+}
+static inline u32 host1x_uclass_indoff_spool_s(void)
+{
+ return 1;
+}
+static inline u32 host1x_uclass_indoff_spool_f(u32 v)
+{
+ return (v & 0x1) << 26;
+}
+static inline u32 host1x_uclass_indoff_spool_m(void)
+{
+ return 0x1 << 26;
+}
+static inline u32 host1x_uclass_indoff_spool_v(u32 r)
+{
+ return (r >> 26) & 0x1;
+}
+static inline u32 host1x_uclass_indoff_indoffset_s(void)
+{
+ return 24;
+}
+static inline u32 host1x_uclass_indoff_indoffset_f(u32 v)
+{
+ return (v & 0xffffff) << 2;
+}
+static inline u32 host1x_uclass_indoff_indoffset_m(void)
+{
+ return 0xffffff << 2;
+}
+static inline u32 host1x_uclass_indoff_indoffset_v(u32 r)
+{
+ return (r >> 2) & 0xffffff;
+}
+static inline u32 host1x_uclass_indoff_indmodid_s(void)
+{
+ return 8;
+}
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+ return (v & 0xff) << 18;
+}
+static inline u32 host1x_uclass_indoff_indmodid_m(void)
+{
+ return 0xff << 18;
+}
+static inline u32 host1x_uclass_indoff_indmodid_v(u32 r)
+{
+ return (r >> 18) & 0xff;
+}
+static inline u32 host1x_uclass_indoff_indmodid_host1x_v(void)
+{
+ return 0;
+}
+static inline u32 host1x_uclass_indoff_indmodid_mpe_v(void)
+{
+ return 1;
+}
+static inline u32 host1x_uclass_indoff_indmodid_vi_v(void)
+{
+ return 2;
+}
+static inline u32 host1x_uclass_indoff_indmodid_epp_v(void)
+{
+ return 3;
+}
+static inline u32 host1x_uclass_indoff_indmodid_isp_v(void)
+{
+ return 4;
+}
+static inline u32 host1x_uclass_indoff_indmodid_gr2d_v(void)
+{
+ return 5;
+}
+static inline u32 host1x_uclass_indoff_indmodid_gr3d_v(void)
+{
+ return 6;
+}
+static inline u32 host1x_uclass_indoff_indmodid_display_v(void)
+{
+ return 8;
+}
+static inline u32 host1x_uclass_indoff_indmodid_tvo_v(void)
+{
+ return 11;
+}
+static inline u32 host1x_uclass_indoff_indmodid_displayb_v(void)
+{
+ return 9;
+}
+static inline u32 host1x_uclass_indoff_indmodid_dsi_v(void)
+{
+ return 12;
+}
+static inline u32 host1x_uclass_indoff_indmodid_hdmi_v(void)
+{
+ return 10;
+}
+static inline u32 host1x_uclass_indoff_indmodid_dsib_v(void)
+{
+ return 16;
+}
+static inline u32 host1x_uclass_indoff_indroffset_s(void)
+{
+ return 16;
+}
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+ return (v & 0xffff) << 2;
+}
+static inline u32 host1x_uclass_indoff_indroffset_m(void)
+{
+ return 0xffff << 2;
+}
+static inline u32 host1x_uclass_indoff_indroffset_v(u32 r)
+{
+ return (r >> 2) & 0xffff;
+}
+static inline u32 host1x_uclass_indoff_acctype_s(void)
+{
+ return 1;
+}
+static inline u32 host1x_uclass_indoff_acctype_f(u32 v)
+{
+ return (v & 0x1) << 1;
+}
+static inline u32 host1x_uclass_indoff_acctype_m(void)
+{
+ return 0x1 << 1;
+}
+static inline u32 host1x_uclass_indoff_acctype_v(u32 r)
+{
+ return (r >> 1) & 0x1;
+}
+static inline u32 host1x_uclass_indoff_acctype_reg_v(void)
+{
+ return 0;
+}
+static inline u32 host1x_uclass_indoff_acctype_fb_v(void)
+{
+ return 1;
+}
+static inline u32 host1x_uclass_indoff_rwn_s(void)
+{
+ return 1;
+}
+static inline u32 host1x_uclass_indoff_rwn_f(u32 v)
+{
+ return (v & 0x1) << 0;
+}
+static inline u32 host1x_uclass_indoff_rwn_m(void)
+{
+ return 0x1 << 0;
+}
+static inline u32 host1x_uclass_indoff_rwn_v(u32 r)
+{
+ return (r >> 0) & 0x1;
+}
+static inline u32 host1x_uclass_indoff_rwn_write_v(void)
+{
+ return 0;
+}
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+ return 1;
+}
+static inline u32 host1x_uclass_inddata_r(void)
+{
+ return 0x2e;
+}
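+/*
+ * Illustrative sketch, not generated code: an indirect register read
+ * through INDOFF/INDDATA composes the fields above for module 'mod'
+ * and register offset 'off':
+ *
+ *	u32 v = host1x_uclass_indoff_indmodid_f(mod)
+ *		| host1x_uclass_indoff_indroffset_f(off)
+ *		| host1x_uclass_indoff_acctype_f(
+ *			host1x_uclass_indoff_acctype_reg_v())
+ *		| host1x_uclass_indoff_rwn_f(
+ *			host1x_uclass_indoff_rwn_read_v());
+ *
+ * after which the data is fetched from host1x_uclass_inddata_r().
+ */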
+
+#endif /* __hw_host1x_uclass_host1x_h__ */
diff --git a/drivers/video/tegra/host/isp/Makefile b/drivers/video/tegra/host/isp/Makefile
new file mode 100644
index 000000000000..7bcdc33c83dc
--- /dev/null
+++ b/drivers/video/tegra/host/isp/Makefile
@@ -0,0 +1,7 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-isp-objs = \
+ isp.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-isp.o
diff --git a/drivers/video/tegra/host/isp/isp.c b/drivers/video/tegra/host/isp/isp.c
new file mode 100644
index 000000000000..0a3cc3b03578
--- /dev/null
+++ b/drivers/video/tegra/host/isp/isp.c
@@ -0,0 +1,79 @@
+/*
+ * drivers/video/tegra/host/isp/isp.c
+ *
+ * Tegra Graphics ISP
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "dev.h"
+#include "bus_client.h"
+
+static int __devinit isp_probe(struct nvhost_device *dev,
+ struct nvhost_device_id *id_table)
+{
+ int err = 0;
+
+ err = nvhost_client_device_get_resources(dev);
+ if (err)
+ return err;
+
+ return nvhost_client_device_init(dev);
+}
+
+static int __exit isp_remove(struct nvhost_device *dev)
+{
+ /* Add clean-up */
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int isp_suspend(struct nvhost_device *dev, pm_message_t state)
+{
+ return nvhost_client_device_suspend(dev);
+}
+
+static int isp_resume(struct nvhost_device *dev)
+{
+ dev_info(&dev->dev, "resuming\n");
+ return 0;
+}
+#endif
+
+static struct nvhost_driver isp_driver = {
+ .probe = isp_probe,
+ .remove = __exit_p(isp_remove),
+#ifdef CONFIG_PM
+ .suspend = isp_suspend,
+ .resume = isp_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "isp",
+ }
+};
+
+static int __init isp_init(void)
+{
+ return nvhost_driver_register(&isp_driver);
+}
+
+static void __exit isp_exit(void)
+{
+ nvhost_driver_unregister(&isp_driver);
+}
+
+module_init(isp_init);
+module_exit(isp_exit);
diff --git a/drivers/video/tegra/host/mpe/Makefile b/drivers/video/tegra/host/mpe/Makefile
new file mode 100644
index 000000000000..efd77bb88fe7
--- /dev/null
+++ b/drivers/video/tegra/host/mpe/Makefile
@@ -0,0 +1,7 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-mpe-objs = \
+ mpe.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-mpe.o
diff --git a/drivers/video/tegra/host/mpe/mpe.c b/drivers/video/tegra/host/mpe/mpe.c
new file mode 100644
index 000000000000..d76ee0108eef
--- /dev/null
+++ b/drivers/video/tegra/host/mpe/mpe.c
@@ -0,0 +1,680 @@
+/*
+ * drivers/video/tegra/host/mpe/mpe.c
+ *
+ * Tegra Graphics Host MPE
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_hwctx.h"
+#include "nvhost_channel.h"
+#include "dev.h"
+#include "host1x/host1x01_hardware.h"
+#include "host1x/host1x_hwctx.h"
+#include "t20/t20.h"
+#include "chip_support.h"
+#include "nvhost_memmgr.h"
+
+#include <linux/slab.h>
+
+#include "bus_client.h"
+
+enum {
+ HWCTX_REGINFO_NORMAL = 0,
+ HWCTX_REGINFO_STASH,
+ HWCTX_REGINFO_CALCULATE,
+ HWCTX_REGINFO_WRITEBACK
+};
+
+const struct hwctx_reginfo ctxsave_regs_mpe[] = {
+ HWCTX_REGINFO(0x124, 1, STASH),
+ HWCTX_REGINFO(0x123, 1, STASH),
+ HWCTX_REGINFO(0x103, 1, STASH),
+ HWCTX_REGINFO(0x074, 1, STASH),
+ HWCTX_REGINFO(0x021, 1, NORMAL),
+ HWCTX_REGINFO(0x020, 1, STASH),
+ HWCTX_REGINFO(0x024, 2, NORMAL),
+ HWCTX_REGINFO(0x0e6, 1, NORMAL),
+ HWCTX_REGINFO(0x3fc, 1, NORMAL),
+ HWCTX_REGINFO(0x3d0, 1, NORMAL),
+ HWCTX_REGINFO(0x3d4, 1, NORMAL),
+ HWCTX_REGINFO(0x013, 1, NORMAL),
+ HWCTX_REGINFO(0x022, 1, NORMAL),
+ HWCTX_REGINFO(0x030, 4, NORMAL),
+ HWCTX_REGINFO(0x023, 1, NORMAL),
+ HWCTX_REGINFO(0x070, 1, NORMAL),
+ HWCTX_REGINFO(0x0a0, 9, NORMAL),
+ HWCTX_REGINFO(0x071, 1, NORMAL),
+ HWCTX_REGINFO(0x100, 4, NORMAL),
+ HWCTX_REGINFO(0x104, 2, NORMAL),
+ HWCTX_REGINFO(0x108, 9, NORMAL),
+ HWCTX_REGINFO(0x112, 2, NORMAL),
+ HWCTX_REGINFO(0x114, 1, STASH),
+ HWCTX_REGINFO(0x014, 1, NORMAL),
+ HWCTX_REGINFO(0x072, 1, NORMAL),
+ HWCTX_REGINFO(0x200, 1, NORMAL),
+ HWCTX_REGINFO(0x0d1, 1, NORMAL),
+ HWCTX_REGINFO(0x0d0, 1, NORMAL),
+ HWCTX_REGINFO(0x0c0, 1, NORMAL),
+ HWCTX_REGINFO(0x0c3, 2, NORMAL),
+ HWCTX_REGINFO(0x0d2, 1, NORMAL),
+ HWCTX_REGINFO(0x0d8, 1, NORMAL),
+ HWCTX_REGINFO(0x0e0, 2, NORMAL),
+ HWCTX_REGINFO(0x07f, 2, NORMAL),
+ HWCTX_REGINFO(0x084, 8, NORMAL),
+ HWCTX_REGINFO(0x0d3, 1, NORMAL),
+ HWCTX_REGINFO(0x040, 13, NORMAL),
+ HWCTX_REGINFO(0x050, 6, NORMAL),
+ HWCTX_REGINFO(0x058, 1, NORMAL),
+ HWCTX_REGINFO(0x057, 1, NORMAL),
+ HWCTX_REGINFO(0x111, 1, NORMAL),
+ HWCTX_REGINFO(0x130, 3, NORMAL),
+ HWCTX_REGINFO(0x201, 1, NORMAL),
+ HWCTX_REGINFO(0x068, 2, NORMAL),
+ HWCTX_REGINFO(0x08c, 1, NORMAL),
+ HWCTX_REGINFO(0x0cf, 1, NORMAL),
+ HWCTX_REGINFO(0x082, 2, NORMAL),
+ HWCTX_REGINFO(0x075, 1, NORMAL),
+ HWCTX_REGINFO(0x0e8, 1, NORMAL),
+ HWCTX_REGINFO(0x056, 1, NORMAL),
+ HWCTX_REGINFO(0x057, 1, NORMAL),
+ HWCTX_REGINFO(0x073, 1, CALCULATE),
+ HWCTX_REGINFO(0x074, 1, NORMAL),
+ HWCTX_REGINFO(0x075, 1, NORMAL),
+ HWCTX_REGINFO(0x076, 1, STASH),
+ HWCTX_REGINFO(0x11a, 9, NORMAL),
+ HWCTX_REGINFO(0x123, 1, NORMAL),
+ HWCTX_REGINFO(0x124, 1, NORMAL),
+ HWCTX_REGINFO(0x12a, 5, NORMAL),
+ HWCTX_REGINFO(0x12f, 1, STASH),
+ HWCTX_REGINFO(0x125, 2, NORMAL),
+ HWCTX_REGINFO(0x034, 1, NORMAL),
+ HWCTX_REGINFO(0x133, 2, NORMAL),
+ HWCTX_REGINFO(0x127, 1, NORMAL),
+ HWCTX_REGINFO(0x106, 1, WRITEBACK),
+ HWCTX_REGINFO(0x107, 1, WRITEBACK)
+};
+
+#define NR_STASHES 8
+#define NR_WRITEBACKS 2
+
+#define RC_RAM_LOAD_CMD 0x115
+#define RC_RAM_LOAD_DATA 0x116
+#define RC_RAM_READ_CMD 0x128
+#define RC_RAM_READ_DATA 0x129
+#define RC_RAM_SIZE 692
+
+#define IRFR_RAM_LOAD_CMD 0xc5
+#define IRFR_RAM_LOAD_DATA 0xc6
+#define IRFR_RAM_READ_CMD 0xcd
+#define IRFR_RAM_READ_DATA 0xce
+#define IRFR_RAM_SIZE 408
+
+struct mpe_save_info {
+ u32 in[NR_STASHES];
+ u32 out[NR_WRITEBACKS];
+ unsigned in_pos;
+ unsigned out_pos;
+ u32 h264_mode;
+};
+
+/*** restore ***/
+
+static unsigned int restore_size;
+
+static void restore_begin(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+ /* set class to host */
+ ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_incr_syncpt_base_r(), 1);
+ /* increment sync point base */
+ ptr[1] = nvhost_class_host_incr_syncpt_base(h->waitbase, 1);
+ /* set class to MPE */
+ ptr[2] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
+}
+#define RESTORE_BEGIN_SIZE 3
+
+static void restore_ram(u32 *ptr, unsigned words,
+ unsigned cmd_reg, unsigned data_reg)
+{
+ ptr[0] = nvhost_opcode_imm(cmd_reg, words);
+ ptr[1] = nvhost_opcode_nonincr(data_reg, words);
+}
+#define RESTORE_RAM_SIZE 2
+
+static void restore_end(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+ /* syncpt increment to track restore gather. */
+ ptr[0] = nvhost_opcode_imm_incr_syncpt(
+ host1x_uclass_incr_syncpt_cond_op_done_v(),
+ h->syncpt);
+}
+#define RESTORE_END_SIZE 1
+
+static u32 *setup_restore_regs(u32 *ptr,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+
+ for ( ; regs != rend; ++regs) {
+ u32 offset = regs->offset;
+ u32 count = regs->count;
+ *ptr++ = nvhost_opcode_incr(offset, count);
+ ptr += count;
+ }
+ return ptr;
+}
+
+static u32 *setup_restore_ram(u32 *ptr, unsigned words,
+ unsigned cmd_reg, unsigned data_reg)
+{
+ restore_ram(ptr, words, cmd_reg, data_reg);
+ return ptr + (RESTORE_RAM_SIZE + words);
+}
+
+static void setup_restore(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+ restore_begin(h, ptr);
+ ptr += RESTORE_BEGIN_SIZE;
+
+ ptr = setup_restore_regs(ptr, ctxsave_regs_mpe,
+ ARRAY_SIZE(ctxsave_regs_mpe));
+
+ ptr = setup_restore_ram(ptr, RC_RAM_SIZE,
+ RC_RAM_LOAD_CMD, RC_RAM_LOAD_DATA);
+
+ ptr = setup_restore_ram(ptr, IRFR_RAM_SIZE,
+ IRFR_RAM_LOAD_CMD, IRFR_RAM_LOAD_DATA);
+
+ restore_end(h, ptr);
+
+ wmb();
+}
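+
+/*
+ * Layout of the restore gather assembled above, in words (sketch):
+ * RESTORE_BEGIN_SIZE opcodes, then one incr opcode plus 'count' data
+ * words per ctxsave_regs_mpe entry, then the two RAM reloads at
+ * RESTORE_RAM_SIZE + RC_RAM_SIZE and RESTORE_RAM_SIZE + IRFR_RAM_SIZE
+ * words, and finally RESTORE_END_SIZE. restore_size, computed in
+ * setup_save(), must account for exactly this layout.
+ */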
+
+/*** save ***/
+
+struct save_info {
+ u32 *ptr;
+ unsigned int save_count;
+ unsigned int restore_count;
+};
+
+static void save_begin(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+ /* MPE: when done, increment syncpt to base+1 */
+ ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
+ ptr[1] = nvhost_opcode_imm_incr_syncpt(
+ host1x_uclass_incr_syncpt_cond_op_done_v(), h->syncpt);
+ /* host: wait for syncpt base+1 */
+ ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_wait_syncpt_base_r(), 1);
+ ptr[3] = nvhost_class_host_wait_syncpt_base(h->syncpt, h->waitbase, 1);
+ /* host: signal context read thread to start reading */
+ ptr[4] = nvhost_opcode_imm_incr_syncpt(
+ host1x_uclass_incr_syncpt_cond_immediate_v(),
+ h->syncpt);
+}
+#define SAVE_BEGIN_SIZE 5
+
+static void save_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_indoff_r(), 1);
+ ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_MPE,
+ start_reg, true);
+ ptr[2] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
+}
+#define SAVE_DIRECT_SIZE 3
+
+static void save_set_ram_cmd(u32 *ptr, u32 cmd_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+ cmd_reg, 1);
+ ptr[1] = count;
+}
+#define SAVE_SET_RAM_CMD_SIZE 2
+
+static void save_read_ram_data_nasty(u32 *ptr, u32 data_reg)
+{
+ ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_indoff_r(), 1);
+ ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_MPE,
+ data_reg, false);
+ ptr[2] = nvhost_opcode_imm(host1x_uclass_inddata_r(), 0);
+	/* write junk data to avoid a problem with cached register memory */
+ ptr[3] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+ data_reg, 1);
+ ptr[4] = 0x99;
+}
+#define SAVE_READ_RAM_DATA_NASTY_SIZE 5
+
+static void save_end(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+ /* Wait for context read service to finish (cpu incr 3) */
+ ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ host1x_uclass_wait_syncpt_base_r(), 1);
+ ptr[1] = nvhost_class_host_wait_syncpt_base(h->syncpt, h->waitbase, 3);
+ /* Advance syncpoint base */
+ ptr[2] = nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1);
+ ptr[3] = nvhost_class_host_incr_syncpt_base(h->waitbase, 3);
+ /* set class back to the unit */
+ ptr[4] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
+}
+#define SAVE_END_SIZE 5
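+
+/*
+ * Save handshake (sketch): the gather built from the pieces above
+ * produces three syncpt increments: an op_done incr from MPE, an
+ * immediate incr that signals the context read thread, and a CPU incr
+ * performed by ctxmpe_save_service() once the FIFO has been drained.
+ * save_end then waits for base+3 and advances the wait base to match.
+ */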
+
+static void setup_save_regs(struct save_info *info,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+ u32 *ptr = info->ptr;
+ unsigned int save_count = info->save_count;
+ unsigned int restore_count = info->restore_count;
+
+ for ( ; regs != rend; ++regs) {
+ u32 offset = regs->offset;
+ u32 count = regs->count;
+ if (regs->type != HWCTX_REGINFO_WRITEBACK) {
+ if (ptr) {
+ save_direct(ptr, offset, count);
+ ptr += SAVE_DIRECT_SIZE;
+ memset(ptr, 0, count * 4);
+ ptr += count;
+ }
+ save_count += (SAVE_DIRECT_SIZE + count);
+ }
+ restore_count += (1 + count);
+ }
+
+ info->ptr = ptr;
+ info->save_count = save_count;
+ info->restore_count = restore_count;
+}
+
+static void setup_save_ram_nasty(struct save_info *info, unsigned words,
+ unsigned cmd_reg, unsigned data_reg)
+{
+ u32 *ptr = info->ptr;
+ unsigned int save_count = info->save_count;
+ unsigned int restore_count = info->restore_count;
+ unsigned i;
+
+ if (ptr) {
+ save_set_ram_cmd(ptr, cmd_reg, words);
+ ptr += SAVE_SET_RAM_CMD_SIZE;
+ for (i = words; i; --i) {
+ save_read_ram_data_nasty(ptr, data_reg);
+ ptr += SAVE_READ_RAM_DATA_NASTY_SIZE;
+ }
+ }
+
+ save_count += SAVE_SET_RAM_CMD_SIZE;
+ save_count += words * SAVE_READ_RAM_DATA_NASTY_SIZE;
+ restore_count += (RESTORE_RAM_SIZE + words);
+
+ info->ptr = ptr;
+ info->save_count = save_count;
+ info->restore_count = restore_count;
+}
+
+static void setup_save(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+ struct save_info info = {
+ ptr,
+ SAVE_BEGIN_SIZE,
+ RESTORE_BEGIN_SIZE
+ };
+
+ if (info.ptr) {
+ save_begin(h, info.ptr);
+ info.ptr += SAVE_BEGIN_SIZE;
+ }
+
+ setup_save_regs(&info, ctxsave_regs_mpe,
+ ARRAY_SIZE(ctxsave_regs_mpe));
+
+ setup_save_ram_nasty(&info, RC_RAM_SIZE,
+ RC_RAM_READ_CMD, RC_RAM_READ_DATA);
+
+ setup_save_ram_nasty(&info, IRFR_RAM_SIZE,
+ IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);
+
+ if (info.ptr) {
+ save_end(h, info.ptr);
+ info.ptr += SAVE_END_SIZE;
+ }
+
+ wmb();
+
+ h->save_size = info.save_count + SAVE_END_SIZE;
+ restore_size = info.restore_count + RESTORE_END_SIZE;
+}
+
+static u32 calculate_mpe(u32 word, struct mpe_save_info *msi)
+{
+ u32 buffer_full_read = msi->in[0] & 0x01ffffff;
+ u32 byte_len = msi->in[1];
+ u32 drain = (msi->in[2] >> 2) & 0x007fffff;
+ u32 rep_frame = msi->in[3] & 0x0000ffff;
+ u32 h264_mode = (msi->in[4] >> 11) & 1;
+ int new_buffer_full;
+
+ if (h264_mode)
+ byte_len >>= 3;
+ new_buffer_full = buffer_full_read + byte_len - (drain * 4);
+ msi->out[0] = max(0, new_buffer_full);
+ msi->out[1] = rep_frame;
+ if (rep_frame == 0)
+ word &= 0xffff0000;
+ return word;
+}
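+
+/*
+ * Worked example (illustrative): with buffer_full_read = 1000,
+ * byte_len = 256, drain = 50 and h264_mode = 0, the new fullness is
+ * 1000 + 256 - 50 * 4 = 1056, clamped at 0 from below; in H.264 mode
+ * byte_len is first divided by 8.
+ */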
+
+static u32 *save_regs(u32 *ptr, unsigned int *pending,
+ struct nvhost_channel *channel,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs,
+ struct mpe_save_info *msi)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+
+ for ( ; regs != rend; ++regs) {
+ u32 count = regs->count;
+ ++ptr; /* restore incr */
+ if (regs->type == HWCTX_REGINFO_NORMAL) {
+ nvhost_channel_drain_read_fifo(channel,
+ ptr, count, pending);
+ ptr += count;
+ } else {
+ u32 word;
+ if (regs->type == HWCTX_REGINFO_WRITEBACK) {
+ BUG_ON(msi->out_pos >= NR_WRITEBACKS);
+ word = msi->out[msi->out_pos++];
+ } else {
+ nvhost_channel_drain_read_fifo(channel,
+ &word, 1, pending);
+ if (regs->type == HWCTX_REGINFO_STASH) {
+ BUG_ON(msi->in_pos >= NR_STASHES);
+ msi->in[msi->in_pos++] = word;
+ } else {
+ word = calculate_mpe(word, msi);
+ }
+ }
+ *ptr++ = word;
+ }
+ }
+ return ptr;
+}
+
+static u32 *save_ram(u32 *ptr, unsigned int *pending,
+ struct nvhost_channel *channel,
+ unsigned words, unsigned cmd_reg, unsigned data_reg)
+{
+ int err = 0;
+ ptr += RESTORE_RAM_SIZE;
+ err = nvhost_channel_drain_read_fifo(channel, ptr, words, pending);
+ WARN_ON(err);
+ return ptr + words;
+}
+
+/*** ctxmpe ***/
+
+static struct nvhost_hwctx *ctxmpe_alloc(struct nvhost_hwctx_handler *h,
+ struct nvhost_channel *ch)
+{
+ struct mem_mgr *memmgr = nvhost_get_host(ch->dev)->memmgr;
+ struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
+ struct host1x_hwctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+ ctx->restore = mem_op().alloc(memmgr, restore_size * 4, 32,
+ mem_mgr_flag_write_combine);
+ if (IS_ERR_OR_NULL(ctx->restore)) {
+ kfree(ctx);
+ return NULL;
+ }
+
+ ctx->restore_virt = mem_op().mmap(ctx->restore);
+ if (!ctx->restore_virt) {
+ mem_op().put(memmgr, ctx->restore);
+ kfree(ctx);
+ return NULL;
+ }
+
+ kref_init(&ctx->hwctx.ref);
+ ctx->hwctx.h = &p->h;
+ ctx->hwctx.channel = ch;
+ ctx->hwctx.valid = false;
+ ctx->save_incrs = 3;
+ ctx->save_thresh = 2;
+ ctx->save_slots = p->save_slots;
+ ctx->restore_phys = mem_op().pin(memmgr, ctx->restore);
+ ctx->restore_size = restore_size;
+ ctx->restore_incrs = 1;
+
+ setup_restore(p, ctx->restore_virt);
+
+ return &ctx->hwctx;
+}
+
+static void ctxmpe_get(struct nvhost_hwctx *ctx)
+{
+ kref_get(&ctx->ref);
+}
+
+static void ctxmpe_free(struct kref *ref)
+{
+ struct nvhost_hwctx *nctx = container_of(ref, struct nvhost_hwctx, ref);
+ struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+ struct mem_mgr *memmgr = nvhost_get_host(nctx->channel->dev)->memmgr;
+
+ if (ctx->restore_virt)
+ mem_op().munmap(ctx->restore, ctx->restore_virt);
+ mem_op().unpin(memmgr, ctx->restore);
+ mem_op().put(memmgr, ctx->restore);
+ kfree(ctx);
+}
+
+static void ctxmpe_put(struct nvhost_hwctx *ctx)
+{
+ kref_put(&ctx->ref, ctxmpe_free);
+}
+
+static void ctxmpe_save_push(struct nvhost_hwctx *nctx,
+ struct nvhost_cdma *cdma)
+{
+ struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+ struct host1x_hwctx_handler *h = host1x_hwctx_handler(ctx);
+ nvhost_cdma_push_gather(cdma,
+ nvhost_get_host(nctx->channel->dev)->memmgr,
+ h->save_buf,
+ 0,
+ nvhost_opcode_gather(h->save_size),
+ h->save_phys);
+}
+
+static void ctxmpe_save_service(struct nvhost_hwctx *nctx)
+{
+ struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+ struct host1x_hwctx_handler *h = host1x_hwctx_handler(ctx);
+
+ u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE;
+ unsigned int pending = 0;
+ struct mpe_save_info msi;
+
+ msi.in_pos = 0;
+ msi.out_pos = 0;
+
+ ptr = save_regs(ptr, &pending, nctx->channel,
+ ctxsave_regs_mpe, ARRAY_SIZE(ctxsave_regs_mpe), &msi);
+
+ ptr = save_ram(ptr, &pending, nctx->channel,
+ RC_RAM_SIZE, RC_RAM_READ_CMD, RC_RAM_READ_DATA);
+
+ ptr = save_ram(ptr, &pending, nctx->channel,
+ IRFR_RAM_SIZE, IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);
+
+ wmb();
+ nvhost_syncpt_cpu_incr(&nvhost_get_host(nctx->channel->dev)->syncpt,
+ h->syncpt);
+}
+
+struct nvhost_hwctx_handler *nvhost_mpe_ctxhandler_init(u32 syncpt,
+ u32 waitbase, struct nvhost_channel *ch)
+{
+ struct mem_mgr *memmgr;
+ u32 *save_ptr;
+ struct host1x_hwctx_handler *p;
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ memmgr = nvhost_get_host(ch->dev)->memmgr;
+
+ p->syncpt = syncpt;
+ p->waitbase = waitbase;
+
+ setup_save(p, NULL);
+
+ p->save_buf = mem_op().alloc(memmgr, p->save_size * 4, 32,
+ mem_mgr_flag_write_combine);
+ if (IS_ERR_OR_NULL(p->save_buf)) {
+		kfree(p);
+		return NULL;
+ }
+
+ save_ptr = mem_op().mmap(p->save_buf);
+ if (!save_ptr) {
+ mem_op().put(memmgr, p->save_buf);
+		kfree(p);
+ return NULL;
+ }
+
+ p->save_phys = mem_op().pin(memmgr, p->save_buf);
+ p->save_slots = 1;
+
+ setup_save(p, save_ptr);
+
+ p->h.alloc = ctxmpe_alloc;
+ p->h.save_push = ctxmpe_save_push;
+ p->h.save_service = ctxmpe_save_service;
+ p->h.get = ctxmpe_get;
+ p->h.put = ctxmpe_put;
+
+ return &p->h;
+}
+
+int nvhost_mpe_prepare_power_off(struct nvhost_device *dev)
+{
+ return nvhost_channel_save_context(dev->channel);
+}
+
+enum mpe_ip_ver {
+ mpe_01 = 1,
+ mpe_02,
+};
+
+struct mpe_desc {
+ int (*prepare_poweroff)(struct nvhost_device *dev);
+ struct nvhost_hwctx_handler *(*alloc_hwctx_handler)(u32 syncpt,
+ u32 waitbase, struct nvhost_channel *ch);
+};
+
+static const struct mpe_desc mpe[] = {
+ [mpe_01] = {
+ .prepare_poweroff = nvhost_mpe_prepare_power_off,
+ .alloc_hwctx_handler = nvhost_mpe_ctxhandler_init,
+ },
+ [mpe_02] = {
+ .prepare_poweroff = nvhost_mpe_prepare_power_off,
+ .alloc_hwctx_handler = nvhost_mpe_ctxhandler_init,
+ },
+};
+
+static struct nvhost_device_id mpe_id[] = {
+ { "mpe", mpe_01 },
+ { "mpe", mpe_02 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(nvhost, mpe_id);
+
+static int __devinit mpe_probe(struct nvhost_device *dev,
+ struct nvhost_device_id *id_table)
+{
+ int err = 0;
+ int index = 0;
+ struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+ index = id_table->version;
+
+ drv->prepare_poweroff = mpe[index].prepare_poweroff;
+ drv->alloc_hwctx_handler = mpe[index].alloc_hwctx_handler;
+
+ err = nvhost_client_device_get_resources(dev);
+ if (err)
+ return err;
+
+ return nvhost_client_device_init(dev);
+}
+
+static int __exit mpe_remove(struct nvhost_device *dev)
+{
+ /* Add clean-up */
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mpe_suspend(struct nvhost_device *dev, pm_message_t state)
+{
+ return nvhost_client_device_suspend(dev);
+}
+
+static int mpe_resume(struct nvhost_device *dev)
+{
+ dev_info(&dev->dev, "resuming\n");
+ return 0;
+}
+#endif
+
+static struct nvhost_driver mpe_driver = {
+ .probe = mpe_probe,
+ .remove = __exit_p(mpe_remove),
+#ifdef CONFIG_PM
+ .suspend = mpe_suspend,
+ .resume = mpe_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mpe",
+ },
+ .id_table = mpe_id,
+};
+
+static int __init mpe_init(void)
+{
+ return nvhost_driver_register(&mpe_driver);
+}
+
+static void __exit mpe_exit(void)
+{
+ nvhost_driver_unregister(&mpe_driver);
+}
+
+module_init(mpe_init);
+module_exit(mpe_exit);
diff --git a/drivers/video/tegra/host/mpe/mpe.h b/drivers/video/tegra/host/mpe/mpe.h
new file mode 100644
index 000000000000..1bc2a8a04c1a
--- /dev/null
+++ b/drivers/video/tegra/host/mpe/mpe.h
@@ -0,0 +1,32 @@
+/*
+ * drivers/video/tegra/host/mpe/mpe.h
+ *
+ * Tegra Graphics Host MPE
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_MPE_MPE_H
+#define __NVHOST_MPE_MPE_H
+
+struct nvhost_hwctx_handler;
+struct nvhost_device;
+
+struct nvhost_hwctx_handler *nvhost_mpe_ctxhandler_init(
+ u32 syncpt, u32 waitbase,
+ struct nvhost_channel *ch);
+int nvhost_mpe_prepare_power_off(struct nvhost_device *dev);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_acm.c b/drivers/video/tegra/host/nvhost_acm.c
new file mode 100644
index 000000000000..860ce6b35572
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_acm.c
@@ -0,0 +1,649 @@
+/*
+ * drivers/video/tegra/host/nvhost_acm.c
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010-2014, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_acm.h"
+#include "dev.h"
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <mach/powergate.h>
+#include <mach/clk.h>
+#include <mach/hardware.h>
+
+#define ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT (2 * HZ)
+#define POWERGATE_DELAY 10
+#define MAX_DEVID_LENGTH 16
+
+DEFINE_MUTEX(client_list_lock);
+
+struct nvhost_module_client {
+ struct list_head node;
+ unsigned long rate[NVHOST_MODULE_MAX_CLOCKS];
+ void *priv;
+};
+
+static void do_powergate_locked(int id)
+{
+ if (id != -1 && tegra_powergate_is_powered(id))
+ tegra_powergate_partition(id);
+}
+
+static void do_unpowergate_locked(int id)
+{
+ if (id != -1)
+ tegra_unpowergate_partition(id);
+}
+
+static void do_module_reset_locked(struct nvhost_device *dev)
+{
+ /* assert module and mc client reset */
+ if (dev->powergate_ids[0] != -1) {
+ tegra_powergate_mc_disable(dev->powergate_ids[0]);
+ tegra_periph_reset_assert(dev->clk[0]);
+ tegra_powergate_mc_flush(dev->powergate_ids[0]);
+ }
+ if (dev->powergate_ids[1] != -1) {
+ tegra_powergate_mc_disable(dev->powergate_ids[1]);
+ tegra_periph_reset_assert(dev->clk[1]);
+ tegra_powergate_mc_flush(dev->powergate_ids[1]);
+ }
+
+ udelay(POWERGATE_DELAY);
+
+ /* deassert reset */
+ if (dev->powergate_ids[0] != -1) {
+ tegra_powergate_mc_flush_done(dev->powergate_ids[0]);
+ tegra_periph_reset_deassert(dev->clk[0]);
+ tegra_powergate_mc_enable(dev->powergate_ids[0]);
+ }
+ if (dev->powergate_ids[1] != -1) {
+ tegra_powergate_mc_flush_done(dev->powergate_ids[1]);
+ tegra_periph_reset_deassert(dev->clk[1]);
+ tegra_powergate_mc_enable(dev->powergate_ids[1]);
+ }
+}
+
+void nvhost_module_reset(struct nvhost_device *dev)
+{
+ dev_dbg(&dev->dev,
+ "%s: asserting %s module reset (id %d, id2 %d)\n",
+ __func__, dev->name,
+ dev->powergate_ids[0], dev->powergate_ids[1]);
+
+ mutex_lock(&dev->lock);
+ do_module_reset_locked(dev);
+ mutex_unlock(&dev->lock);
+
+ dev_dbg(&dev->dev, "%s: module %s out of reset\n",
+ __func__, dev->name);
+}
+
+static void to_state_clockgated_locked(struct nvhost_device *dev)
+{
+ struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+ if (dev->powerstate == NVHOST_POWER_STATE_RUNNING) {
+ int i, err;
+ if (drv->prepare_clockoff) {
+ err = drv->prepare_clockoff(dev);
+ if (err) {
+ dev_err(&dev->dev, "error clock gating");
+ return;
+ }
+ }
+ for (i = 0; i < dev->num_clks; i++)
+ clk_disable(dev->clk[i]);
+ if (dev->dev.parent)
+ nvhost_module_idle(to_nvhost_device(dev->dev.parent));
+ } else if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED
+ && dev->can_powergate) {
+ do_unpowergate_locked(dev->powergate_ids[0]);
+ do_unpowergate_locked(dev->powergate_ids[1]);
+
+ if (dev->powerup_reset)
+ do_module_reset_locked(dev);
+ }
+ dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
+}
+
+static void to_state_running_locked(struct nvhost_device *dev)
+{
+ struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+ int prev_state = dev->powerstate;
+
+ if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED)
+ to_state_clockgated_locked(dev);
+
+ if (dev->powerstate == NVHOST_POWER_STATE_CLOCKGATED) {
+ int i;
+
+ if (dev->dev.parent)
+ nvhost_module_busy(to_nvhost_device(dev->dev.parent));
+
+ for (i = 0; i < dev->num_clks; i++) {
+ int err = clk_enable(dev->clk[i]);
+ if (err) {
+ dev_err(&dev->dev, "Cannot turn on clock %s",
+ dev->clocks[i].name);
+ return;
+ }
+ }
+
+ /* Invoke callback after enabling clock. This is used for
+ * re-enabling host1x interrupts. */
+ if (prev_state == NVHOST_POWER_STATE_CLOCKGATED
+ && drv->finalize_clockon)
+ drv->finalize_clockon(dev);
+
+ /* Invoke callback after power un-gating. This is used for
+ * restoring context. */
+ if (prev_state == NVHOST_POWER_STATE_POWERGATED
+ && drv->finalize_poweron)
+ drv->finalize_poweron(dev);
+ }
+ dev->powerstate = NVHOST_POWER_STATE_RUNNING;
+}
+
+/* This gets called from powergate_handler() and from module suspend.
+ * Module suspend is done for all modules, runtime power gating only
+ * for modules with can_powergate set.
+ */
+static int to_state_powergated_locked(struct nvhost_device *dev)
+{
+ int err = 0;
+ struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+ if (drv->prepare_poweroff
+ && dev->powerstate != NVHOST_POWER_STATE_POWERGATED) {
+ /* Clock needs to be on in prepare_poweroff */
+ to_state_running_locked(dev);
+ err = drv->prepare_poweroff(dev);
+ if (err)
+ return err;
+ }
+
+ if (dev->powerstate == NVHOST_POWER_STATE_RUNNING)
+ to_state_clockgated_locked(dev);
+
+ if (dev->can_powergate) {
+ do_powergate_locked(dev->powergate_ids[0]);
+ do_powergate_locked(dev->powergate_ids[1]);
+ }
+
+ dev->powerstate = NVHOST_POWER_STATE_POWERGATED;
+ return 0;
+}
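+
+/*
+ * Power state transitions handled above (sketch):
+ *
+ *	POWERGATED -> CLOCKGATED -> RUNNING	(to_state_running_locked)
+ *	RUNNING -> CLOCKGATED -> POWERGATED	(to_state_powergated_locked)
+ *
+ * to_state_powergated_locked() may briefly raise the module to RUNNING
+ * first, since prepare_poweroff() needs the clocks on.
+ */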
+
+static void schedule_powergating_locked(struct nvhost_device *dev)
+{
+ if (dev->can_powergate)
+ schedule_delayed_work(&dev->powerstate_down,
+ msecs_to_jiffies(dev->powergate_delay));
+}
+
+static void schedule_clockgating_locked(struct nvhost_device *dev)
+{
+ schedule_delayed_work(&dev->powerstate_down,
+ msecs_to_jiffies(dev->clockgate_delay));
+}
+
+void nvhost_module_busy(struct nvhost_device *dev)
+{
+ struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+ if (drv->busy)
+ drv->busy(dev);
+
+ mutex_lock(&dev->lock);
+ cancel_delayed_work(&dev->powerstate_down);
+
+ dev->refcount++;
+ if (dev->refcount > 0 && !nvhost_module_powered(dev))
+ to_state_running_locked(dev);
+
+ mutex_unlock(&dev->lock);
+}
+
+static void powerstate_down_handler(struct work_struct *work)
+{
+ struct nvhost_device *dev;
+
+ dev = container_of(to_delayed_work(work),
+ struct nvhost_device,
+ powerstate_down);
+
+ mutex_lock(&dev->lock);
+ if (dev->refcount == 0) {
+ switch (dev->powerstate) {
+ case NVHOST_POWER_STATE_RUNNING:
+ to_state_clockgated_locked(dev);
+ schedule_powergating_locked(dev);
+ break;
+ case NVHOST_POWER_STATE_CLOCKGATED:
+ if (to_state_powergated_locked(dev))
+ schedule_powergating_locked(dev);
+ break;
+ default:
+ break;
+ }
+ }
+ mutex_unlock(&dev->lock);
+}
+
+void nvhost_module_idle_mult(struct nvhost_device *dev, int refs)
+{
+ struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+ bool kick = false;
+
+ mutex_lock(&dev->lock);
+ dev->refcount -= refs;
+ if (dev->refcount == 0) {
+ if (nvhost_module_powered(dev))
+ schedule_clockgating_locked(dev);
+ kick = true;
+ }
+ mutex_unlock(&dev->lock);
+
+ if (kick) {
+ wake_up(&dev->idle_wq);
+
+ if (drv->idle)
+ drv->idle(dev);
+ }
+}
+
+int nvhost_module_get_rate(struct nvhost_device *dev, unsigned long *rate,
+ int index)
+{
+ struct clk *c;
+
+ c = dev->clk[index];
+ if (IS_ERR_OR_NULL(c))
+ return -EINVAL;
+
+ /* Need to enable client to get correct rate */
+ nvhost_module_busy(dev);
+ *rate = clk_get_rate(c);
+ nvhost_module_idle(dev);
+ return 0;
+}
+
+static int nvhost_module_update_rate(struct nvhost_device *dev, int index)
+{
+ unsigned long rate = 0;
+ struct nvhost_module_client *m;
+
+ if (!dev->clk[index])
+ return -EINVAL;
+
+ list_for_each_entry(m, &dev->client_list, node) {
+ rate = max(m->rate[index], rate);
+ }
+ if (!rate)
+ rate = clk_round_rate(dev->clk[index],
+ dev->clocks[index].default_rate);
+
+ return clk_set_rate(dev->clk[index], rate);
+}
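+
+/*
+ * Example (illustrative): with two clients requesting 200 MHz and
+ * 300 MHz on the same clock index, the aggregate rate is the maximum,
+ * 300 MHz; with no client requests the rounded default_rate is used.
+ */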
+
+int nvhost_module_set_rate(struct nvhost_device *dev, void *priv,
+ unsigned long rate, int index)
+{
+ struct nvhost_module_client *m;
+ int i, ret = 0;
+
+ mutex_lock(&client_list_lock);
+ list_for_each_entry(m, &dev->client_list, node) {
+ if (m->priv == priv) {
+ for (i = 0; i < dev->num_clks; i++)
+ m->rate[i] = clk_round_rate(dev->clk[i], rate);
+ break;
+ }
+ }
+
+ for (i = 0; i < dev->num_clks; i++) {
+ ret = nvhost_module_update_rate(dev, i);
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&client_list_lock);
+ return ret;
+}
+
+int nvhost_module_add_client(struct nvhost_device *dev, void *priv)
+{
+ int i;
+ unsigned long rate;
+ struct nvhost_module_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&client->node);
+ client->priv = priv;
+
+ for (i = 0; i < dev->num_clks; i++) {
+ rate = clk_round_rate(dev->clk[i],
+ dev->clocks[i].default_rate);
+ client->rate[i] = rate;
+ }
+ mutex_lock(&client_list_lock);
+ list_add_tail(&client->node, &dev->client_list);
+ mutex_unlock(&client_list_lock);
+ return 0;
+}
+
+void nvhost_module_remove_client(struct nvhost_device *dev, void *priv)
+{
+ int i;
+ struct nvhost_module_client *m;
+ int found = 0;
+
+ mutex_lock(&client_list_lock);
+ list_for_each_entry(m, &dev->client_list, node) {
+ if (priv == m->priv) {
+ list_del(&m->node);
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ kfree(m);
+ for (i = 0; i < dev->num_clks; i++)
+ nvhost_module_update_rate(dev, i);
+ }
+ mutex_unlock(&client_list_lock);
+}
+
+static ssize_t refcount_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int ret;
+ struct nvhost_device_power_attr *power_attribute =
+ container_of(attr, struct nvhost_device_power_attr, \
+ power_attr[NVHOST_POWER_SYSFS_ATTRIB_REFCOUNT]);
+ struct nvhost_device *dev = power_attribute->ndev;
+
+ mutex_lock(&dev->lock);
+ ret = sprintf(buf, "%d\n", dev->refcount);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static ssize_t powergate_delay_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int powergate_delay = 0, ret = 0;
+ struct nvhost_device_power_attr *power_attribute =
+ container_of(attr, struct nvhost_device_power_attr, \
+ power_attr[NVHOST_POWER_SYSFS_ATTRIB_POWERGATE_DELAY]);
+ struct nvhost_device *dev = power_attribute->ndev;
+
+ if (!dev->can_powergate) {
+ dev_info(&dev->dev, "does not support power-gating\n");
+ return count;
+ }
+
+ mutex_lock(&dev->lock);
+ ret = sscanf(buf, "%d", &powergate_delay);
+ if (ret == 1 && powergate_delay >= 0)
+ dev->powergate_delay = powergate_delay;
+ else
+ dev_err(&dev->dev, "Invalid powergate delay\n");
+ mutex_unlock(&dev->lock);
+
+ return count;
+}
+
+static ssize_t powergate_delay_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int ret;
+ struct nvhost_device_power_attr *power_attribute =
+ container_of(attr, struct nvhost_device_power_attr, \
+ power_attr[NVHOST_POWER_SYSFS_ATTRIB_POWERGATE_DELAY]);
+ struct nvhost_device *dev = power_attribute->ndev;
+
+ mutex_lock(&dev->lock);
+ ret = sprintf(buf, "%d\n", dev->powergate_delay);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static ssize_t clockgate_delay_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int clockgate_delay = 0, ret = 0;
+ struct nvhost_device_power_attr *power_attribute =
+ container_of(attr, struct nvhost_device_power_attr, \
+ power_attr[NVHOST_POWER_SYSFS_ATTRIB_CLOCKGATE_DELAY]);
+ struct nvhost_device *dev = power_attribute->ndev;
+
+ mutex_lock(&dev->lock);
+ ret = sscanf(buf, "%d", &clockgate_delay);
+ if (ret == 1 && clockgate_delay >= 0)
+ dev->clockgate_delay = clockgate_delay;
+ else
+ dev_err(&dev->dev, "Invalid clockgate delay\n");
+ mutex_unlock(&dev->lock);
+
+ return count;
+}
+
+static ssize_t clockgate_delay_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int ret;
+ struct nvhost_device_power_attr *power_attribute =
+ container_of(attr, struct nvhost_device_power_attr, \
+ power_attr[NVHOST_POWER_SYSFS_ATTRIB_CLOCKGATE_DELAY]);
+ struct nvhost_device *dev = power_attribute->ndev;
+
+ mutex_lock(&dev->lock);
+ ret = sprintf(buf, "%d\n", dev->clockgate_delay);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+int nvhost_module_init(struct nvhost_device *dev)
+{
+ int i = 0, err = 0;
+ struct kobj_attribute *attr = NULL;
+
+ /* initialize clocks to known state */
+ INIT_LIST_HEAD(&dev->client_list);
+	while (i < NVHOST_MODULE_MAX_CLOCKS && dev->clocks[i].name) {
+ char devname[MAX_DEVID_LENGTH];
+ long rate = dev->clocks[i].default_rate;
+ struct clk *c;
+
+ snprintf(devname, MAX_DEVID_LENGTH, "tegra_%s", dev->name);
+ c = clk_get_sys(devname, dev->clocks[i].name);
+ if (IS_ERR_OR_NULL(c)) {
+ dev_err(&dev->dev, "Cannot get clock %s\n",
+ dev->clocks[i].name);
+			break;
+ }
+
+ rate = clk_round_rate(c, rate);
+ clk_enable(c);
+ clk_set_rate(c, rate);
+ clk_disable(c);
+ dev->clk[i] = c;
+ i++;
+ }
+ dev->num_clks = i;
+
+ mutex_init(&dev->lock);
+ init_waitqueue_head(&dev->idle_wq);
+ INIT_DELAYED_WORK(&dev->powerstate_down, powerstate_down_handler);
+
+ /* power gate units that we can power gate */
+ if (dev->can_powergate) {
+ do_powergate_locked(dev->powergate_ids[0]);
+ do_powergate_locked(dev->powergate_ids[1]);
+ dev->powerstate = NVHOST_POWER_STATE_POWERGATED;
+ } else {
+ do_unpowergate_locked(dev->powergate_ids[0]);
+ do_unpowergate_locked(dev->powergate_ids[1]);
+ dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
+ }
+
+ /* Init the power sysfs attributes for this device */
+ dev->power_attrib = kzalloc(sizeof(struct nvhost_device_power_attr),
+ GFP_KERNEL);
+ if (!dev->power_attrib) {
+ dev_err(&dev->dev, "Unable to allocate sysfs attributes\n");
+ return -ENOMEM;
+ }
+ dev->power_attrib->ndev = dev;
+
+ dev->power_kobj = kobject_create_and_add("acm", &dev->dev.kobj);
+ if (!dev->power_kobj) {
+ dev_err(&dev->dev, "Could not add dir 'power'\n");
+ err = -EIO;
+ goto fail_attrib_alloc;
+ }
+
+ attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_CLOCKGATE_DELAY];
+ attr->attr.name = "clockgate_delay";
+ attr->attr.mode = S_IWUSR | S_IRUGO;
+ attr->show = clockgate_delay_show;
+ attr->store = clockgate_delay_store;
+ if (sysfs_create_file(dev->power_kobj, &attr->attr)) {
+ dev_err(&dev->dev, "Could not create sysfs attribute clockgate_delay\n");
+ err = -EIO;
+ goto fail_clockdelay;
+ }
+
+ attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_POWERGATE_DELAY];
+ attr->attr.name = "powergate_delay";
+ attr->attr.mode = S_IWUSR | S_IRUGO;
+ attr->show = powergate_delay_show;
+ attr->store = powergate_delay_store;
+ if (sysfs_create_file(dev->power_kobj, &attr->attr)) {
+ dev_err(&dev->dev, "Could not create sysfs attribute powergate_delay\n");
+ err = -EIO;
+ goto fail_powergatedelay;
+ }
+
+ attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_REFCOUNT];
+ attr->attr.name = "refcount";
+ attr->attr.mode = S_IRUGO;
+ attr->show = refcount_show;
+ if (sysfs_create_file(dev->power_kobj, &attr->attr)) {
+ dev_err(&dev->dev, "Could not create sysfs attribute refcount\n");
+ err = -EIO;
+ goto fail_refcount;
+ }
+
+ return 0;
+
+fail_refcount:
+ attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_POWERGATE_DELAY];
+ sysfs_remove_file(dev->power_kobj, &attr->attr);
+
+fail_powergatedelay:
+ attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_CLOCKGATE_DELAY];
+ sysfs_remove_file(dev->power_kobj, &attr->attr);
+
+fail_clockdelay:
+ kobject_put(dev->power_kobj);
+
+fail_attrib_alloc:
+ kfree(dev->power_attrib);
+
+ return err;
+}
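+
+/*
+ * The attributes created above are meant to be driven from userspace;
+ * the exact sysfs path depends on the device, e.g. (illustrative):
+ *
+ *	echo 25 > /sys/devices/.../acm/clockgate_delay
+ *	echo 500 > /sys/devices/.../acm/powergate_delay
+ *	cat /sys/devices/.../acm/refcount
+ */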
+
+static int is_module_idle(struct nvhost_device *dev)
+{
+ int count;
+ mutex_lock(&dev->lock);
+ count = dev->refcount;
+ mutex_unlock(&dev->lock);
+ return (count == 0);
+}
+
+int nvhost_module_suspend(struct nvhost_device *dev)
+{
+ int ret;
+ struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+ ret = wait_event_timeout(dev->idle_wq, is_module_idle(dev),
+ ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT);
+ if (ret == 0) {
+ dev_info(&dev->dev, "%s prevented suspend\n",
+ dev->name);
+ return -EBUSY;
+ }
+
+ mutex_lock(&dev->lock);
+ cancel_delayed_work(&dev->powerstate_down);
+ to_state_powergated_locked(dev);
+ mutex_unlock(&dev->lock);
+
+ if (drv->suspend_ndev)
+ drv->suspend_ndev(dev);
+
+ return 0;
+}
+
+void nvhost_module_deinit(struct nvhost_device *dev)
+{
+ int i;
+ struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+ if (drv->deinit)
+ drv->deinit(dev);
+
+ nvhost_module_suspend(dev);
+ for (i = 0; i < dev->num_clks; i++)
+ clk_put(dev->clk[i]);
+ dev->powerstate = NVHOST_POWER_STATE_DEINIT;
+}
+
+/* public host1x power management APIs */
+bool nvhost_module_powered_ext(struct nvhost_device *dev)
+{
+ return nvhost_module_powered(dev);
+}
+
+void nvhost_module_busy_ext(struct nvhost_device *dev)
+{
+ nvhost_module_busy(dev);
+}
+EXPORT_SYMBOL(nvhost_module_busy_ext);
+
+void nvhost_module_idle_ext(struct nvhost_device *dev)
+{
+ nvhost_module_idle(dev);
+}
+EXPORT_SYMBOL(nvhost_module_idle_ext);
diff --git a/drivers/video/tegra/host/nvhost_acm.h b/drivers/video/tegra/host/nvhost_acm.h
new file mode 100644
index 000000000000..a5894dcfc0b2
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_acm.h
@@ -0,0 +1,58 @@
+/*
+ * drivers/video/tegra/host/nvhost_acm.h
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_ACM_H
+#define __NVHOST_ACM_H
+
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/nvhost.h>
+
+/* Sets clocks and powergating state for a module */
+int nvhost_module_init(struct nvhost_device *ndev);
+void nvhost_module_deinit(struct nvhost_device *dev);
+int nvhost_module_suspend(struct nvhost_device *dev);
+
+void nvhost_module_reset(struct nvhost_device *dev);
+void nvhost_module_busy(struct nvhost_device *dev);
+void nvhost_module_idle_mult(struct nvhost_device *dev, int refs);
+int nvhost_module_add_client(struct nvhost_device *dev,
+ void *priv);
+void nvhost_module_remove_client(struct nvhost_device *dev,
+ void *priv);
+int nvhost_module_get_rate(struct nvhost_device *dev,
+ unsigned long *rate,
+ int index);
+int nvhost_module_set_rate(struct nvhost_device *dev, void *priv,
+ unsigned long rate, int index);
+
+static inline bool nvhost_module_powered(struct nvhost_device *dev)
+{
+ return dev->powerstate == NVHOST_POWER_STATE_RUNNING;
+}
+
+static inline void nvhost_module_idle(struct nvhost_device *dev)
+{
+ nvhost_module_idle_mult(dev, 1);
+}
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_cdma.c b/drivers/video/tegra/host/nvhost_cdma.c
new file mode 100644
index 000000000000..dae3b7e6182d
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cdma.c
@@ -0,0 +1,559 @@
+/*
+ * drivers/video/tegra/host/nvhost_cdma.c
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_cdma.h"
+#include "nvhost_channel.h"
+#include "nvhost_job.h"
+#include "nvhost_hwctx.h"
+#include "dev.h"
+#include "debug.h"
+#include "nvhost_memmgr.h"
+#include "chip_support.h"
+#include <asm/cacheflush.h>
+
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <trace/events/nvhost.h>
+#include <linux/interrupt.h>
+
+/*
+ * TODO:
+ * stats
+ * - for figuring out what to optimize further
+ * resizable push buffer
+ * - some channels hardly need any, some channels (3d) could use more
+ */
+
+/**
+ * Add an entry to the sync queue.
+ */
+static void add_to_sync_queue(struct nvhost_cdma *cdma,
+ struct nvhost_job *job,
+ u32 nr_slots,
+ u32 first_get)
+{
+ BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);
+
+ job->first_get = first_get;
+ job->num_slots = nr_slots;
+ nvhost_job_get(job);
+ list_add_tail(&job->list, &cdma->sync_queue);
+
+ switch (job->priority) {
+ case NVHOST_PRIORITY_HIGH:
+ cdma->high_prio_count++;
+ break;
+ case NVHOST_PRIORITY_MEDIUM:
+ cdma->med_prio_count++;
+ break;
+ case NVHOST_PRIORITY_LOW:
+ cdma->low_prio_count++;
+ break;
+ }
+}
+
+/**
+ * Return the status of the cdma's sync queue or push buffer for the given event
+ * - sq empty: returns 1 for empty, 0 for not empty (as in "1 empty queue" :-)
+ * - pb space: returns the number of free slots in the channel's push buffer
+ * Must be called with the cdma lock held.
+ */
+static unsigned int cdma_status_locked(struct nvhost_cdma *cdma,
+ enum cdma_event event)
+{
+ switch (event) {
+ case CDMA_EVENT_SYNC_QUEUE_EMPTY:
+ return list_empty(&cdma->sync_queue) ? 1 : 0;
+ case CDMA_EVENT_PUSH_BUFFER_SPACE: {
+ struct push_buffer *pb = &cdma->push_buffer;
+ BUG_ON(!cdma_pb_op().space);
+ return cdma_pb_op().space(pb);
+ }
+ default:
+ return 0;
+ }
+}
+
+/**
+ * Sleep (if necessary) until the requested event happens
+ * - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
+ * - Returns 1
+ * - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
+ * - Return the amount of space (> 0)
+ * Must be called with the cdma lock held.
+ */
+unsigned int nvhost_cdma_wait_locked(struct nvhost_cdma *cdma,
+ enum cdma_event event)
+{
+ for (;;) {
+ unsigned int space = cdma_status_locked(cdma, event);
+ if (space)
+ return space;
+
+ trace_nvhost_wait_cdma(cdma_to_channel(cdma)->dev->name,
+ event);
+
+ /* If somebody has managed to already start waiting, yield */
+ if (cdma->event != CDMA_EVENT_NONE) {
+ mutex_unlock(&cdma->lock);
+ schedule();
+ mutex_lock(&cdma->lock);
+ continue;
+ }
+ cdma->event = event;
+
+ mutex_unlock(&cdma->lock);
+ down(&cdma->sem);
+ mutex_lock(&cdma->lock);
+ }
+ return 0;
+}
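+
+/*
+ * Typical use (sketch): a submit path blocks for push buffer space with
+ *
+ *	space = nvhost_cdma_wait_locked(cdma, CDMA_EVENT_PUSH_BUFFER_SPACE);
+ *
+ * and update_cdma_locked() below posts cdma->sem once the requested
+ * event has occurred.
+ */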
+
+/**
+ * Start the timeout timer for a buffer submission that has not completed yet.
+ * Must be called with the cdma lock held.
+ */
+static void cdma_start_timer_locked(struct nvhost_cdma *cdma,
+ struct nvhost_job *job)
+{
+ BUG_ON(!job);
+ if (cdma->timeout.clientid) {
+ /* timer already started */
+ return;
+ }
+
+ cdma->timeout.ctx = job->hwctx;
+ cdma->timeout.clientid = job->clientid;
+ cdma->timeout.syncpt_id = job->syncpt_id;
+ cdma->timeout.syncpt_val = job->syncpt_end;
+ cdma->timeout.start_ktime = ktime_get();
+
+ schedule_delayed_work(&cdma->timeout.wq,
+ msecs_to_jiffies(job->timeout));
+}
+
+/**
+ * Stop the timeout timer when a buffer submission completes.
+ * Must be called with the cdma lock held.
+ */
+static void stop_cdma_timer_locked(struct nvhost_cdma *cdma)
+{
+ cancel_delayed_work(&cdma->timeout.wq);
+ cdma->timeout.ctx = NULL;
+ cdma->timeout.clientid = 0;
+}
+
+/**
+ * For all sync queue entries that have already finished according to the
+ * current sync point registers:
+ * - unpin & unref their mems
+ * - pop their push buffer slots
+ * - remove them from the sync queue
+ * This is normally called from the host code's worker thread, but can be
+ * called manually if necessary.
+ * Must be called with the cdma lock held.
+ */
+static void update_cdma_locked(struct nvhost_cdma *cdma)
+{
+ bool signal = false;
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+ struct nvhost_syncpt *sp = &dev->syncpt;
+ struct nvhost_job *job, *n;
+
+ /* If CDMA is stopped, queue is cleared and we can return */
+ if (!cdma->running)
+ return;
+
+ /*
+ * Walk the sync queue, reading the sync point registers as necessary,
+ * to consume as many sync queue entries as possible without blocking
+ */
+ list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
+ BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);
+
+ /* Check whether this syncpt has completed, and bail if not */
+ if (!nvhost_syncpt_is_expired(sp,
+ job->syncpt_id, job->syncpt_end)) {
+ /* Start timer on next pending syncpt */
+ if (job->timeout)
+ cdma_start_timer_locked(cdma, job);
+ break;
+ }
+
+		/* Cancel the timeout when a buffer completes */
+ if (cdma->timeout.clientid)
+ stop_cdma_timer_locked(cdma);
+
+ /* Unpin the memory */
+ nvhost_job_unpin(job);
+
+ /* Pop push buffer slots */
+ if (job->num_slots) {
+ struct push_buffer *pb = &cdma->push_buffer;
+ BUG_ON(!cdma_pb_op().pop_from);
+ cdma_pb_op().pop_from(pb, job->num_slots);
+ if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
+ signal = true;
+ }
+
+ list_del(&job->list);
+
+ switch (job->priority) {
+ case NVHOST_PRIORITY_HIGH:
+ cdma->high_prio_count--;
+ break;
+ case NVHOST_PRIORITY_MEDIUM:
+ cdma->med_prio_count--;
+ break;
+ case NVHOST_PRIORITY_LOW:
+ cdma->low_prio_count--;
+ break;
+ }
+
+ nvhost_job_put(job);
+ }
+
+ if (list_empty(&cdma->sync_queue) &&
+ cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+ signal = true;
+
+ /* Wake up CdmaWait() if the requested event happened */
+ if (signal) {
+ cdma->event = CDMA_EVENT_NONE;
+ up(&cdma->sem);
+ }
+}
+
+void nvhost_cdma_update_sync_queue(struct nvhost_cdma *cdma,
+ struct nvhost_syncpt *syncpt, struct nvhost_device *dev)
+{
+ u32 get_restart;
+ u32 syncpt_incrs;
+ struct nvhost_job *job = NULL;
+ u32 syncpt_val;
+
+ syncpt_val = nvhost_syncpt_update_min(syncpt, cdma->timeout.syncpt_id);
+
+ dev_dbg(&dev->dev,
+ "%s: starting cleanup (thresh %d)\n",
+ __func__, syncpt_val);
+
+ /*
+ * Move the sync_queue read pointer to the first entry that hasn't
+ * completed based on the current HW syncpt value. It's likely there
+ * won't be any (i.e. we're still at the head), but covers the case
+ * where a syncpt incr happens just prior/during the teardown.
+ */
+
+ dev_dbg(&dev->dev,
+ "%s: skip completed buffers still in sync_queue\n",
+ __func__);
+
+ list_for_each_entry(job, &cdma->sync_queue, list) {
+ if (syncpt_val < job->syncpt_end)
+ break;
+
+ nvhost_job_dump(&dev->dev, job);
+ }
+
+ /*
+ * Walk the sync_queue, first incrementing with the CPU syncpts that
+ * are partially executed (the first buffer) or fully skipped while
+ * still in the current context (slots are also NOP-ed).
+ *
+ * At the point contexts are interleaved, syncpt increments must be
+ * done inline with the pushbuffer from a GATHER buffer to maintain
+ * the order (slots are modified to be a GATHER of syncpt incrs).
+ *
+ * Note: save in get_restart the location where the timed out buffer
+ * started in the PB, so we can start the refetch from there (with the
+ * modified NOP-ed PB slots). This makes the timed-out buffer appear to have
+ * completed normally, so its resources can be freed.
+ */
+
+ dev_dbg(&dev->dev,
+ "%s: perform CPU incr on pending same ctx buffers\n",
+ __func__);
+
+ get_restart = cdma->last_put;
+ if (!list_empty(&cdma->sync_queue))
+ get_restart = job->first_get;
+
+ /* do CPU increments as long as this context continues */
+ list_for_each_entry_from(job, &cdma->sync_queue, list) {
+ /* different context, gets us out of this loop */
+ if (job->clientid != cdma->timeout.clientid)
+ break;
+
+ /* won't need a timeout when replayed */
+ job->timeout = 0;
+
+ syncpt_incrs = job->syncpt_end - syncpt_val;
+ dev_dbg(&dev->dev,
+ "%s: CPU incr (%d)\n", __func__, syncpt_incrs);
+
+ nvhost_job_dump(&dev->dev, job);
+
+ /* safe to use CPU to incr syncpts */
+ cdma_op().timeout_cpu_incr(cdma,
+ job->first_get,
+ syncpt_incrs,
+ job->syncpt_end,
+ job->num_slots,
+ dev->waitbases);
+
+ syncpt_val += syncpt_incrs;
+ }
+
+ dev_dbg(&dev->dev,
+ "%s: finished sync_queue modification\n", __func__);
+
+ /* roll back DMAGET and start up channel again */
+ cdma_op().timeout_teardown_end(cdma, get_restart);
+
+ if (cdma->timeout.ctx)
+ cdma->timeout.ctx->has_timedout = true;
+}
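To make the recovery above concrete, here is an illustrative (purely hypothetical) walk-through of nvhost_cdma_update_sync_queue() after a timeout, with three queued jobs of which the first two belong to the timed-out client:

- Suppose the syncpt HW value read at teardown is 10, and the queue holds job A (clientid 1, syncpt_end 13, the job that timed out), job B (clientid 1, syncpt_end 15) and job C (clientid 2, syncpt_end 18).
- The skip loop stops immediately at A (10 < 13), so get_restart becomes A->first_get and DMAGET will refetch A's now NOP-ed slots.
- The CPU-incr loop applies 13 - 10 = 3 increments for A and 15 - 13 = 2 for B, zeroing each job's timeout so the replay cannot time out again.
- C belongs to a different client, so the loop breaks; its increments are left to execute inline from the GATHER-converted push buffer slots, preserving ordering across the context switch.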
+
+/**
+ * Create a cdma
+ */
+int nvhost_cdma_init(struct nvhost_cdma *cdma)
+{
+ int err;
+ struct push_buffer *pb = &cdma->push_buffer;
+ BUG_ON(!cdma_pb_op().init);
+ mutex_init(&cdma->lock);
+ sema_init(&cdma->sem, 0);
+
+ INIT_LIST_HEAD(&cdma->sync_queue);
+
+ cdma->event = CDMA_EVENT_NONE;
+ cdma->running = false;
+ cdma->torndown = false;
+
+ err = cdma_pb_op().init(pb);
+ if (err)
+ return err;
+ return 0;
+}
+
+/**
+ * Destroy a cdma
+ */
+void nvhost_cdma_deinit(struct nvhost_cdma *cdma)
+{
+ struct push_buffer *pb = &cdma->push_buffer;
+
+ BUG_ON(!cdma_pb_op().destroy);
+ BUG_ON(cdma->running);
+ cdma_pb_op().destroy(pb);
+ cdma_op().timeout_destroy(cdma);
+}
+
+/**
+ * Begin a cdma submit
+ */
+int nvhost_cdma_begin(struct nvhost_cdma *cdma, struct nvhost_job *job)
+{
+ mutex_lock(&cdma->lock);
+
+ if (job->timeout) {
+ /* init state on first submit with timeout value */
+ if (!cdma->timeout.initialized) {
+ int err;
+ BUG_ON(!cdma_op().timeout_init);
+ err = cdma_op().timeout_init(cdma,
+ job->syncpt_id);
+ if (err) {
+ mutex_unlock(&cdma->lock);
+ return err;
+ }
+ }
+ }
+ if (!cdma->running) {
+ BUG_ON(!cdma_op().start);
+ cdma_op().start(cdma);
+ }
+ cdma->slots_free = 0;
+ cdma->slots_used = 0;
+ cdma->first_get = cdma_pb_op().putptr(&cdma->push_buffer);
+ return 0;
+}
+
+static void trace_write_gather(struct nvhost_cdma *cdma,
+ struct mem_handle *ref,
+ u32 offset, u32 words)
+{
+ void *mem = NULL;
+
+ if (nvhost_debug_trace_cmdbuf) {
+ mem = mem_op().mmap(ref);
+ if (IS_ERR_OR_NULL(mem))
+ mem = NULL;
+	}
+
+ if (mem) {
+ u32 i;
+ /*
+ * Write in batches of 128 as there seems to be a limit
+ * of how much you can output to ftrace at once.
+ */
+ for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
+ trace_nvhost_cdma_push_gather(
+ cdma_to_channel(cdma)->dev->name,
+ (u32)ref,
+ min(words - i, TRACE_MAX_LENGTH),
+ offset + i * sizeof(u32),
+ mem);
+ }
+ mem_op().munmap(ref, mem);
+ }
+}
+
+/**
+ * Push two words into a push buffer slot
+ * Blocks as necessary if the push buffer is full.
+ */
+void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2)
+{
+ if (nvhost_debug_trace_cmdbuf)
+ trace_nvhost_cdma_push(cdma_to_channel(cdma)->dev->name,
+ op1, op2);
+
+ nvhost_cdma_push_gather(cdma, NULL, NULL, 0, op1, op2);
+}
+
+/**
+ * Push two words into a push buffer slot, tracing the gather (if any)
+ * referenced by the opcode pair.
+ * Blocks as necessary if the push buffer is full.
+ */
+void nvhost_cdma_push_gather(struct nvhost_cdma *cdma,
+ struct mem_mgr *client, struct mem_handle *handle,
+ u32 offset, u32 op1, u32 op2)
+{
+ u32 slots_free = cdma->slots_free;
+ struct push_buffer *pb = &cdma->push_buffer;
+
+ BUG_ON(!cdma_pb_op().push_to);
+ BUG_ON(!cdma_op().kick);
+
+ if (handle)
+ trace_write_gather(cdma, handle, offset, op1 & 0xffff);
+
+ if (slots_free == 0) {
+ cdma_op().kick(cdma);
+ slots_free = nvhost_cdma_wait_locked(cdma,
+ CDMA_EVENT_PUSH_BUFFER_SPACE);
+ }
+ cdma->slots_free = slots_free - 1;
+ cdma->slots_used++;
+ cdma_pb_op().push_to(pb, client, handle, op1, op2);
+}
+
+/**
+ * End a cdma submit
+ * Kick off DMA, add the job to the sync queue, and record how many push
+ * buffer slots to free once the job completes. The handles for a submit must
+ * all be pinned at the same time, but they can be unpinned in smaller chunks.
+ */
+void nvhost_cdma_end(struct nvhost_cdma *cdma,
+ struct nvhost_job *job)
+{
+ bool was_idle = list_empty(&cdma->sync_queue);
+
+ BUG_ON(!cdma_op().kick);
+ cdma_op().kick(cdma);
+
+ BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);
+
+ add_to_sync_queue(cdma,
+ job,
+ cdma->slots_used,
+ cdma->first_get);
+
+ /* start timer on idle -> active transitions */
+ if (job->timeout && was_idle)
+ cdma_start_timer_locked(cdma, job);
+
+ trace_nvhost_cdma_end(job->ch->dev->name,
+ job->priority,
+ job->ch->cdma.high_prio_count,
+ job->ch->cdma.med_prio_count,
+ job->ch->cdma.low_prio_count);
+
+ mutex_unlock(&cdma->lock);
+}
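Taken together, nvhost_cdma_begin()/push()/end() form the producer half of the API described in nvhost_cdma.h. Below is a minimal sketch of a hypothetical caller (not part of this patch; the opcode pair is a placeholder for the real host1x opcode macros, and error handling is trimmed):

static int example_submit(struct nvhost_channel *ch, struct nvhost_job *job)
{
	/* placeholder opcodes; a real submit builds host1x opcode words */
	u32 op1 = 0, op2 = 0;
	int err;

	err = nvhost_cdma_begin(&ch->cdma, job);	/* takes cdma->lock */
	if (err)
		return err;

	nvhost_cdma_push(&ch->cdma, op1, op2);	/* may block for PB space */

	nvhost_cdma_end(&ch->cdma, job);	/* kicks DMA, drops cdma->lock */
	return 0;
}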
+
+/**
+ * Update cdma state according to current sync point values
+ */
+void nvhost_cdma_update(struct nvhost_cdma *cdma)
+{
+ mutex_lock(&cdma->lock);
+ update_cdma_locked(cdma);
+ mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Wait for push buffer to be empty.
+ * @cdma pointer to channel cdma
+ * @timeout timeout in ms
+ * Returns -ETIME if timeout was reached, zero if push buffer is empty.
+ */
+int nvhost_cdma_flush(struct nvhost_cdma *cdma, int timeout)
+{
+	unsigned int space;
+	int err = 0;
+ unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+
+ trace_nvhost_cdma_flush(cdma_to_channel(cdma)->dev->name, timeout);
+
+ /*
+ * Wait for at most timeout ms. Recalculate timeout at each iteration
+ * to better keep within given timeout.
+ */
+	while (!err && time_before(jiffies, end_jiffies)) {
+ int timeout_jiffies = end_jiffies - jiffies;
+
+ mutex_lock(&cdma->lock);
+ space = cdma_status_locked(cdma,
+ CDMA_EVENT_SYNC_QUEUE_EMPTY);
+ if (space) {
+ mutex_unlock(&cdma->lock);
+ return 0;
+ }
+
+ /*
+ * Wait for sync queue to become empty. If there is already
+ * an event pending, we need to poll.
+ */
+ if (cdma->event != CDMA_EVENT_NONE) {
+ mutex_unlock(&cdma->lock);
+ schedule();
+ } else {
+ cdma->event = CDMA_EVENT_SYNC_QUEUE_EMPTY;
+
+ mutex_unlock(&cdma->lock);
+			/* down_timeout() takes jiffies, not msecs */
+			err = down_timeout(&cdma->sem, timeout_jiffies);
+ }
+ }
+ return err;
+}
diff --git a/drivers/video/tegra/host/nvhost_cdma.h b/drivers/video/tegra/host/nvhost_cdma.h
new file mode 100644
index 000000000000..a9522c5f6326
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cdma.h
@@ -0,0 +1,117 @@
+/*
+ * drivers/video/tegra/host/nvhost_cdma.h
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_CDMA_H
+#define __NVHOST_CDMA_H
+
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+
+#include <linux/nvhost.h>
+#include <linux/list.h>
+
+struct nvhost_syncpt;
+struct nvhost_userctx_timeout;
+struct nvhost_job;
+struct mem_mgr;
+struct mem_handle;
+
+/*
+ * cdma
+ *
+ * This is in charge of a host command DMA channel.
+ * Sends ops to a push buffer, and takes responsibility for unpinning
+ * (& possibly freeing) of memory after those ops have completed.
+ * Producer:
+ * begin
+ * push - send ops to the push buffer
+ * end - start command DMA and enqueue handles to be unpinned
+ * Consumer:
+ * update - call to update sync queue and push buffer, unpin memory
+ */
+
+struct push_buffer {
+ struct mem_handle *mem; /* handle to pushbuffer memory */
+ u32 *mapped; /* mapped pushbuffer memory */
+ u32 phys; /* physical address of pushbuffer */
+ u32 fence; /* index we've written */
+ u32 cur; /* index to write to */
+ struct mem_mgr_handle *client_handle; /* handle for each opcode pair */
+};
+
+struct buffer_timeout {
+ struct delayed_work wq; /* work queue */
+ bool initialized; /* timer one-time setup flag */
+ u32 syncpt_id; /* buffer completion syncpt id */
+ u32 syncpt_val; /* syncpt value when completed */
+ ktime_t start_ktime; /* starting time */
+ /* context timeout information */
+ struct nvhost_hwctx *ctx;
+ int clientid;
+};
+
+enum cdma_event {
+ CDMA_EVENT_NONE, /* not waiting for any event */
+ CDMA_EVENT_SYNC_QUEUE_EMPTY, /* wait for empty sync queue */
+ CDMA_EVENT_PUSH_BUFFER_SPACE /* wait for space in push buffer */
+};
+
+struct nvhost_cdma {
+ struct mutex lock; /* controls access to shared state */
+ struct semaphore sem; /* signalled when event occurs */
+ enum cdma_event event; /* event that sem is waiting for */
+ unsigned int slots_used; /* pb slots used in current submit */
+ unsigned int slots_free; /* pb slots free in current submit */
+ unsigned int first_get; /* DMAGET value, where submit begins */
+ unsigned int last_put; /* last value written to DMAPUT */
+ struct push_buffer push_buffer; /* channel's push buffer */
+ struct list_head sync_queue; /* job queue */
+ struct buffer_timeout timeout; /* channel's timeout state/wq */
+ bool running;
+ bool torndown;
+ int high_prio_count;
+ int med_prio_count;
+ int low_prio_count;
+};
+
+#define cdma_to_channel(cdma) container_of(cdma, struct nvhost_channel, cdma)
+#define cdma_to_dev(cdma) nvhost_get_host(cdma_to_channel(cdma)->dev)
+#define cdma_to_memmgr(cdma) ((cdma_to_dev(cdma))->memmgr)
+#define pb_to_cdma(pb) container_of(pb, struct nvhost_cdma, push_buffer)
+
+int nvhost_cdma_init(struct nvhost_cdma *cdma);
+void nvhost_cdma_deinit(struct nvhost_cdma *cdma);
+void nvhost_cdma_stop(struct nvhost_cdma *cdma);
+int nvhost_cdma_begin(struct nvhost_cdma *cdma, struct nvhost_job *job);
+void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2);
+void nvhost_cdma_push_gather(struct nvhost_cdma *cdma,
+ struct mem_mgr *client,
+ struct mem_handle *handle, u32 offset, u32 op1, u32 op2);
+void nvhost_cdma_end(struct nvhost_cdma *cdma,
+ struct nvhost_job *job);
+void nvhost_cdma_update(struct nvhost_cdma *cdma);
+int nvhost_cdma_flush(struct nvhost_cdma *cdma, int timeout);
+void nvhost_cdma_peek(struct nvhost_cdma *cdma,
+ u32 dmaget, int slot, u32 *out);
+unsigned int nvhost_cdma_wait_locked(struct nvhost_cdma *cdma,
+ enum cdma_event event);
+void nvhost_cdma_update_sync_queue(struct nvhost_cdma *cdma,
+ struct nvhost_syncpt *syncpt, struct nvhost_device *dev);
+#endif
diff --git a/drivers/video/tegra/host/nvhost_channel.c b/drivers/video/tegra/host/nvhost_channel.c
new file mode 100644
index 000000000000..fd309ee9917b
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_channel.c
@@ -0,0 +1,188 @@
+/*
+ * drivers/video/tegra/host/nvhost_channel.c
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_channel.h"
+#include "dev.h"
+#include "nvhost_acm.h"
+#include "nvhost_job.h"
+#include "chip_support.h"
+
+#include <trace/events/nvhost.h>
+#include <linux/nvhost_ioctl.h>
+#include <linux/slab.h>
+
+#define NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT 50
+
+int nvhost_channel_init(struct nvhost_channel *ch,
+ struct nvhost_master *dev, int index)
+{
+ int err;
+ struct nvhost_device *ndev;
+
+ /* Link nvhost_device to nvhost_channel */
+ err = channel_op().init(ch, dev, index);
+ if (err < 0) {
+ dev_err(&dev->dev->dev, "failed to init channel %d\n",
+ index);
+ return err;
+ }
+ ndev = ch->dev;
+ ndev->channel = ch;
+
+ return 0;
+}
+
+int nvhost_channel_submit(struct nvhost_job *job)
+{
+ /*
+ * Check if queue has higher priority jobs running. If so, wait until
+ * queue is empty. Ignores result from nvhost_cdma_flush, as we submit
+ * either when push buffer is empty or when we reach the timeout.
+ */
+ int higher_count = 0;
+
+ switch (job->priority) {
+ case NVHOST_PRIORITY_HIGH:
+ higher_count = 0;
+ break;
+ case NVHOST_PRIORITY_MEDIUM:
+ higher_count = job->ch->cdma.high_prio_count;
+ break;
+ case NVHOST_PRIORITY_LOW:
+ higher_count = job->ch->cdma.high_prio_count
+ + job->ch->cdma.med_prio_count;
+ break;
+ }
+ if (higher_count > 0)
+ (void)nvhost_cdma_flush(&job->ch->cdma,
+ NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT);
+
+ return channel_op().submit(job);
+}
+
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch)
+{
+ int err = 0;
+ struct nvhost_driver *drv = to_nvhost_driver(ch->dev->dev.driver);
+
+ mutex_lock(&ch->reflock);
+ if (ch->refcount == 0) {
+ if (drv->init)
+ drv->init(ch->dev);
+ err = nvhost_cdma_init(&ch->cdma);
+ } else if (ch->dev->exclusive) {
+ err = -EBUSY;
+ }
+ if (!err)
+ ch->refcount++;
+
+ mutex_unlock(&ch->reflock);
+
+	/* Keep modules that need it powered while a channel is open */
+ if (!err && ch->dev->keepalive)
+ nvhost_module_busy(ch->dev);
+
+ return err ? NULL : ch;
+}
+
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx)
+{
+ BUG_ON(!channel_cdma_op().stop);
+
+ if (ctx) {
+ mutex_lock(&ch->submitlock);
+ if (ch->cur_ctx == ctx)
+ ch->cur_ctx = NULL;
+ mutex_unlock(&ch->submitlock);
+ }
+
+ /* Allow keep-alive'd module to be turned off */
+ if (ch->dev->keepalive)
+ nvhost_module_idle(ch->dev);
+
+ mutex_lock(&ch->reflock);
+ if (ch->refcount == 1) {
+ channel_cdma_op().stop(&ch->cdma);
+ nvhost_cdma_deinit(&ch->cdma);
+ nvhost_module_suspend(ch->dev);
+ }
+ ch->refcount--;
+ mutex_unlock(&ch->reflock);
+}
+
+int nvhost_channel_suspend(struct nvhost_channel *ch)
+{
+ int ret = 0;
+
+ mutex_lock(&ch->reflock);
+ BUG_ON(!channel_cdma_op().stop);
+
+ if (ch->refcount) {
+ ret = nvhost_module_suspend(ch->dev);
+ if (!ret)
+ channel_cdma_op().stop(&ch->cdma);
+ }
+ mutex_unlock(&ch->reflock);
+
+ return ret;
+}
+
+struct nvhost_channel *nvhost_alloc_channel_internal(int chindex,
+ int max_channels, int *current_channel_count)
+{
+ struct nvhost_channel *ch = NULL;
+
+	if (chindex > max_channels ||
+	    (*current_channel_count + 1) > max_channels)
+		return NULL;
+
+	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+	if (ch == NULL)
+		return NULL;
+
+	(*current_channel_count)++;
+	return ch;
+}
+
+void nvhost_free_channel_internal(struct nvhost_channel *ch,
+ int *current_channel_count)
+{
+ kfree(ch);
+ (*current_channel_count)--;
+}
+
+int nvhost_channel_save_context(struct nvhost_channel *ch)
+{
+	int err = 0;
+
+	if (ch->cur_ctx)
+		err = channel_op().save_context(ch);
+
+	return err;
+}
+
+int nvhost_channel_drain_read_fifo(struct nvhost_channel *ch,
+ u32 *ptr, unsigned int count, unsigned int *pending)
+{
+ return channel_op().drain_read_fifo(ch, ptr, count, pending);
+}
diff --git a/drivers/video/tegra/host/nvhost_channel.h b/drivers/video/tegra/host/nvhost_channel.h
new file mode 100644
index 000000000000..d7f096db1ffa
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_channel.h
@@ -0,0 +1,77 @@
+/*
+ * drivers/video/tegra/host/nvhost_channel.h
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_CHANNEL_H
+#define __NVHOST_CHANNEL_H
+
+#include <linux/cdev.h>
+#include <linux/io.h>
+#include "nvhost_cdma.h"
+
+#define NVHOST_MAX_WAIT_CHECKS 256
+#define NVHOST_MAX_GATHERS 512
+#define NVHOST_MAX_HANDLES 1280
+#define NVHOST_MAX_POWERGATE_IDS 2
+
+struct nvhost_master;
+struct nvhost_device;
+struct nvhost_channel;
+struct nvhost_hwctx;
+
+struct nvhost_channel {
+ int refcount;
+ int chid;
+ u32 syncpt_id;
+ struct mutex reflock;
+ struct mutex submitlock;
+ void __iomem *aperture;
+ struct nvhost_hwctx *cur_ctx;
+ struct device *node;
+ struct nvhost_device *dev;
+ struct cdev cdev;
+ struct nvhost_hwctx_handler *ctxhandler;
+ struct nvhost_cdma cdma;
+};
+
+int nvhost_channel_init(struct nvhost_channel *ch,
+ struct nvhost_master *dev, int index);
+
+int nvhost_channel_submit(struct nvhost_job *job);
+
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch);
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx);
+int nvhost_channel_suspend(struct nvhost_channel *ch);
+
+int nvhost_channel_drain_read_fifo(struct nvhost_channel *ch,
+ u32 *ptr, unsigned int count, unsigned int *pending);
+
+int nvhost_channel_read_3d_reg(struct nvhost_channel *channel,
+ struct nvhost_hwctx *hwctx,
+ u32 offset, u32 *value);
+
+struct nvhost_channel *nvhost_alloc_channel_internal(int chindex,
+ int max_channels, int *current_channel_count);
+
+void nvhost_free_channel_internal(struct nvhost_channel *ch,
+ int *current_channel_count);
+
+int nvhost_channel_save_context(struct nvhost_channel *ch);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_hwctx.h b/drivers/video/tegra/host/nvhost_hwctx.h
new file mode 100644
index 000000000000..47bc3d408fde
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_hwctx.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/video/tegra/host/nvhost_hwctx.h
+ *
+ * Tegra Graphics Host Hardware Context Interface
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_HWCTX_H
+#define __NVHOST_HWCTX_H
+
+#include <linux/string.h>
+#include <linux/kref.h>
+
+#include <linux/nvhost.h>
+
+struct nvhost_channel;
+struct nvhost_cdma;
+
+struct nvhost_hwctx {
+ struct kref ref;
+ struct nvhost_hwctx_handler *h;
+ struct nvhost_channel *channel;
+ bool valid;
+ bool has_timedout;
+};
+
+struct nvhost_hwctx_handler {
+ struct nvhost_hwctx * (*alloc) (struct nvhost_hwctx_handler *h,
+ struct nvhost_channel *ch);
+ void (*get) (struct nvhost_hwctx *ctx);
+ void (*put) (struct nvhost_hwctx *ctx);
+ void (*save_push) (struct nvhost_hwctx *ctx,
+ struct nvhost_cdma *cdma);
+ void (*save_service) (struct nvhost_hwctx *ctx);
+ void *priv;
+};
+
+
+struct hwctx_reginfo {
+ unsigned int offset:12;
+ unsigned int count:16;
+ unsigned int type:2;
+};
+
+enum {
+ HWCTX_REGINFO_DIRECT = 0,
+ HWCTX_REGINFO_INDIRECT,
+ HWCTX_REGINFO_INDIRECT_4X
+};
+
+#define HWCTX_REGINFO(offset, count, type) {offset, count, HWCTX_REGINFO_##type}
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_intr.c b/drivers/video/tegra/host/nvhost_intr.c
new file mode 100644
index 000000000000..9788d32bd4a9
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_intr.c
@@ -0,0 +1,441 @@
+/*
+ * drivers/video/tegra/host/nvhost_intr.c
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_intr.h"
+#include "dev.h"
+#include "nvhost_acm.h"
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <trace/events/nvhost.h>
+#include "nvhost_channel.h"
+#include "nvhost_hwctx.h"
+#include "chip_support.h"
+
+/*** Wait list management ***/
+
+struct nvhost_waitlist {
+ struct list_head list;
+ struct kref refcount;
+ u32 thresh;
+ enum nvhost_intr_action action;
+ atomic_t state;
+ void *data;
+ int count;
+};
+
+enum waitlist_state {
+ WLS_PENDING,
+ WLS_REMOVED,
+ WLS_CANCELLED,
+ WLS_HANDLED
+};
+
+static void waiter_release(struct kref *kref)
+{
+ kfree(container_of(kref, struct nvhost_waitlist, refcount));
+}
+
+/**
+ * add a waiter to a waiter queue, sorted by threshold
+ * returns true if it was added at the head of the queue
+ */
+static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
+ struct list_head *queue)
+{
+ struct nvhost_waitlist *pos;
+ u32 thresh = waiter->thresh;
+
+ list_for_each_entry_reverse(pos, queue, list)
+ if ((s32)(pos->thresh - thresh) <= 0) {
+ list_add(&waiter->list, &pos->list);
+ return false;
+ }
+
+ list_add(&waiter->list, queue);
+ return true;
+}
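The (s32)(pos->thresh - thresh) <= 0 test orders thresholds modulo 2^32, so the queue stays sorted even across sync point wraparound. A self-contained userspace illustration of the same comparison (for exposition only, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* nonzero if threshold a is not after b, modulo 2^32 */
static int thresh_not_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

int main(void)
{
	/* 0xfffffffe precedes 0x00000002 across the wrap: diff = -4 */
	printf("%d\n", thresh_not_after(0xfffffffeu, 0x00000002u)); /* 1 */
	/* and 0x00000002 does not precede 0xfffffffe */
	printf("%d\n", thresh_not_after(0x00000002u, 0xfffffffeu)); /* 0 */
	return 0;
}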
+
+/**
+ * run through a waiter queue for a single sync point ID
+ * and gather all completed waiters into lists by actions
+ */
+static void remove_completed_waiters(struct list_head *head, u32 sync,
+ struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+{
+ struct list_head *dest;
+ struct nvhost_waitlist *waiter, *next, *prev;
+
+ list_for_each_entry_safe(waiter, next, head, list) {
+ if ((s32)(waiter->thresh - sync) > 0)
+ break;
+
+ dest = completed + waiter->action;
+
+ /* consolidate submit cleanups */
+ if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
+ && !list_empty(dest)) {
+ prev = list_entry(dest->prev,
+ struct nvhost_waitlist, list);
+ if (prev->data == waiter->data) {
+ prev->count++;
+ dest = NULL;
+ }
+ }
+
+ /* PENDING->REMOVED or CANCELLED->HANDLED */
+ if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
+ list_del(&waiter->list);
+ kref_put(&waiter->refcount, waiter_release);
+ } else {
+ list_move_tail(&waiter->list, dest);
+ }
+ }
+}
+
+void reset_threshold_interrupt(struct nvhost_intr *intr,
+ struct list_head *head,
+ unsigned int id)
+{
+ u32 thresh = list_first_entry(head,
+ struct nvhost_waitlist, list)->thresh;
+ BUG_ON(!(intr_op().set_syncpt_threshold &&
+ intr_op().enable_syncpt_intr));
+
+ intr_op().set_syncpt_threshold(intr, id, thresh);
+ intr_op().enable_syncpt_intr(intr, id);
+}
+
+
+static void action_submit_complete(struct nvhost_waitlist *waiter)
+{
+ struct nvhost_channel *channel = waiter->data;
+ int nr_completed = waiter->count;
+
+ nvhost_cdma_update(&channel->cdma);
+ nvhost_module_idle_mult(channel->dev, nr_completed);
+
+ /* Add nr_completed to trace */
+ trace_nvhost_channel_submit_complete(channel->dev->name,
+ nr_completed, waiter->thresh,
+ channel->cdma.high_prio_count,
+ channel->cdma.med_prio_count,
+ channel->cdma.low_prio_count);
+
+}
+
+static void action_ctxsave(struct nvhost_waitlist *waiter)
+{
+ struct nvhost_hwctx *hwctx = waiter->data;
+ struct nvhost_channel *channel = hwctx->channel;
+
+ if (channel->ctxhandler->save_service)
+ channel->ctxhandler->save_service(hwctx);
+}
+
+static void action_wakeup(struct nvhost_waitlist *waiter)
+{
+ wait_queue_head_t *wq = waiter->data;
+
+ wake_up(wq);
+}
+
+static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
+{
+ wait_queue_head_t *wq = waiter->data;
+
+ wake_up_interruptible(wq);
+}
+
+typedef void (*action_handler)(struct nvhost_waitlist *waiter);
+
+static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
+ action_submit_complete,
+ action_ctxsave,
+ action_wakeup,
+ action_wakeup_interruptible,
+};
+
+static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+{
+ struct list_head *head = completed;
+ int i;
+
+ for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
+ action_handler handler = action_handlers[i];
+ struct nvhost_waitlist *waiter, *next;
+
+ list_for_each_entry_safe(waiter, next, head, list) {
+ list_del(&waiter->list);
+ handler(waiter);
+ WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) != WLS_REMOVED);
+ kref_put(&waiter->refcount, waiter_release);
+ }
+ }
+}
+
+/**
+ * Remove & handle all waiters that have completed for the given syncpt
+ */
+static int process_wait_list(struct nvhost_intr *intr,
+ struct nvhost_intr_syncpt *syncpt,
+ u32 threshold)
+{
+ struct list_head completed[NVHOST_INTR_ACTION_COUNT];
+ unsigned int i;
+ int empty;
+
+ for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
+ INIT_LIST_HEAD(completed + i);
+
+ spin_lock(&syncpt->lock);
+
+ remove_completed_waiters(&syncpt->wait_head, threshold, completed);
+
+ empty = list_empty(&syncpt->wait_head);
+ if (empty)
+ intr_op().disable_syncpt_intr(intr, syncpt->id);
+ else
+ reset_threshold_interrupt(intr, &syncpt->wait_head,
+ syncpt->id);
+
+ spin_unlock(&syncpt->lock);
+
+ run_handlers(completed);
+
+ return empty;
+}
+
+/*** host syncpt interrupt service functions ***/
+/**
+ * Sync point threshold interrupt service thread function
+ * Handles sync point threshold triggers, in thread context
+ */
+irqreturn_t nvhost_syncpt_thresh_fn(int irq, void *dev_id)
+{
+ struct nvhost_intr_syncpt *syncpt = dev_id;
+ unsigned int id = syncpt->id;
+ struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
+ struct nvhost_master *dev = intr_to_dev(intr);
+
+ (void)process_wait_list(intr, syncpt,
+ nvhost_syncpt_update_min(&dev->syncpt, id));
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * free a syncpt's irq. syncpt interrupt should be disabled first.
+ */
+static void free_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
+{
+ if (syncpt->irq_requested) {
+ free_irq(syncpt->irq, syncpt);
+ syncpt->irq_requested = 0;
+ }
+}
+
+
+/*** host general interrupt service functions ***/
+
+
+/*** Main API ***/
+
+int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
+ enum nvhost_intr_action action, void *data,
+ void *_waiter,
+ void **ref)
+{
+ struct nvhost_waitlist *waiter = _waiter;
+ struct nvhost_intr_syncpt *syncpt;
+ int queue_was_empty;
+ int err;
+
+ BUG_ON(waiter == NULL);
+
+ BUG_ON(!(intr_op().set_syncpt_threshold &&
+ intr_op().enable_syncpt_intr));
+
+ /* initialize a new waiter */
+ INIT_LIST_HEAD(&waiter->list);
+ kref_init(&waiter->refcount);
+ if (ref)
+ kref_get(&waiter->refcount);
+ waiter->thresh = thresh;
+ waiter->action = action;
+ atomic_set(&waiter->state, WLS_PENDING);
+ waiter->data = data;
+ waiter->count = 1;
+
+ syncpt = intr->syncpt + id;
+
+ spin_lock(&syncpt->lock);
+
+ /* lazily request irq for this sync point */
+ if (!syncpt->irq_requested) {
+ spin_unlock(&syncpt->lock);
+
+ mutex_lock(&intr->mutex);
+ BUG_ON(!(intr_op().request_syncpt_irq));
+ err = intr_op().request_syncpt_irq(syncpt);
+ mutex_unlock(&intr->mutex);
+
+ if (err) {
+ kfree(waiter);
+ return err;
+ }
+
+ spin_lock(&syncpt->lock);
+ }
+
+ queue_was_empty = list_empty(&syncpt->wait_head);
+
+ if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
+ /* added at head of list - new threshold value */
+ intr_op().set_syncpt_threshold(intr, id, thresh);
+
+ /* added as first waiter - enable interrupt */
+ if (queue_was_empty)
+ intr_op().enable_syncpt_intr(intr, id);
+ }
+
+ spin_unlock(&syncpt->lock);
+
+ if (ref)
+ *ref = waiter;
+ return 0;
+}
+
+void *nvhost_intr_alloc_waiter(void)
+{
+ return kzalloc(sizeof(struct nvhost_waitlist),
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+void nvhost_intr_put_ref(struct nvhost_intr *intr, u32 id, void *ref)
+{
+ struct nvhost_waitlist *waiter = ref;
+ struct nvhost_intr_syncpt *syncpt;
+ struct nvhost_master *host = intr_to_dev(intr);
+
+ while (atomic_cmpxchg(&waiter->state,
+ WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
+ schedule();
+
+ syncpt = intr->syncpt + id;
+ (void)process_wait_list(intr, syncpt,
+ nvhost_syncpt_update_min(&host->syncpt, id));
+
+ kref_put(&waiter->refcount, waiter_release);
+}
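A note on the state machine these two paths share: the interrupt side advances a waiter with atomic_inc_return(), taking WLS_PENDING to WLS_REMOVED (queued for its handler) or WLS_CANCELLED straight to WLS_HANDLED (freed on the spot), while nvhost_intr_put_ref() attempts the WLS_PENDING to WLS_CANCELLED transition with cmpxchg. If the cmpxchg instead observes WLS_REMOVED, the handler is in flight, so put_ref yields and retries until run_handlers() marks the waiter WLS_HANDLED; only then is the final reference dropped.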
+
+
+/*** Init & shutdown ***/
+
+int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
+{
+ unsigned int id;
+ struct nvhost_intr_syncpt *syncpt;
+ struct nvhost_master *host = intr_to_dev(intr);
+ u32 nb_pts = nvhost_syncpt_nb_pts(&host->syncpt);
+
+ mutex_init(&intr->mutex);
+ intr->host_syncpt_irq_base = irq_sync;
+ intr_op().init_host_sync(intr);
+ intr->host_general_irq = irq_gen;
+ intr->host_general_irq_requested = false;
+ intr_op().request_host_general_irq(intr);
+
+ for (id = 0, syncpt = intr->syncpt;
+ id < nb_pts;
+ ++id, ++syncpt) {
+ syncpt->intr = &host->intr;
+ syncpt->id = id;
+ syncpt->irq = irq_sync + id;
+ syncpt->irq_requested = 0;
+ spin_lock_init(&syncpt->lock);
+ INIT_LIST_HEAD(&syncpt->wait_head);
+ snprintf(syncpt->thresh_irq_name,
+ sizeof(syncpt->thresh_irq_name),
+ "host_sp_%02d", id);
+ }
+
+ return 0;
+}
+
+void nvhost_intr_deinit(struct nvhost_intr *intr)
+{
+ nvhost_intr_stop(intr);
+}
+
+void nvhost_intr_start(struct nvhost_intr *intr, u32 hz)
+{
+ BUG_ON(!(intr_op().init_host_sync &&
+ intr_op().set_host_clocks_per_usec &&
+ intr_op().request_host_general_irq));
+
+ mutex_lock(&intr->mutex);
+
+ intr_op().init_host_sync(intr);
+ intr_op().set_host_clocks_per_usec(intr,
+ (hz + 1000000 - 1)/1000000);
+
+ intr_op().request_host_general_irq(intr);
+
+ mutex_unlock(&intr->mutex);
+}
+
+void nvhost_intr_stop(struct nvhost_intr *intr)
+{
+ unsigned int id;
+ struct nvhost_intr_syncpt *syncpt;
+ u32 nb_pts = nvhost_syncpt_nb_pts(&intr_to_dev(intr)->syncpt);
+
+ BUG_ON(!(intr_op().disable_all_syncpt_intrs &&
+ intr_op().free_host_general_irq));
+
+ mutex_lock(&intr->mutex);
+
+ intr_op().disable_all_syncpt_intrs(intr);
+
+ for (id = 0, syncpt = intr->syncpt;
+ id < nb_pts;
+ ++id, ++syncpt) {
+ struct nvhost_waitlist *waiter, *next;
+ list_for_each_entry_safe(waiter, next, &syncpt->wait_head, list) {
+ if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED)
+ == WLS_CANCELLED) {
+ list_del(&waiter->list);
+ kref_put(&waiter->refcount, waiter_release);
+ }
+ }
+
+ if (!list_empty(&syncpt->wait_head)) { /* output diagnostics */
+ printk(KERN_DEBUG "%s id=%d\n", __func__, id);
+			BUG();
+ }
+
+ free_syncpt_irq(syncpt);
+ }
+
+ intr_op().free_host_general_irq(intr);
+
+ mutex_unlock(&intr->mutex);
+}
diff --git a/drivers/video/tegra/host/nvhost_intr.h b/drivers/video/tegra/host/nvhost_intr.h
new file mode 100644
index 000000000000..d4a6157eced1
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_intr.h
@@ -0,0 +1,115 @@
+/*
+ * drivers/video/tegra/host/nvhost_intr.h
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_INTR_H
+#define __NVHOST_INTR_H
+
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+
+struct nvhost_channel;
+
+enum nvhost_intr_action {
+ /**
+ * Perform cleanup after a submit has completed.
+ * 'data' points to a channel
+ */
+ NVHOST_INTR_ACTION_SUBMIT_COMPLETE = 0,
+
+ /**
+ * Save a HW context.
+ * 'data' points to a context
+ */
+ NVHOST_INTR_ACTION_CTXSAVE,
+
+ /**
+ * Wake up a task.
+ * 'data' points to a wait_queue_head_t
+ */
+ NVHOST_INTR_ACTION_WAKEUP,
+
+ /**
+	 * Wake up an interruptible task.
+ * 'data' points to a wait_queue_head_t
+ */
+ NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+
+ NVHOST_INTR_ACTION_COUNT
+};
+
+struct nvhost_intr;
+
+struct nvhost_intr_syncpt {
+ struct nvhost_intr *intr;
+ u8 id;
+ u8 irq_requested;
+ u16 irq;
+ spinlock_t lock;
+ struct list_head wait_head;
+ char thresh_irq_name[12];
+};
+
+struct nvhost_intr {
+ struct nvhost_intr_syncpt *syncpt;
+ struct mutex mutex;
+ int host_general_irq;
+ int host_syncpt_irq_base;
+ bool host_general_irq_requested;
+};
+#define intr_to_dev(x) container_of(x, struct nvhost_master, intr)
+#define intr_syncpt_to_intr(is) (is->intr)
+
+/**
+ * Schedule an action to be taken when a sync point reaches the given threshold.
+ *
+ * @id the sync point
+ * @thresh the threshold
+ * @action the action to take
+ * @data a pointer to extra data depending on action, see above
+ * @waiter waiter allocated with nvhost_intr_alloc_waiter - assumes ownership
+ * @ref must be passed if cancellation is possible, else NULL
+ *
+ * This is a non-blocking api.
+ */
+int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
+ enum nvhost_intr_action action, void *data,
+ void *waiter,
+ void **ref);
+
+/**
+ * Allocate a waiter.
+ */
+void *nvhost_intr_alloc_waiter(void);
+
+/**
+ * Unreference an action submitted to nvhost_intr_add_action().
+ * You must call this if you passed non-NULL as ref.
+ * @ref the ref returned from nvhost_intr_add_action()
+ */
+void nvhost_intr_put_ref(struct nvhost_intr *intr, u32 id, void *ref);
+
+int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync);
+void nvhost_intr_deinit(struct nvhost_intr *intr);
+void nvhost_intr_start(struct nvhost_intr *intr, u32 hz);
+void nvhost_intr_stop(struct nvhost_intr *intr);
+
+irqreturn_t nvhost_syncpt_thresh_fn(int irq, void *dev_id);
+#endif
diff --git a/drivers/video/tegra/host/nvhost_job.c b/drivers/video/tegra/host/nvhost_job.c
new file mode 100644
index 000000000000..5587f51ea996
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_job.c
@@ -0,0 +1,361 @@
+/*
+ * drivers/video/tegra/host/nvhost_job.c
+ *
+ * Tegra Graphics Host Job
+ *
+ * Copyright (c) 2010-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <trace/events/nvhost.h>
+#include "nvhost_channel.h"
+#include "nvhost_job.h"
+#include "nvhost_hwctx.h"
+#include "nvhost_syncpt.h"
+#include "dev.h"
+#include "nvhost_memmgr.h"
+#include "chip_support.h"
+#include "nvmap.h"
+
+/* Magic to use to fill freed handle slots */
+#define BAD_MAGIC 0xdeadbeef
+
+static size_t job_size(struct nvhost_submit_hdr_ext *hdr)
+{
+ s64 num_relocs = hdr ? (int)hdr->num_relocs : 0;
+ s64 num_waitchks = hdr ? (int)hdr->num_waitchks : 0;
+ s64 num_cmdbufs = hdr ? (int)hdr->num_cmdbufs : 0;
+ s64 num_unpins = num_cmdbufs + num_relocs;
+ s64 total;
+
+	if (num_relocs < 0 || num_waitchks < 0 || num_cmdbufs < 0)
+ return 0;
+
+ total = sizeof(struct nvhost_job)
+ + num_relocs * sizeof(struct nvhost_reloc)
+ + num_relocs * sizeof(struct nvhost_reloc_shift)
+ + num_unpins * sizeof(struct mem_handle *)
+ + num_waitchks * sizeof(struct nvhost_waitchk)
+ + num_cmdbufs * sizeof(struct nvhost_job_gather);
+
+	if (total > ULONG_MAX)
+ return 0;
+ return (size_t)total;
+}
+
+static void init_fields(struct nvhost_job *job,
+ struct nvhost_submit_hdr_ext *hdr,
+ int priority, int clientid)
+{
+ int num_relocs = hdr ? hdr->num_relocs : 0;
+ int num_waitchks = hdr ? hdr->num_waitchks : 0;
+ int num_cmdbufs = hdr ? hdr->num_cmdbufs : 0;
+ int num_unpins = num_cmdbufs + num_relocs;
+ void *mem = job;
+
+	/* vzalloc() has already zeroed the job; fill in the simple fields */
+ job->priority = priority;
+ job->clientid = clientid;
+
+ /*
+ * Redistribute memory to the structs.
+ * Overflows and negative conditions have
+ * already been checked in job_alloc().
+ */
+ mem += sizeof(struct nvhost_job);
+ job->relocarray = num_relocs ? mem : NULL;
+ mem += num_relocs * sizeof(struct nvhost_reloc);
+ job->relocshiftarray = num_relocs ? mem : NULL;
+ mem += num_relocs * sizeof(struct nvhost_reloc_shift);
+ job->unpins = num_unpins ? mem : NULL;
+ mem += num_unpins * sizeof(struct mem_handle *);
+ job->waitchk = num_waitchks ? mem : NULL;
+ mem += num_waitchks * sizeof(struct nvhost_waitchk);
+ job->gathers = num_cmdbufs ? mem : NULL;
+
+ /* Copy information from header */
+ if (hdr) {
+ job->waitchk_mask = hdr->waitchk_mask;
+ job->syncpt_id = hdr->syncpt_id;
+ job->syncpt_incrs = hdr->syncpt_incrs;
+ }
+}
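job_size() and init_fields() implement the usual single-allocation carve-out: one vzalloc() sized for the struct plus all trailing arrays, with pointers then handed out slice by slice. A self-contained userspace sketch of the same idiom (hypothetical payload; all slices here share int alignment, just as the driver's slices are compatibly aligned structs and pointers):

#include <stdlib.h>

struct carved { int n_a, n_b; };

/* one allocation holding the header plus two trailing int arrays */
static struct carved *carve_alloc(int n_a, int n_b, int **a, int **b)
{
	char *mem = calloc(1, sizeof(struct carved)
			+ (size_t)n_a * sizeof(int)
			+ (size_t)n_b * sizeof(int));

	if (!mem)
		return NULL;
	*a = n_a ? (int *)(mem + sizeof(struct carved)) : NULL;
	*b = n_b ? (int *)(mem + sizeof(struct carved)
			+ (size_t)n_a * sizeof(int)) : NULL;
	return (struct carved *)mem;
}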
+
+struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
+ struct nvhost_hwctx *hwctx,
+ struct nvhost_submit_hdr_ext *hdr,
+ struct mem_mgr *memmgr,
+ int priority,
+ int clientid)
+{
+ struct nvhost_job *job = NULL;
+ size_t size = job_size(hdr);
+
+	if (!size)
+ goto error;
+ job = vzalloc(size);
+ if (!job)
+ goto error;
+
+ kref_init(&job->ref);
+ job->ch = ch;
+ job->hwctx = hwctx;
+ if (hwctx)
+ hwctx->h->get(hwctx);
+ job->memmgr = memmgr ? mem_op().get_mgr(memmgr) : NULL;
+
+ init_fields(job, hdr, priority, clientid);
+
+ return job;
+
+error:
+ if (job)
+ nvhost_job_put(job);
+ return NULL;
+}
+
+void nvhost_job_get(struct nvhost_job *job)
+{
+ kref_get(&job->ref);
+}
+
+static void job_free(struct kref *ref)
+{
+ struct nvhost_job *job = container_of(ref, struct nvhost_job, ref);
+
+ if (job->hwctxref)
+ job->hwctxref->h->put(job->hwctxref);
+ if (job->hwctx)
+ job->hwctx->h->put(job->hwctx);
+ if (job->memmgr)
+ mem_op().put_mgr(job->memmgr);
+ vfree(job);
+}
+
+/* Acquire reference to a hardware context. Used for keeping saved contexts in
+ * memory. */
+void nvhost_job_get_hwctx(struct nvhost_job *job, struct nvhost_hwctx *hwctx)
+{
+ BUG_ON(job->hwctxref);
+
+ job->hwctxref = hwctx;
+ hwctx->h->get(hwctx);
+}
+
+void nvhost_job_put(struct nvhost_job *job)
+{
+ kref_put(&job->ref, job_free);
+}
+
+void nvhost_job_add_gather(struct nvhost_job *job,
+ u32 mem_id, u32 words, u32 offset)
+{
+ struct nvhost_job_gather *cur_gather =
+ &job->gathers[job->num_gathers];
+
+ cur_gather->words = words;
+ cur_gather->mem_id = mem_id;
+ cur_gather->offset = offset;
+ job->num_gathers += 1;
+}
+
+static int do_relocs(struct nvhost_job *job, u32 cmdbuf_mem, void *cmdbuf_addr)
+{
+ phys_addr_t target_phys = -EINVAL;
+ int i;
+ u32 mem_id = 0;
+ struct mem_handle *target_ref = NULL;
+
+ /* pin & patch the relocs for one gather */
+ for (i = 0; i < job->num_relocs; i++) {
+ struct nvhost_reloc *reloc = &job->relocarray[i];
+ struct nvhost_reloc_shift *shift = &job->relocshiftarray[i];
+
+ /* skip all other gathers */
+ if (cmdbuf_mem != reloc->cmdbuf_mem)
+ continue;
+
+ /* check if pin-mem is same as previous */
+ if (reloc->target != mem_id) {
+ target_ref = mem_op().get(job->memmgr, reloc->target);
+ if (IS_ERR(target_ref))
+ return PTR_ERR(target_ref);
+
+ target_phys = mem_op().pin(job->memmgr, target_ref);
+ if (IS_ERR((void *)target_phys)) {
+ mem_op().put(job->memmgr, target_ref);
+ return target_phys;
+ }
+
+ mem_id = reloc->target;
+ job->unpins[job->num_unpins++] = target_ref;
+ }
+
+ __raw_writel(
+ (target_phys + reloc->target_offset) >> shift->shift,
+ (cmdbuf_addr + reloc->cmdbuf_offset));
+
+ /* Different gathers might have same mem_id. This ensures we
+ * perform reloc only once per gather memid. */
+ reloc->cmdbuf_mem = 0;
+ }
+
+ return 0;
+}
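For example (hypothetical values): with target_phys = 0x2f600000, target_offset = 0x80 and shift->shift = 0, the word 0x2f600080 is written at cmdbuf_addr + cmdbuf_offset; with shift->shift = 8 the stored word would be 0x002f6000, which suits hardware fields that take addresses in 256-byte units.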
+
+/*
+ * Check driver supplied waitchk structs for syncpt thresholds
+ * that have already been satisfied and NULL the comparison (to
+ * avoid a wrap condition in the HW).
+ */
+static int do_waitchks(struct nvhost_job *job, struct nvhost_syncpt *sp,
+ u32 patch_mem, void *patch_addr)
+{
+ int i;
+
+ /* compare syncpt vs wait threshold */
+ for (i = 0; i < job->num_waitchk; i++) {
+ struct nvhost_waitchk *wait = &job->waitchk[i];
+
+ /* skip all other gathers */
+ if (patch_mem != wait->mem)
+ continue;
+
+ trace_nvhost_syncpt_wait_check(wait->mem, wait->offset,
+ wait->syncpt_id, wait->thresh,
+ nvhost_syncpt_read(sp, wait->syncpt_id));
+ if (nvhost_syncpt_is_expired(sp,
+ wait->syncpt_id, wait->thresh)) {
+ /*
+ * NULL an already satisfied WAIT_SYNCPT host method,
+ * by patching its args in the command stream. The
+ * method data is changed to reference a reserved
+			 * (never given out or incremented)
+			 * NVSYNCPT_GRAPHICS_HOST
+ * syncpt with a matching threshold value of 0, so
+ * is guaranteed to be popped by the host HW.
+ */
+ dev_dbg(&syncpt_to_dev(sp)->dev->dev,
+ "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
+ wait->syncpt_id,
+ syncpt_op().name(sp, wait->syncpt_id),
+ wait->thresh,
+ nvhost_syncpt_read_min(sp, wait->syncpt_id));
+
+ /* patch the wait */
+ nvhost_syncpt_patch_wait(sp,
+ (patch_addr + wait->offset));
+ }
+
+ wait->mem = 0;
+ }
+ return 0;
+}
+
+int nvhost_job_pin(struct nvhost_job *job, struct nvhost_syncpt *sp)
+{
+ int err = 0, i = 0;
+ phys_addr_t gather_phys = 0;
+ void *gather_addr = NULL;
+ unsigned long waitchk_mask = job->waitchk_mask;
+
+	/* get current syncpt values for waitchk; bitmap length is in bits */
+	for_each_set_bit(i, &waitchk_mask, sizeof(job->waitchk_mask) * 8)
+ nvhost_syncpt_update_min(sp, i);
+
+ /* pin gathers */
+ for (i = 0; i < job->num_gathers; i++) {
+ struct nvhost_job_gather *g = &job->gathers[i];
+
+ /* process each gather mem only once */
+ if (!g->ref) {
+ g->ref = mem_op().get(job->memmgr,
+ job->gathers[i].mem_id);
+ if (IS_ERR(g->ref)) {
+ err = PTR_ERR(g->ref);
+ g->ref = NULL;
+ break;
+ }
+
+ gather_phys = mem_op().pin(job->memmgr, g->ref);
+ if (IS_ERR((void *)gather_phys)) {
+ mem_op().put(job->memmgr, g->ref);
+ err = gather_phys;
+ break;
+ }
+
+ /* store the gather ref into unpin array */
+ job->unpins[job->num_unpins++] = g->ref;
+
+ gather_addr = mem_op().mmap(g->ref);
+ if (!gather_addr) {
+ err = -ENOMEM;
+ break;
+ }
+
+ err = do_relocs(job, g->mem_id, gather_addr);
+ if (!err)
+ err = do_waitchks(job, sp,
+ g->mem_id, gather_addr);
+ mem_op().munmap(g->ref, gather_addr);
+
+ if (err)
+ break;
+ }
+ g->mem = gather_phys + g->offset;
+ }
+ wmb();
+
+ return err;
+}
+
+void nvhost_job_unpin(struct nvhost_job *job)
+{
+ int i;
+
+ for (i = 0; i < job->num_unpins; i++) {
+ struct mem_handle *handle;
+ handle = nvhost_nvmap_validate_ref(job->memmgr, job->unpins[i]);
+ mem_op().unpin(job->memmgr, handle);
+ mem_op().put(job->memmgr, handle);
+ }
+
+ memset(job->unpins, BAD_MAGIC,
+ job->num_unpins * sizeof(struct mem_handle *));
+ job->num_unpins = 0;
+}
+
+/**
+ * Debug routine used to dump job entries
+ */
+void nvhost_job_dump(struct device *dev, struct nvhost_job *job)
+{
+ dev_dbg(dev, " SYNCPT_ID %d\n",
+ job->syncpt_id);
+ dev_dbg(dev, " SYNCPT_VAL %d\n",
+ job->syncpt_end);
+ dev_dbg(dev, " FIRST_GET 0x%x\n",
+ job->first_get);
+ dev_dbg(dev, " TIMEOUT %d\n",
+ job->timeout);
+ dev_dbg(dev, " CTX 0x%p\n",
+ job->hwctx);
+ dev_dbg(dev, " NUM_SLOTS %d\n",
+ job->num_slots);
+ dev_dbg(dev, " NUM_HANDLES %d\n",
+ job->num_unpins);
+}
diff --git a/drivers/video/tegra/host/nvhost_job.h b/drivers/video/tegra/host/nvhost_job.h
new file mode 100644
index 000000000000..3b444579c543
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_job.h
@@ -0,0 +1,148 @@
+/*
+ * drivers/video/tegra/host/nvhost_job.h
+ *
+ * Tegra Graphics Host Job
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_JOB_H
+#define __NVHOST_JOB_H
+
+#include <linux/nvhost_ioctl.h>
+
+struct nvhost_channel;
+struct nvhost_hwctx;
+struct nvhost_waitchk;
+struct nvhost_syncpt;
+
+struct nvhost_job_gather {
+ u32 words;
+ phys_addr_t mem;
+ u32 mem_id;
+ int offset;
+ struct mem_handle *ref;
+};
+
+/*
+ * Each submit is tracked as a nvhost_job.
+ */
+struct nvhost_job {
+ /* When refcount goes to zero, job can be freed */
+ struct kref ref;
+
+ /* List entry */
+ struct list_head list;
+
+ /* Channel where job is submitted to */
+ struct nvhost_channel *ch;
+
+ /* Hardware context valid for this client */
+ struct nvhost_hwctx *hwctx;
+ int clientid;
+
+ /* Nvmap to be used for pinning & unpinning memory */
+ struct mem_mgr *memmgr;
+
+ /* Gathers and their memory */
+ struct nvhost_job_gather *gathers;
+ int num_gathers;
+
+ /* Wait checks to be processed at submit time */
+ struct nvhost_waitchk *waitchk;
+ int num_waitchk;
+ u32 waitchk_mask;
+
+ /* Array of handles to be pinned & unpinned */
+ struct nvhost_reloc *relocarray;
+ struct nvhost_reloc_shift *relocshiftarray;
+ int num_relocs;
+ struct mem_handle **unpins;
+ int num_unpins;
+
+ /* Sync point id, number of increments and end related to the submit */
+ u32 syncpt_id;
+ u32 syncpt_incrs;
+ u32 syncpt_end;
+
+ /* Priority of this submit. */
+ int priority;
+
+ /* Maximum time to wait for this job */
+ int timeout;
+
+ /* Null kickoff prevents submit from being sent to hardware */
+ bool null_kickoff;
+
+ /* Index and number of slots used in the push buffer */
+ int first_get;
+ int num_slots;
+
+ /* Context to be freed */
+ struct nvhost_hwctx *hwctxref;
+};
+
+/*
+ * Allocate memory for a job. Just enough memory will be allocated to
+ * accommodate the submit announced in the submit header.
+ */
+struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
+ struct nvhost_hwctx *hwctx,
+ struct nvhost_submit_hdr_ext *hdr,
+ struct mem_mgr *memmgr,
+ int priority, int clientid);
+
+/*
+ * Add a gather to a job.
+ */
+void nvhost_job_add_gather(struct nvhost_job *job,
+ u32 mem_id, u32 words, u32 offset);
+
+/*
+ * Increment reference going to nvhost_job.
+ */
+void nvhost_job_get(struct nvhost_job *job);
+
+/*
+ * Increment reference for a hardware context.
+ */
+void nvhost_job_get_hwctx(struct nvhost_job *job, struct nvhost_hwctx *hwctx);
+
+/*
+ * Decrement reference job, free if goes to zero.
+ */
+void nvhost_job_put(struct nvhost_job *job);
+
+/*
+ * Pin memory related to job. This handles relocation of addresses to the
+ * host1x address space. Handles both the gather memory and any other memory
+ * referred to from the gather buffers.
+ *
+ * Handles also patching out host waits that would wait for an expired sync
+ * point value.
+ */
+int nvhost_job_pin(struct nvhost_job *job, struct nvhost_syncpt *sp);
+
+/*
+ * Unpin memory related to job.
+ */
+void nvhost_job_unpin(struct nvhost_job *job);
+
+/*
+ * Dump contents of job to debug output.
+ */
+void nvhost_job_dump(struct device *dev, struct nvhost_job *job);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_memmgr.c b/drivers/video/tegra/host/nvhost_memmgr.c
new file mode 100644
index 000000000000..f530c2e63006
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_memmgr.c
@@ -0,0 +1,34 @@
+/*
+ * drivers/video/tegra/host/nvhost_memmgr.c
+ *
+ * Tegra Graphics Host Memory Management Abstraction
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "nvhost_memmgr.h"
+#include "nvmap.h"
+
+int nvhost_memmgr_init(struct nvhost_chip_support *chip)
+{
+#ifdef CONFIG_TEGRA_GRHOST_USE_NVMAP
+ return nvhost_init_nvmap_support(chip);
+#endif
+	pr_err("%s: no memory manager selected\n", __func__);
+ return -ENODEV;
+}
diff --git a/drivers/video/tegra/host/nvhost_memmgr.h b/drivers/video/tegra/host/nvhost_memmgr.h
new file mode 100644
index 000000000000..d61379b6ff55
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_memmgr.h
@@ -0,0 +1,38 @@
+/*
+ * drivers/video/tegra/host/nvhost_memmgr.h
+ *
+ * Tegra Graphics Host Memory Management Abstraction header
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _NVHOST_MEM_MGR_H_
+#define _NVHOST_MEM_MGR_H_
+
+struct nvhost_chip_support;
+
+enum mem_mgr_flag {
+ mem_mgr_flag_uncacheable = 0,
+ mem_mgr_flag_write_combine = 1,
+};
+
+struct mem_mgr_handle {
+ struct mem_mgr *client;
+ struct mem_handle *handle;
+};
+
+int nvhost_memmgr_init(struct nvhost_chip_support *chip);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_syncpt.c b/drivers/video/tegra/host/nvhost_syncpt.c
new file mode 100644
index 000000000000..5837a3f76cf0
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_syncpt.c
@@ -0,0 +1,512 @@
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.c
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/nvhost_ioctl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <trace/events/nvhost.h>
+#include "nvhost_syncpt.h"
+#include "nvhost_acm.h"
+#include "dev.h"
+#include "chip_support.h"
+
+#define MAX_SYNCPT_LENGTH 5
+/* Name of sysfs node for min and max value */
+static const char *min_name = "min";
+static const char *max_name = "max";
+
+/**
+ * Resets syncpoint and waitbase values to sw shadows
+ */
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
+{
+ u32 i;
+ BUG_ON(!(syncpt_op().reset && syncpt_op().reset_wait_base));
+
+ for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++)
+ syncpt_op().reset(sp, i);
+ for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++)
+ syncpt_op().reset_wait_base(sp, i);
+ wmb();
+}
+
+/**
+ * Updates sw shadow state for client managed registers
+ */
+void nvhost_syncpt_save(struct nvhost_syncpt *sp)
+{
+ u32 i;
+ BUG_ON(!(syncpt_op().update_min && syncpt_op().read_wait_base));
+
+ for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
+ if (nvhost_syncpt_client_managed(sp, i))
+ syncpt_op().update_min(sp, i);
+ else
+ BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
+ }
+
+ for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++)
+ syncpt_op().read_wait_base(sp, i);
+}
+
+/**
+ * Updates the last value read from hardware.
+ */
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
+{
+ u32 val;
+
+ BUG_ON(!syncpt_op().update_min);
+
+ val = syncpt_op().update_min(sp, id);
+ trace_nvhost_syncpt_update_min(id, val);
+
+ return val;
+}
+
+/**
+ * Get the current syncpoint value
+ */
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
+{
+ u32 val;
+ BUG_ON(!syncpt_op().update_min);
+ nvhost_module_busy(syncpt_to_dev(sp)->dev);
+ val = syncpt_op().update_min(sp, id);
+ nvhost_module_idle(syncpt_to_dev(sp)->dev);
+ return val;
+}
+
+/**
+ * Get the current syncpoint base
+ */
+u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+ u32 val;
+ BUG_ON(!syncpt_op().read_wait_base);
+ nvhost_module_busy(syncpt_to_dev(sp)->dev);
+ syncpt_op().read_wait_base(sp, id);
+ val = sp->base_val[id];
+ nvhost_module_idle(syncpt_to_dev(sp)->dev);
+ return val;
+}
+
+/**
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache. Caller is responsible for host being powered.
+ */
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
+{
+ BUG_ON(!syncpt_op().cpu_incr);
+ syncpt_op().cpu_incr(sp, id);
+}
+
+/**
+ * Increment syncpoint value from cpu, updating cache
+ */
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
+{
+ if (nvhost_syncpt_client_managed(sp, id))
+ nvhost_syncpt_incr_max(sp, id, 1);
+ nvhost_module_busy(syncpt_to_dev(sp)->dev);
+ nvhost_syncpt_cpu_incr(sp, id);
+ nvhost_module_idle(syncpt_to_dev(sp)->dev);
+}
+
+/**
+ * Updates the sync point from hardware, and returns true if the
+ * syncpoint is expired, false if we may need to wait
+ */
+static bool syncpt_update_min_is_expired(
+ struct nvhost_syncpt *sp,
+ u32 id,
+ u32 thresh)
+{
+ syncpt_op().update_min(sp, id);
+ return nvhost_syncpt_is_expired(sp, id, thresh);
+}
+
+/**
+ * Main entrypoint for syncpoint value waits.
+ */
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
+ u32 thresh, u32 timeout, u32 *value)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ void *ref;
+ void *waiter;
+ int err = 0, check_count = 0, low_timeout = 0;
+ u32 val;
+
+ if (value)
+ *value = 0;
+
+ /* first check cache */
+ if (nvhost_syncpt_is_expired(sp, id, thresh)) {
+ if (value)
+ *value = nvhost_syncpt_read_min(sp, id);
+ return 0;
+ }
+
+ /* keep host alive */
+ nvhost_module_busy(syncpt_to_dev(sp)->dev);
+
+ /* try to read from register */
+ val = syncpt_op().update_min(sp, id);
+ if (nvhost_syncpt_is_expired(sp, id, thresh)) {
+ if (value)
+ *value = val;
+ goto done;
+ }
+
+ if (!timeout) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* schedule a wakeup when the syncpoint value is reached */
+ waiter = nvhost_intr_alloc_waiter();
+ if (!waiter) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
+ NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
+ waiter,
+ &ref);
+ if (err)
+ goto done;
+
+ err = -EAGAIN;
+ /* Caller-specified timeout may be impractically low */
+ if (timeout < SYNCPT_CHECK_PERIOD)
+ low_timeout = timeout;
+
+ /* wait for the syncpoint, or timeout, or signal */
+ while (timeout) {
+ u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
+ int remain = wait_event_interruptible_timeout(wq,
+ syncpt_update_min_is_expired(sp, id, thresh),
+ check);
+ if (remain > 0 || nvhost_syncpt_is_expired(sp, id, thresh)) {
+ if (value)
+ *value = nvhost_syncpt_read_min(sp, id);
+ err = 0;
+ break;
+ }
+ if (remain < 0) {
+ err = remain;
+ break;
+ }
+ if (timeout != NVHOST_NO_TIMEOUT)
+ timeout -= check;
+ if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
+ dev_warn(&syncpt_to_dev(sp)->dev->dev,
+ "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
+ current->comm, id, syncpt_op().name(sp, id),
+ thresh, timeout);
+ syncpt_op().debug(sp);
+ if (check_count == MAX_STUCK_CHECK_COUNT) {
+ if (low_timeout) {
+ dev_warn(&syncpt_to_dev(sp)->dev->dev,
+ "is timeout %d too low?\n",
+ low_timeout);
+ }
+ nvhost_debug_dump(syncpt_to_dev(sp));
+ }
+ check_count++;
+ }
+ }
+ nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), id, ref);
+
+done:
+ nvhost_module_idle(syncpt_to_dev(sp)->dev);
+ return err;
+}
+
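+/*
+ * Usage sketch (illustrative, not part of this patch). The timeout is
+ * given in jiffies, like SYNCPT_CHECK_PERIOD above, and -EAGAIN means
+ * the wait timed out with *value left at 0:
+ *
+ *	u32 completed;
+ *	int err = nvhost_syncpt_wait_timeout(sp, id, thresh,
+ *			msecs_to_jiffies(1000), &completed);
+ */
+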
+/**
+ * Returns true if syncpoint is expired, false if we may need to wait
+ */
+bool nvhost_syncpt_is_expired(
+ struct nvhost_syncpt *sp,
+ u32 id,
+ u32 thresh)
+{
+ u32 current_val;
+ u32 future_val;
+ smp_rmb();
+ current_val = (u32)atomic_read(&sp->min_val[id]);
+ future_val = (u32)atomic_read(&sp->max_val[id]);
+
+ /* Note the use of unsigned arithmetic here (mod 1<<32).
+ *
+ * c = current_val = min_val = the current value of the syncpoint.
+ * t = thresh = the value we are checking
+ * f = future_val = max_val = the value c will reach when all
+ * outstanding increments have completed.
+ *
+ * Note that c always chases f until it reaches f.
+ *
+ * Dtf = (f - t)
+ * Dtc = (c - t)
+ *
+ * Consider all cases:
+ *
+ * A) .....c..t..f..... Dtf < Dtc need to wait
+ * B) .....c.....f..t.. Dtf > Dtc expired
+ * C) ..t..c.....f..... Dtf > Dtc expired (Dct very large)
+ *
+ * Any case where f==c: always expired (for any t). Dtf == Dcf
+ * Any case where t==c: always expired (for any f). Dtf >= Dtc (because Dtc==0)
+ * Any case where t==f!=c: always wait. Dtf < Dtc (because Dtf==0,
+ * Dtc!=0)
+ *
+ * Other cases:
+ *
+ * A) .....t..f..c..... Dtf < Dtc need to wait
+ * A) .....f..c..t..... Dtf < Dtc need to wait
+ * A) .....f..t..c..... Dtf > Dtc expired
+ *
+ * So:
+ * Dtf >= Dtc implies EXPIRED (return true)
+ * Dtf < Dtc implies WAIT (return false)
+ *
+ * Note: If t is expired then we *cannot* wait on it. We would wait
+ * forever (hang the system).
+ *
+ * Note: do NOT get clever and remove the -thresh from both sides. It
+ * is NOT the same.
+ *
+ * If the sync point is client managed, its future value cannot be
+ * relied upon, so in that case we do a direct signed comparison.
+ */
+ if (!nvhost_syncpt_client_managed(sp, id))
+ return future_val - thresh >= current_val - thresh;
+ else
+ return (s32)(current_val - thresh) >= 0;
+}
+
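+/*
+ * Worked example of the mod-2^32 arithmetic above (illustrative): take
+ * c = 0xfffffffe, f = 0x00000001 (max has wrapped past min) and
+ * t = 0xffffffff. Then Dtf = f - t = 2 while Dtc = c - t = 0xffffffff,
+ * so Dtf < Dtc and the caller must wait -- the right answer even
+ * though t > f in a plain unsigned comparison.
+ */
+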
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
+{
+ syncpt_op().debug(sp);
+}
+
+int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx)
+{
+ struct nvhost_master *host = syncpt_to_dev(sp);
+ u32 reg;
+
+ nvhost_module_busy(host->dev);
+ reg = syncpt_op().mutex_try_lock(sp, idx);
+ if (reg) {
+ nvhost_module_idle(host->dev);
+ return -EBUSY;
+ }
+ atomic_inc(&sp->lock_counts[idx]);
+ return 0;
+}
+
+void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx)
+{
+ syncpt_op().mutex_unlock(sp, idx);
+ nvhost_module_idle(syncpt_to_dev(sp)->dev);
+ atomic_dec(&sp->lock_counts[idx]);
+}
+
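+/*
+ * Illustrative pairing (not from this patch): a module mutex taken
+ * with nvhost_mutex_try_lock() must be released with
+ * nvhost_mutex_unlock(), e.g. around direct access to a shared unit:
+ *
+ *	if (nvhost_mutex_try_lock(sp, idx))
+ *		return -EBUSY;
+ *	... program the unit's registers ...
+ *	nvhost_mutex_unlock(sp, idx);
+ */
+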
+/* remove a wait pointed to by patch_addr */
+int nvhost_syncpt_patch_wait(struct nvhost_syncpt *sp, void *patch_addr)
+{
+ return syncpt_op().patch_wait(sp, patch_addr);
+}
+
+/* Displays the current value of the sync point via sysfs */
+static ssize_t syncpt_min_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct nvhost_syncpt_attr *syncpt_attr =
+ container_of(attr, struct nvhost_syncpt_attr, attr);
+
+ return snprintf(buf, PAGE_SIZE, "%u",
+ nvhost_syncpt_read(&syncpt_attr->host->syncpt,
+ syncpt_attr->id));
+}
+
+static ssize_t syncpt_max_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct nvhost_syncpt_attr *syncpt_attr =
+ container_of(attr, struct nvhost_syncpt_attr, attr);
+
+ return snprintf(buf, PAGE_SIZE, "%u",
+ nvhost_syncpt_read_max(&syncpt_attr->host->syncpt,
+ syncpt_attr->id));
+}
+
+int nvhost_syncpt_init(struct nvhost_device *dev,
+ struct nvhost_syncpt *sp)
+{
+ int i;
+ struct nvhost_master *host = syncpt_to_dev(sp);
+ int err = 0;
+
+ /* Allocate structs for min, max and base values */
+ sp->min_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
+ GFP_KERNEL);
+ sp->max_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
+ GFP_KERNEL);
+ sp->base_val = kzalloc(sizeof(u32) * nvhost_syncpt_nb_bases(sp),
+ GFP_KERNEL);
+ sp->lock_counts =
+ kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_mlocks(sp),
+ GFP_KERNEL);
+
+ if (!(sp->min_val && sp->max_val && sp->base_val && sp->lock_counts)) {
+ /* frees happen in the deinit */
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ sp->kobj = kobject_create_and_add("syncpt", &dev->dev.kobj);
+ if (!sp->kobj) {
+ err = -EIO;
+ goto fail;
+ }
+
+ /* Allocate two attributes for each sync point: min and max */
+ sp->syncpt_attrs = kzalloc(sizeof(*sp->syncpt_attrs)
+ * nvhost_syncpt_nb_pts(sp) * 2, GFP_KERNEL);
+ if (!sp->syncpt_attrs) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ /* Fill in the attributes */
+ for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
+ char name[MAX_SYNCPT_LENGTH];
+ struct kobject *kobj;
+ struct nvhost_syncpt_attr *min = &sp->syncpt_attrs[i*2];
+ struct nvhost_syncpt_attr *max = &sp->syncpt_attrs[i*2+1];
+
+ /* Create one directory per sync point */
+ snprintf(name, sizeof(name), "%d", i);
+ kobj = kobject_create_and_add(name, sp->kobj);
+ if (!kobj) {
+ err = -EIO;
+ goto fail;
+ }
+
+ min->id = i;
+ min->host = host;
+ min->attr.attr.name = min_name;
+ min->attr.attr.mode = S_IRUGO;
+ min->attr.show = syncpt_min_show;
+ if (sysfs_create_file(kobj, &min->attr.attr)) {
+ err = -EIO;
+ goto fail;
+ }
+
+ max->id = i;
+ max->host = host;
+ max->attr.attr.name = max_name;
+ max->attr.attr.mode = S_IRUGO;
+ max->attr.show = syncpt_max_show;
+ if (sysfs_create_file(kobj, &max->attr.attr)) {
+ err = -EIO;
+ goto fail;
+ }
+ }
+
+ return err;
+
+fail:
+ nvhost_syncpt_deinit(sp);
+ return err;
+}
+
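+/*
+ * The resulting sysfs layout (path illustrative) is one directory per
+ * sync point, each with read-only min and max nodes:
+ *
+ *	.../host1x/syncpt/<id>/min
+ *	.../host1x/syncpt/<id>/max
+ *
+ * Reading min queries the hardware via nvhost_syncpt_read(); max
+ * returns the sw shadow via nvhost_syncpt_read_max().
+ */
+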
+void nvhost_syncpt_deinit(struct nvhost_syncpt *sp)
+{
+ kobject_put(sp->kobj);
+
+ kfree(sp->min_val);
+ sp->min_val = NULL;
+
+ kfree(sp->max_val);
+ sp->max_val = NULL;
+
+ kfree(sp->base_val);
+ sp->base_val = NULL;
+
+ kfree(sp->lock_counts);
+ sp->lock_counts = NULL;
+
+ kfree(sp->syncpt_attrs);
+ sp->syncpt_attrs = NULL;
+}
+
+int nvhost_syncpt_client_managed(struct nvhost_syncpt *sp, u32 id)
+{
+ return BIT(id) & syncpt_to_dev(sp)->info.client_managed;
+}
+
+int nvhost_syncpt_nb_pts(struct nvhost_syncpt *sp)
+{
+ return syncpt_to_dev(sp)->info.nb_pts;
+}
+
+int nvhost_syncpt_nb_bases(struct nvhost_syncpt *sp)
+{
+ return syncpt_to_dev(sp)->info.nb_bases;
+}
+
+int nvhost_syncpt_nb_mlocks(struct nvhost_syncpt *sp)
+{
+ return syncpt_to_dev(sp)->info.nb_mlocks;
+}
+
+/* public sync point API */
+u32 nvhost_syncpt_incr_max_ext(struct nvhost_device *dev, u32 id, u32 incrs)
+{
+ struct nvhost_syncpt *sp = &(nvhost_get_host(dev)->syncpt);
+ return nvhost_syncpt_incr_max(sp, id, incrs);
+}
+EXPORT_SYMBOL(nvhost_syncpt_incr_max_ext);
+
+void nvhost_syncpt_cpu_incr_ext(struct nvhost_device *dev, u32 id)
+{
+ struct nvhost_syncpt *sp = &(nvhost_get_host(dev)->syncpt);
+ nvhost_syncpt_cpu_incr(sp, id);
+}
+EXPORT_SYMBOL(nvhost_syncpt_cpu_incr_ext);
+
+u32 nvhost_syncpt_read_ext(struct nvhost_device *dev, u32 id)
+{
+ struct nvhost_syncpt *sp = &(nvhost_get_host(dev)->syncpt);
+ return nvhost_syncpt_read(sp, id);
+}
+EXPORT_SYMBOL(nvhost_syncpt_read_ext);
+
+int nvhost_syncpt_wait_timeout_ext(struct nvhost_device *dev, u32 id, u32 thresh,
+ u32 timeout, u32 *value)
+{
+ struct nvhost_syncpt *sp = &(nvhost_get_host(dev)->syncpt);
+ return nvhost_syncpt_wait_timeout(sp, id, thresh, timeout, value);
+}
+EXPORT_SYMBOL(nvhost_syncpt_wait_timeout_ext);
diff --git a/drivers/video/tegra/host/nvhost_syncpt.h b/drivers/video/tegra/host/nvhost_syncpt.h
new file mode 100644
index 000000000000..9ee4f3a8d49d
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_syncpt.h
@@ -0,0 +1,151 @@
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.h
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_SYNCPT_H
+#define __NVHOST_SYNCPT_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/nvhost.h>
+#include <linux/atomic.h>
+
+/* host managed and invalid syncpt id */
+#define NVSYNCPT_GRAPHICS_HOST (0)
+
+/* Attribute struct for sysfs min and max attributes */
+struct nvhost_syncpt_attr {
+ struct kobj_attribute attr;
+ struct nvhost_master *host;
+ int id;
+};
+
+struct nvhost_syncpt {
+ struct kobject *kobj;
+ atomic_t *min_val;
+ atomic_t *max_val;
+ u32 *base_val;
+ atomic_t *lock_counts;
+ const char **syncpt_names;
+ struct nvhost_syncpt_attr *syncpt_attrs;
+};
+
+int nvhost_syncpt_init(struct nvhost_device *, struct nvhost_syncpt *);
+void nvhost_syncpt_deinit(struct nvhost_syncpt *);
+
+#define syncpt_to_dev(sp) container_of(sp, struct nvhost_master, syncpt)
+#define SYNCPT_CHECK_PERIOD (2 * HZ)
+#define MAX_STUCK_CHECK_COUNT 15
+
+/**
+ * Updates the value sent to hardware.
+ */
+static inline u32 nvhost_syncpt_incr_max(struct nvhost_syncpt *sp,
+ u32 id, u32 incrs)
+{
+ return (u32)atomic_add_return(incrs, &sp->max_val[id]);
+}
+
+/**
+ * Sets the value sent to hardware.
+ */
+static inline u32 nvhost_syncpt_set_max(struct nvhost_syncpt *sp,
+ u32 id, u32 val)
+{
+ atomic_set(&sp->max_val[id], val);
+ smp_wmb();
+ return val;
+}
+
+static inline u32 nvhost_syncpt_read_max(struct nvhost_syncpt *sp, u32 id)
+{
+ smp_rmb();
+ return (u32)atomic_read(&sp->max_val[id]);
+}
+
+static inline u32 nvhost_syncpt_read_min(struct nvhost_syncpt *sp, u32 id)
+{
+ smp_rmb();
+ return (u32)atomic_read(&sp->min_val[id]);
+}
+
+int nvhost_syncpt_client_managed(struct nvhost_syncpt *sp, u32 id);
+int nvhost_syncpt_nb_pts(struct nvhost_syncpt *sp);
+int nvhost_syncpt_nb_bases(struct nvhost_syncpt *sp);
+int nvhost_syncpt_nb_mlocks(struct nvhost_syncpt *sp);
+
+static inline bool nvhost_syncpt_check_max(struct nvhost_syncpt *sp,
+ u32 id, u32 real)
+{
+ u32 max;
+ if (nvhost_syncpt_client_managed(sp, id))
+ return true;
+ max = nvhost_syncpt_read_max(sp, id);
+ return (s32)(max - real) >= 0;
+}
+
+/**
+ * Returns true if syncpoint min == max
+ */
+static inline bool nvhost_syncpt_min_eq_max(struct nvhost_syncpt *sp, u32 id)
+{
+ int min, max;
+ smp_rmb();
+ min = atomic_read(&sp->min_val[id]);
+ max = atomic_read(&sp->max_val[id]);
+ return (min == max);
+}
+
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id);
+
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id);
+bool nvhost_syncpt_is_expired(struct nvhost_syncpt *sp, u32 id, u32 thresh);
+
+void nvhost_syncpt_save(struct nvhost_syncpt *sp);
+
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp);
+
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id);
+u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id);
+
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id);
+
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id, u32 thresh,
+ u32 timeout, u32 *value);
+
+static inline int nvhost_syncpt_wait(struct nvhost_syncpt *sp, u32 id, u32 thresh)
+{
+ return nvhost_syncpt_wait_timeout(sp, id, thresh,
+ MAX_SCHEDULE_TIMEOUT, NULL);
+}
+
+int nvhost_syncpt_patch_wait(struct nvhost_syncpt *sp, void *patch_addr);
+
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp);
+
+static inline int nvhost_syncpt_is_valid(struct nvhost_syncpt *sp, u32 id)
+{
+ return id != NVSYNCPT_INVALID && id < nvhost_syncpt_nb_pts(sp);
+}
+
+int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx);
+
+void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx);
+
+#endif
diff --git a/drivers/video/tegra/host/nvmap.c b/drivers/video/tegra/host/nvmap.c
new file mode 100644
index 000000000000..b8361c4a1a36
--- /dev/null
+++ b/drivers/video/tegra/host/nvmap.c
@@ -0,0 +1,109 @@
+/*
+ * drivers/video/tegra/host/nvmap.c
+ *
+ * Tegra Graphics Host Nvmap support
+ *
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "chip_support.h"
+#include <linux/nvmap.h>
+
+struct mem_mgr *nvhost_nvmap_alloc_mgr(void)
+{
+ return (struct mem_mgr *)nvmap_create_client(nvmap_dev, "nvhost");
+}
+
+void nvhost_nvmap_put_mgr(struct mem_mgr *mgr)
+{
+ nvmap_client_put((struct nvmap_client *)mgr);
+}
+
+struct mem_mgr *nvhost_nvmap_get_mgr(struct mem_mgr *mgr)
+{
+ return (struct mem_mgr *)nvmap_client_get((struct nvmap_client *)mgr);
+}
+
+struct mem_mgr *nvhost_nvmap_get_mgr_file(int fd)
+{
+ return (struct mem_mgr *)nvmap_client_get_file(fd);
+}
+
+struct mem_handle *nvhost_nvmap_alloc(struct mem_mgr *mgr,
+ size_t size, size_t align, int flags)
+{
+ return (struct mem_handle *)nvmap_alloc((struct nvmap_client *)mgr,
+ size, align, flags, 0);
+}
+
+void nvhost_nvmap_put(struct mem_mgr *mgr, struct mem_handle *handle)
+{
+ nvmap_free((struct nvmap_client *)mgr,
+ (struct nvmap_handle_ref *)handle);
+}
+
+phys_addr_t nvhost_nvmap_pin(struct mem_mgr *mgr, struct mem_handle *handle)
+{
+ return nvmap_pin((struct nvmap_client *)mgr,
+ (struct nvmap_handle_ref *)handle);
+}
+
+void nvhost_nvmap_unpin(struct mem_mgr *mgr, struct mem_handle *handle)
+{
+ nvmap_unpin((struct nvmap_client *)mgr,
+ (struct nvmap_handle_ref *)handle);
+}
+
+void *nvhost_nvmap_mmap(struct mem_handle *handle)
+{
+ return nvmap_mmap((struct nvmap_handle_ref *)handle);
+}
+
+void nvhost_nvmap_munmap(struct mem_handle *handle, void *addr)
+{
+ nvmap_munmap((struct nvmap_handle_ref *)handle, addr);
+}
+
+struct mem_handle *nvhost_nvmap_get(struct mem_mgr *mgr, u32 id)
+{
+ return (struct mem_handle *)
+ nvmap_duplicate_handle_id((struct nvmap_client *)mgr, id);
+}
+
+struct mem_handle *nvhost_nvmap_validate_ref(struct mem_mgr *mgr,
+ struct mem_handle *handle)
+{
+ unsigned long ref;
+ ref = nvmap_validate_ref((struct nvmap_client *)mgr,
+ (struct nvmap_handle_ref *)handle);
+ return (struct mem_handle *)ref;
+}
+
+int nvhost_init_nvmap_support(struct nvhost_chip_support *chip)
+{
+ chip->mem.alloc_mgr = nvhost_nvmap_alloc_mgr;
+ chip->mem.put_mgr = nvhost_nvmap_put_mgr;
+ chip->mem.get_mgr = nvhost_nvmap_get_mgr;
+ chip->mem.get_mgr_file = nvhost_nvmap_get_mgr_file;
+ chip->mem.alloc = nvhost_nvmap_alloc;
+ chip->mem.put = nvhost_nvmap_put;
+ chip->mem.get = nvhost_nvmap_get;
+ chip->mem.pin = nvhost_nvmap_pin;
+ chip->mem.unpin = nvhost_nvmap_unpin;
+ chip->mem.mmap = nvhost_nvmap_mmap;
+ chip->mem.munmap = nvhost_nvmap_munmap;
+
+ return 0;
+}
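+
+/*
+ * Consumers go through the chip_support ops table wired above rather
+ * than calling nvmap directly; a minimal sketch (size and flags are
+ * illustrative):
+ *
+ *	struct mem_mgr *mgr = chip->mem.alloc_mgr();
+ *	struct mem_handle *h = chip->mem.alloc(mgr, size, PAGE_SIZE,
+ *			mem_mgr_flag_uncacheable);
+ *	phys_addr_t pa = chip->mem.pin(mgr, h);
+ */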
diff --git a/drivers/video/tegra/host/nvmap.h b/drivers/video/tegra/host/nvmap.h
new file mode 100644
index 000000000000..25e3535ce544
--- /dev/null
+++ b/drivers/video/tegra/host/nvmap.h
@@ -0,0 +1,28 @@
+/*
+ * drivers/video/tegra/host/nvmap.h
+ *
+ * Tegra Graphics Host nvmap memory manager
+ *
+ * Copyright (c) 2010-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_NVMAP_H
+#define __NVHOST_NVMAP_H
+
+struct nvhost_chip_support;
+int nvhost_init_nvmap_support(struct nvhost_chip_support *op);
+struct mem_handle *nvhost_nvmap_validate_ref(struct mem_mgr *mgr,
+ struct mem_handle *handle);
+#endif
diff --git a/drivers/video/tegra/host/t20/Makefile b/drivers/video/tegra/host/t20/Makefile
new file mode 100644
index 000000000000..c2ade9bf925b
--- /dev/null
+++ b/drivers/video/tegra/host/t20/Makefile
@@ -0,0 +1,8 @@
+GCOV_PROFILE := y
+
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-t20-objs = \
+ t20.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-t20.o
diff --git a/drivers/video/tegra/host/t20/t20.c b/drivers/video/tegra/host/t20/t20.c
new file mode 100644
index 000000000000..e6840ae7ba1c
--- /dev/null
+++ b/drivers/video/tegra/host/t20/t20.c
@@ -0,0 +1,294 @@
+/*
+ * drivers/video/tegra/host/t20/t20.c
+ *
+ * Tegra Graphics Init for T20 Architecture Chips
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/nvhost_ioctl.h>
+#include <mach/powergate.h>
+#include <mach/iomap.h>
+#include "t20.h"
+#include "gr3d/gr3d_t20.h"
+#include "mpe/mpe.h"
+#include "host1x/host1x.h"
+#include "nvhost_channel.h"
+#include "nvhost_memmgr.h"
+#include "host1x/host1x01_hardware.h"
+#include "host1x/host1x_syncpt.h"
+#include "chip_support.h"
+
+#define NVMODMUTEX_2D_FULL (1)
+#define NVMODMUTEX_2D_SIMPLE (2)
+#define NVMODMUTEX_2D_SB_A (3)
+#define NVMODMUTEX_2D_SB_B (4)
+#define NVMODMUTEX_3D (5)
+#define NVMODMUTEX_DISPLAYA (6)
+#define NVMODMUTEX_DISPLAYB (7)
+#define NVMODMUTEX_VI (8)
+#define NVMODMUTEX_DSI (9)
+
+static int t20_num_alloc_channels;
+
+static struct resource tegra_host1x01_resources[] = {
+ {
+ .start = TEGRA_HOST1X_BASE,
+ .end = TEGRA_HOST1X_BASE + TEGRA_HOST1X_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_SYNCPT_THRESH_BASE,
+ .end = INT_SYNCPT_THRESH_BASE + INT_SYNCPT_THRESH_NR - 1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = INT_HOST1X_MPCORE_GENERAL,
+ .end = INT_HOST1X_MPCORE_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static const char *s_syncpt_names[32] = {
+ "gfx_host",
+ "", "", "", "", "", "", "",
+ "disp0_a", "disp1_a", "avp_0",
+ "csi_vi_0", "csi_vi_1",
+ "vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4",
+ "2d_0", "2d_1",
+ "disp0_b", "disp1_b",
+ "3d",
+ "mpe",
+ "disp0_c", "disp1_c",
+ "vblank0", "vblank1",
+ "mpe_ebm_eof", "mpe_wr_safe",
+ "2d_tinyblt",
+ "dsi"
+};
+
+static struct host1x_device_info host1x01_info = {
+ .nb_channels = 8,
+ .nb_pts = 32,
+ .nb_mlocks = 16,
+ .nb_bases = 8,
+ .syncpt_names = s_syncpt_names,
+ .client_managed = NVSYNCPTS_CLIENT_MANAGED,
+};
+
+static struct nvhost_device tegra_host1x01_device = {
+ .dev = {.platform_data = &host1x01_info},
+ .name = "host1x",
+ .id = -1,
+ .resource = tegra_host1x01_resources,
+ .num_resources = ARRAY_SIZE(tegra_host1x01_resources),
+ .clocks = {{"host1x", UINT_MAX}, {} },
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+};
+
+static struct nvhost_device tegra_display01_device = {
+ .name = "display",
+ .id = -1,
+ .index = 0,
+ .syncpts = BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) |
+ BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) |
+ BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) |
+ BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
+ .modulemutexes = BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .moduleid = NVHOST_MODULE_NONE,
+};
+
+static struct nvhost_device tegra_gr3d01_device = {
+ .name = "gr3d",
+ .version = 1,
+ .id = -1,
+ .index = 1,
+ .syncpts = BIT(NVSYNCPT_3D),
+ .waitbases = BIT(NVWAITBASE_3D),
+ .modulemutexes = BIT(NVMODMUTEX_3D),
+ .class = NV_GRAPHICS_3D_CLASS_ID,
+ .clocks = {{"gr3d", UINT_MAX}, {"emc", UINT_MAX}, {} },
+ .powergate_ids = {TEGRA_POWERGATE_3D, -1},
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .moduleid = NVHOST_MODULE_NONE,
+};
+
+static struct nvhost_device tegra_gr2d01_device = {
+ .name = "gr2d",
+ .id = -1,
+ .index = 2,
+ .syncpts = BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
+ .waitbases = BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
+ .modulemutexes = BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
+ BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
+ .clocks = { {"gr2d", UINT_MAX},
+ {"epp", UINT_MAX},
+ {"emc", UINT_MAX} },
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ .clockgate_delay = 0,
+ .moduleid = NVHOST_MODULE_NONE,
+ .serialize = true,
+};
+
+static struct resource isp_resources_t20[] = {
+ {
+ .name = "regs",
+ .start = TEGRA_ISP_BASE,
+ .end = TEGRA_ISP_BASE + TEGRA_ISP_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct nvhost_device tegra_isp01_device = {
+ .name = "isp",
+ .id = -1,
+ .resource = isp_resources_t20,
+ .num_resources = ARRAY_SIZE(isp_resources_t20),
+ .index = 3,
+ .syncpts = 0,
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .moduleid = NVHOST_MODULE_ISP,
+};
+
+static struct resource vi_resources[] = {
+ {
+ .name = "regs",
+ .start = TEGRA_VI_BASE,
+ .end = TEGRA_VI_BASE + TEGRA_VI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct nvhost_device tegra_vi01_device = {
+ .name = "vi",
+ .resource = vi_resources,
+ .num_resources = ARRAY_SIZE(vi_resources),
+ .id = -1,
+ .index = 4,
+ .syncpts = BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) |
+ BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
+ BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
+ BIT(NVSYNCPT_VI_ISP_4),
+ .modulemutexes = BIT(NVMODMUTEX_VI),
+ .exclusive = true,
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .moduleid = NVHOST_MODULE_VI,
+};
+
+static struct resource tegra_mpe01_resources[] = {
+ {
+ .name = "regs",
+ .start = TEGRA_MPE_BASE,
+ .end = TEGRA_MPE_BASE + TEGRA_MPE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct nvhost_device tegra_mpe01_device = {
+ .name = "mpe",
+ .version = 1,
+ .id = -1,
+ .resource = tegra_mpe01_resources,
+ .num_resources = ARRAY_SIZE(tegra_mpe01_resources),
+ .index = 5,
+ .syncpts = BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
+ BIT(NVSYNCPT_MPE_WR_SAFE),
+ .waitbases = BIT(NVWAITBASE_MPE),
+ .class = NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+ .waitbasesync = true,
+ .keepalive = true,
+ .clocks = { {"mpe", UINT_MAX},
+ {"emc", UINT_MAX} },
+ .powergate_ids = {TEGRA_POWERGATE_MPE, -1},
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .moduleid = NVHOST_MODULE_MPE,
+};
+
+static struct nvhost_device tegra_dsi01_device = {
+ .name = "dsi",
+ .id = -1,
+ .index = 6,
+ .syncpts = BIT(NVSYNCPT_DSI),
+ .modulemutexes = BIT(NVMODMUTEX_DSI),
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .moduleid = NVHOST_MODULE_NONE,
+};
+
+static struct nvhost_device *t20_devices[] = {
+ &tegra_host1x01_device,
+ &tegra_display01_device,
+ &tegra_gr3d01_device,
+ &tegra_gr2d01_device,
+ &tegra_isp01_device,
+ &tegra_vi01_device,
+ &tegra_mpe01_device,
+ &tegra_dsi01_device,
+};
+
+int tegra2_register_host1x_devices(void)
+{
+ return nvhost_add_devices(t20_devices, ARRAY_SIZE(t20_devices));
+}
+
+static void t20_free_nvhost_channel(struct nvhost_channel *ch)
+{
+ nvhost_free_channel_internal(ch, &t20_num_alloc_channels);
+}
+
+static struct nvhost_channel *t20_alloc_nvhost_channel(
+ struct nvhost_device *dev)
+{
+ return nvhost_alloc_channel_internal(dev->index,
+ nvhost_get_host(dev)->info.nb_channels,
+ &t20_num_alloc_channels);
+}
+
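+/*
+ * The chip-generic host1x sources are #included here, rather than
+ * built as separate objects, so that they are compiled against this
+ * chip's hardware headers included above.
+ */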
+#include "host1x/host1x_channel.c"
+#include "host1x/host1x_cdma.c"
+#include "host1x/host1x_debug.c"
+#include "host1x/host1x_syncpt.c"
+#include "host1x/host1x_intr.c"
+
+int nvhost_init_t20_support(struct nvhost_master *host,
+ struct nvhost_chip_support *op)
+{
+ int err;
+
+ op->channel = host1x_channel_ops;
+ op->cdma = host1x_cdma_ops;
+ op->push_buffer = host1x_pushbuffer_ops;
+ op->debug = host1x_debug_ops;
+ host->sync_aperture = host->aperture + HOST1X_CHANNEL_SYNC_REG_BASE;
+ op->syncpt = host1x_syncpt_ops;
+ op->intr = host1x_intr_ops;
+ err = nvhost_memmgr_init(op);
+ if (err)
+ return err;
+
+ op->nvhost_dev.alloc_nvhost_channel = t20_alloc_nvhost_channel;
+ op->nvhost_dev.free_nvhost_channel = t20_free_nvhost_channel;
+
+ return 0;
+}
+
+/* Hacky way to get access to struct nvhost_device tegra_vi01_device. */
+struct nvhost_device *t20_get_tegra_vi01_device(void)
+{
+ return &tegra_vi01_device;
+}
diff --git a/drivers/video/tegra/host/t20/t20.h b/drivers/video/tegra/host/t20/t20.h
new file mode 100644
index 000000000000..729f9d8e85e4
--- /dev/null
+++ b/drivers/video/tegra/host/t20/t20.h
@@ -0,0 +1,29 @@
+/*
+ * drivers/video/tegra/host/t20/t20.h
+ *
+ * Tegra Graphics Chip support for T20
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _NVHOST_T20_H_
+#define _NVHOST_T20_H_
+
+struct nvhost_master;
+struct nvhost_chip_support;
+
+int nvhost_init_t20_support(struct nvhost_master *,
+ struct nvhost_chip_support *);
+
+#endif /* _NVHOST_T20_H_ */
diff --git a/drivers/video/tegra/host/t30/Makefile b/drivers/video/tegra/host/t30/Makefile
new file mode 100644
index 000000000000..b343eb4fc7cc
--- /dev/null
+++ b/drivers/video/tegra/host/t30/Makefile
@@ -0,0 +1,8 @@
+GCOV_PROFILE := y
+
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-t30-objs = \
+ t30.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-t30.o
diff --git a/drivers/video/tegra/host/t30/t30.c b/drivers/video/tegra/host/t30/t30.c
new file mode 100644
index 000000000000..6c3a7f925177
--- /dev/null
+++ b/drivers/video/tegra/host/t30/t30.c
@@ -0,0 +1,309 @@
+/*
+ * drivers/video/tegra/host/t30/t30.c
+ *
+ * Tegra Graphics Init for T30 Architecture Chips
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/nvhost_ioctl.h>
+#include <mach/powergate.h>
+#include <mach/iomap.h>
+#include "t20/t20.h"
+#include "t30.h"
+#include "gr3d/gr3d_t30.h"
+#include "gr3d/scale3d.h"
+#include "mpe/mpe.h"
+#include "host1x/host1x.h"
+#include "host1x/host1x01_hardware.h"
+#include "chip_support.h"
+#include "nvhost_channel.h"
+#include "nvhost_memmgr.h"
+#include "host1x/host1x_syncpt.h"
+
+#define NVMODMUTEX_2D_FULL (1)
+#define NVMODMUTEX_2D_SIMPLE (2)
+#define NVMODMUTEX_2D_SB_A (3)
+#define NVMODMUTEX_2D_SB_B (4)
+#define NVMODMUTEX_3D (5)
+#define NVMODMUTEX_DISPLAYA (6)
+#define NVMODMUTEX_DISPLAYB (7)
+#define NVMODMUTEX_VI (8)
+#define NVMODMUTEX_DSI (9)
+
+static int t30_num_alloc_channels;
+
+static struct resource tegra_host1x01_resources[] = {
+ {
+ .start = TEGRA_HOST1X_BASE,
+ .end = TEGRA_HOST1X_BASE + TEGRA_HOST1X_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_SYNCPT_THRESH_BASE,
+ .end = INT_SYNCPT_THRESH_BASE + INT_SYNCPT_THRESH_NR - 1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = INT_HOST1X_MPCORE_GENERAL,
+ .end = INT_HOST1X_MPCORE_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static const char *s_syncpt_names[32] = {
+ "gfx_host",
+ "", "", "", "", "", "", "",
+ "disp0_a", "disp1_a", "avp_0",
+ "csi_vi_0", "csi_vi_1",
+ "vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4",
+ "2d_0", "2d_1",
+ "disp0_b", "disp1_b",
+ "3d",
+ "mpe",
+ "disp0_c", "disp1_c",
+ "vblank0", "vblank1",
+ "mpe_ebm_eof", "mpe_wr_safe",
+ "2d_tinyblt",
+ "dsi"
+};
+
+static struct host1x_device_info host1x01_info = {
+ .nb_channels = 8,
+ .nb_pts = 32,
+ .nb_mlocks = 16,
+ .nb_bases = 8,
+ .syncpt_names = s_syncpt_names,
+ .client_managed = NVSYNCPTS_CLIENT_MANAGED,
+};
+
+static struct nvhost_device tegra_host1x01_device = {
+ .dev = {.platform_data = &host1x01_info},
+ .name = "host1x",
+ .id = -1,
+ .resource = tegra_host1x01_resources,
+ .num_resources = ARRAY_SIZE(tegra_host1x01_resources),
+ .clocks = {{"host1x", UINT_MAX}, {} },
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+};
+
+static struct nvhost_device tegra_display01_device = {
+ .name = "display",
+ .id = -1,
+ .index = 0,
+ .syncpts = BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) |
+ BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) |
+ BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) |
+ BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
+ .modulemutexes = BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .moduleid = NVHOST_MODULE_NONE,
+};
+
+static struct nvhost_device tegra_gr3d02_device = {
+ .name = "gr3d",
+ .version = 2,
+ .id = -1,
+ .index = 1,
+ .syncpts = BIT(NVSYNCPT_3D),
+ .waitbases = BIT(NVWAITBASE_3D),
+ .modulemutexes = BIT(NVMODMUTEX_3D),
+ .class = NV_GRAPHICS_3D_CLASS_ID,
+ .clocks = { {"gr3d", UINT_MAX},
+ {"gr3d2", UINT_MAX},
+ {"emc", UINT_MAX} },
+ .powergate_ids = { TEGRA_POWERGATE_3D,
+ TEGRA_POWERGATE_3D1 },
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .can_powergate = true,
+ .powerup_reset = true,
+ .powergate_delay = 250,
+ .moduleid = NVHOST_MODULE_NONE,
+};
+
+static struct nvhost_device tegra_gr2d02_device = {
+ .name = "gr2d",
+ .id = -1,
+ .index = 2,
+ .syncpts = BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
+ .waitbases = BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
+ .modulemutexes = BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
+ BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
+ .clocks = { {"gr2d", 0},
+ {"epp", 0},
+ {"emc", 300000000} },
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ .clockgate_delay = 0,
+ .moduleid = NVHOST_MODULE_NONE,
+ .serialize = true,
+};
+
+static struct resource isp_resources_t20[] = {
+ {
+ .name = "regs",
+ .start = TEGRA_ISP_BASE,
+ .end = TEGRA_ISP_BASE + TEGRA_ISP_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct nvhost_device tegra_isp01_device = {
+ .name = "isp",
+ .id = -1,
+ .resource = isp_resources_t20,
+ .num_resources = ARRAY_SIZE(isp_resources_t20),
+ .index = 3,
+ .syncpts = BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
+ BIT(NVSYNCPT_VI_ISP_4),
+ .clocks = { {"epp", 0}
+ },
+ .keepalive = true,
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .moduleid = NVHOST_MODULE_ISP,
+};
+
+static struct resource vi_resources[] = {
+ {
+ .name = "regs",
+ .start = TEGRA_VI_BASE,
+ .end = TEGRA_VI_BASE + TEGRA_VI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct nvhost_device tegra_vi01_device = {
+ .name = "vi",
+ .resource = vi_resources,
+ .num_resources = ARRAY_SIZE(vi_resources),
+ .id = -1,
+ .index = 4,
+ .syncpts = BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) |
+ BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
+ BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
+ BIT(NVSYNCPT_VI_ISP_4),
+ .modulemutexes = BIT(NVMODMUTEX_VI),
+ .exclusive = true,
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .moduleid = NVHOST_MODULE_VI,
+};
+
+static struct resource tegra_mpe01_resources[] = {
+ {
+ .name = "regs",
+ .start = TEGRA_MPE_BASE,
+ .end = TEGRA_MPE_BASE + TEGRA_MPE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct nvhost_device tegra_mpe02_device = {
+ .name = "mpe",
+ .version = 2,
+ .id = -1,
+ .resource = tegra_mpe01_resources,
+ .num_resources = ARRAY_SIZE(tegra_mpe01_resources),
+ .index = 5,
+ .syncpts = BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
+ BIT(NVSYNCPT_MPE_WR_SAFE),
+ .waitbases = BIT(NVWAITBASE_MPE),
+ .class = NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+ .waitbasesync = true,
+ .keepalive = true,
+ .clocks = { {"mpe", UINT_MAX},
+ {"emc", UINT_MAX} },
+ .powergate_ids = {TEGRA_POWERGATE_MPE, -1},
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .can_powergate = true,
+ .powergate_delay = 100,
+ .moduleid = NVHOST_MODULE_MPE,
+};
+
+static struct nvhost_device tegra_dsi01_device = {
+ .name = "dsi",
+ .id = -1,
+ .index = 6,
+ .syncpts = BIT(NVSYNCPT_DSI),
+ .modulemutexes = BIT(NVMODMUTEX_DSI),
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .moduleid = NVHOST_MODULE_NONE,
+};
+
+static struct nvhost_device *t30_devices[] = {
+ &tegra_host1x01_device,
+ &tegra_display01_device,
+ &tegra_gr3d02_device,
+ &tegra_gr2d02_device,
+ &tegra_isp01_device,
+ &tegra_vi01_device,
+ &tegra_mpe02_device,
+ &tegra_dsi01_device,
+};
+
+int tegra3_register_host1x_devices(void)
+{
+ return nvhost_add_devices(t30_devices, ARRAY_SIZE(t30_devices));
+}
+
+static void t30_free_nvhost_channel(struct nvhost_channel *ch)
+{
+ nvhost_free_channel_internal(ch, &t30_num_alloc_channels);
+}
+
+static struct nvhost_channel *t30_alloc_nvhost_channel(
+ struct nvhost_device *dev)
+{
+ return nvhost_alloc_channel_internal(dev->index,
+ nvhost_get_host(dev)->info.nb_channels,
+ &t30_num_alloc_channels);
+}
+
+#include "host1x/host1x_channel.c"
+#include "host1x/host1x_cdma.c"
+#include "host1x/host1x_debug.c"
+#include "host1x/host1x_syncpt.c"
+#include "host1x/host1x_intr.c"
+
+int nvhost_init_t30_support(struct nvhost_master *host,
+ struct nvhost_chip_support *op)
+{
+ int err;
+
+ op->channel = host1x_channel_ops;
+ op->cdma = host1x_cdma_ops;
+ op->push_buffer = host1x_pushbuffer_ops;
+ op->debug = host1x_debug_ops;
+ op->debug.debug_init = nvhost_scale3d_debug_init;
+ host->sync_aperture = host->aperture + HOST1X_CHANNEL_SYNC_REG_BASE;
+ op->syncpt = host1x_syncpt_ops;
+ op->intr = host1x_intr_ops;
+ err = nvhost_memmgr_init(op);
+ if (err)
+ return err;
+
+ op->nvhost_dev.alloc_nvhost_channel = t30_alloc_nvhost_channel;
+ op->nvhost_dev.free_nvhost_channel = t30_free_nvhost_channel;
+
+ return 0;
+}
+
+/* Hacky way to get access to struct nvhost_device tegra_vi01_device. */
+struct nvhost_device *t30_get_tegra_vi01_device(void)
+{
+ return &tegra_vi01_device;
+}
diff --git a/drivers/video/tegra/host/t30/t30.h b/drivers/video/tegra/host/t30/t30.h
new file mode 100644
index 000000000000..80838a5e287c
--- /dev/null
+++ b/drivers/video/tegra/host/t30/t30.h
@@ -0,0 +1,29 @@
+/*
+ * drivers/video/tegra/host/t30/t30.h
+ *
+ * Tegra Graphics Chip support for Tegra3
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _NVHOST_T30_H_
+#define _NVHOST_T30_H_
+
+struct nvhost_master;
+struct nvhost_chip_support;
+
+int nvhost_init_t30_support(struct nvhost_master *host,
+ struct nvhost_chip_support *);
+
+#endif /* _NVHOST_T30_H_ */
diff --git a/drivers/video/tegra/host/vi/Makefile b/drivers/video/tegra/host/vi/Makefile
new file mode 100644
index 000000000000..8c130e49814d
--- /dev/null
+++ b/drivers/video/tegra/host/vi/Makefile
@@ -0,0 +1,7 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-vi-objs = \
+ vi.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-vi.o
diff --git a/drivers/video/tegra/host/vi/vi.c b/drivers/video/tegra/host/vi/vi.c
new file mode 100644
index 000000000000..ee801c91efa5
--- /dev/null
+++ b/drivers/video/tegra/host/vi/vi.c
@@ -0,0 +1,79 @@
+/*
+ * drivers/video/tegra/host/vi/vi.c
+ *
+ * Tegra Graphics Host VI
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "dev.h"
+#include "bus_client.h"
+
+static int __devinit vi_probe(struct nvhost_device *dev,
+ struct nvhost_device_id *id_table)
+{
+ int err = 0;
+
+ err = nvhost_client_device_get_resources(dev);
+ if (err)
+ return err;
+
+ return nvhost_client_device_init(dev);
+}
+
+static int __exit vi_remove(struct nvhost_device *dev)
+{
+ /* Add clean-up */
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int vi_suspend(struct nvhost_device *dev, pm_message_t state)
+{
+ return nvhost_client_device_suspend(dev);
+}
+
+static int vi_resume(struct nvhost_device *dev)
+{
+ dev_info(&dev->dev, "resuming\n");
+ return 0;
+}
+#endif
+
+static struct nvhost_driver vi_driver = {
+ .probe = vi_probe,
+ .remove = __exit_p(vi_remove),
+#ifdef CONFIG_PM
+ .suspend = vi_suspend,
+ .resume = vi_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "vi",
+ }
+};
+
+static int __init vi_init(void)
+{
+ return nvhost_driver_register(&vi_driver);
+}
+
+static void __exit vi_exit(void)
+{
+ nvhost_driver_unregister(&vi_driver);
+}
+
+module_init(vi_init);
+module_exit(vi_exit);