summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--arch/arm/configs/tegra_harmony_android_defconfig2
-rw-r--r--arch/arm/configs/tegra_harmony_gnu_linux_defconfig10
-rwxr-xr-xarch/arm/configs/tegra_ventana_android_defconfig2
-rw-r--r--arch/arm/configs/tegra_whistler_android_defconfig2
-rw-r--r--arch/arm/configs/tegra_whistler_gnu_linux_defconfig2
-rw-r--r--arch/arm/mach-tegra/board-common.c62
-rw-r--r--arch/arm/mach-tegra/clock_nvrm.c64
-rw-r--r--arch/arm/mach-tegra/common.c2
-rw-r--r--arch/arm/mach-tegra/include/mach/iomap.h12
-rw-r--r--arch/arm/mach-tegra/include/mach/irqs.h7
-rw-r--r--arch/arm/mach-tegra/irq.c75
-rw-r--r--drivers/char/Kconfig29
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/video/Kconfig15
-rw-r--r--drivers/video/Makefile2
-rw-r--r--drivers/video/tegra/Kconfig54
-rw-r--r--drivers/video/tegra/Makefile3
-rw-r--r--drivers/video/tegra/host/Makefile11
-rw-r--r--drivers/video/tegra/host/nvhost_3dctx.c490
-rw-r--r--drivers/video/tegra/host/nvhost_acm.c148
-rw-r--r--drivers/video/tegra/host/nvhost_acm.h75
-rw-r--r--drivers/video/tegra/host/nvhost_cdma.c597
-rw-r--r--drivers/video/tegra/host/nvhost_cdma.h99
-rw-r--r--drivers/video/tegra/host/nvhost_channel.c247
-rw-r--r--drivers/video/tegra/host/nvhost_channel.h94
-rw-r--r--drivers/video/tegra/host/nvhost_cpuaccess.c139
-rw-r--r--drivers/video/tegra/host/nvhost_cpuaccess.h71
-rw-r--r--drivers/video/tegra/host/nvhost_dev.c779
-rw-r--r--drivers/video/tegra/host/nvhost_dev.h50
-rw-r--r--drivers/video/tegra/host/nvhost_hardware.h219
-rw-r--r--drivers/video/tegra/host/nvhost_hwctx.h86
-rw-r--r--drivers/video/tegra/host/nvhost_intr.c561
-rw-r--r--drivers/video/tegra/host/nvhost_intr.h104
-rw-r--r--drivers/video/tegra/host/nvhost_mpectx.c23
-rw-r--r--drivers/video/tegra/host/nvhost_syncpt.c252
-rw-r--r--drivers/video/tegra/host/nvhost_syncpt.h150
-rw-r--r--drivers/video/tegra/nvmap.c (renamed from drivers/char/nvmap.c)244
-rw-r--r--drivers/video/tegra/tegra-fb.c (renamed from drivers/video/tegra-fb.c)0
-rw-r--r--include/linux/nvhost.h123
-rw-r--r--include/linux/nvmap.h (renamed from arch/arm/mach-tegra/include/mach/nvmem.h)31
40 files changed, 4856 insertions, 81 deletions
diff --git a/arch/arm/configs/tegra_harmony_android_defconfig b/arch/arm/configs/tegra_harmony_android_defconfig
index c4dcf77001d7..2eeb145dacd4 100644
--- a/arch/arm/configs/tegra_harmony_android_defconfig
+++ b/arch/arm/configs/tegra_harmony_android_defconfig
@@ -1339,6 +1339,8 @@ CONFIG_FB_TEGRA=y
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
# CONFIG_FB_BROADSHEET is not set
+CONFIG_FB_TEGRA_DUMMY=y
+CONFIG_FB_TEGRA_GRHOST=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
# CONFIG_LCD_LMS283GF05 is not set
diff --git a/arch/arm/configs/tegra_harmony_gnu_linux_defconfig b/arch/arm/configs/tegra_harmony_gnu_linux_defconfig
index aa5a9c250557..bdd316cbbef6 100644
--- a/arch/arm/configs/tegra_harmony_gnu_linux_defconfig
+++ b/arch/arm/configs/tegra_harmony_gnu_linux_defconfig
@@ -949,9 +949,6 @@ CONFIG_HW_CONSOLE=y
CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_DEVMEM=y
# CONFIG_DEVKMEM is not set
-CONFIG_DEVNVMAP=y
-# CONFIG_DEVNVMAP_PARANOID is not set
-CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM=y
# CONFIG_SERIAL_NONSTANDARD is not set
# CONFIG_NOZOMI is not set
@@ -1194,7 +1191,6 @@ CONFIG_FB_TILEBLITTING=y
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_NVIDIA is not set
# CONFIG_FB_RIVA is not set
-CONFIG_FB_TEGRA=y
# CONFIG_FB_MATROX is not set
# CONFIG_FB_RADEON is not set
# CONFIG_FB_ATY128 is not set
@@ -1216,6 +1212,12 @@ CONFIG_FB_TEGRA=y
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
# CONFIG_FB_BROADSHEET is not set
+CONFIG_FB_TEGRA=y
+CONFIG_FB_TEGRA_DUMMY=y
+CONFIG_FB_TEGRA_GRHOST=y
+CONFIG_DEVNVMAP=y
+# CONFIG_DEVNVMAP_PARANOID is not set
+CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
# CONFIG_LCD_LMS283GF05 is not set
diff --git a/arch/arm/configs/tegra_ventana_android_defconfig b/arch/arm/configs/tegra_ventana_android_defconfig
index fd2fc1697254..c0c64f728223 100755
--- a/arch/arm/configs/tegra_ventana_android_defconfig
+++ b/arch/arm/configs/tegra_ventana_android_defconfig
@@ -1046,6 +1046,8 @@ CONFIG_FB_TEGRA=y
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
# CONFIG_FB_BROADSHEET is not set
+CONFIG_FB_TEGRA_DUMMY=y
+CONFIG_FB_TEGRA_GRHOST=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
# CONFIG_LCD_LMS283GF05 is not set
diff --git a/arch/arm/configs/tegra_whistler_android_defconfig b/arch/arm/configs/tegra_whistler_android_defconfig
index 6ed4b376de64..c749c4c1a285 100644
--- a/arch/arm/configs/tegra_whistler_android_defconfig
+++ b/arch/arm/configs/tegra_whistler_android_defconfig
@@ -1204,6 +1204,8 @@ CONFIG_FB_TEGRA=y
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
# CONFIG_FB_BROADSHEET is not set
+CONFIG_FB_TEGRA_DUMMY=y
+CONFIG_FB_TEGRA_GRHOST=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
# CONFIG_LCD_LMS283GF05 is not set
diff --git a/arch/arm/configs/tegra_whistler_gnu_linux_defconfig b/arch/arm/configs/tegra_whistler_gnu_linux_defconfig
index 06f299d6e40c..4c927c138eab 100644
--- a/arch/arm/configs/tegra_whistler_gnu_linux_defconfig
+++ b/arch/arm/configs/tegra_whistler_gnu_linux_defconfig
@@ -1057,6 +1057,8 @@ CONFIG_FB_TEGRA=y
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
# CONFIG_FB_BROADSHEET is not set
+CONFIG_FB_TEGRA_DUMMY=y
+CONFIG_FB_TEGRA_GRHOST=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
# CONFIG_LCD_LMS283GF05 is not set
diff --git a/arch/arm/mach-tegra/board-common.c b/arch/arm/mach-tegra/board-common.c
index 1cc943ddaa18..f0d5259ef36c 100644
--- a/arch/arm/mach-tegra/board-common.c
+++ b/arch/arm/mach-tegra/board-common.c
@@ -184,6 +184,65 @@ static struct platform_device tegra_gart_device = {
};
#endif
+#ifdef CONFIG_FB_TEGRA_GRHOST
+static struct resource tegra_grhost_resources[] = {
+ [0] = {
+ .name = "host1x",
+ .start = TEGRA_HOST1X_BASE,
+ .end = TEGRA_HOST1X_BASE + TEGRA_HOST1X_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "display",
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .name = "display2",
+ .start = TEGRA_DISPLAY2_BASE,
+ .end = TEGRA_DISPLAY2_BASE + TEGRA_DISPLAY2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [3] = {
+ .name = "vi",
+ .start = TEGRA_VI_BASE,
+ .end = TEGRA_VI_BASE + TEGRA_VI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [4] = {
+ .name = "isp",
+ .start = TEGRA_ISP_BASE,
+ .end = TEGRA_ISP_BASE + TEGRA_ISP_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [5] = {
+ .name = "mpe",
+ .start = TEGRA_MPE_BASE,
+ .end = TEGRA_MPE_BASE + TEGRA_MPE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [6] = {
+ .name = "syncpt_thresh",
+ .start = INT_SYNCPT_THRESH_BASE,
+ .end = INT_SYNCPT_THRESH_BASE + INT_SYNCPT_THRESH_NR - 1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [7] = {
+ .name = "host1x_mpcore_general",
+ .start = INT_HOST1X_MPCORE_GENERAL,
+ .end = INT_HOST1X_MPCORE_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+static struct platform_device tegra_grhost_device = {
+ .name = "tegra_grhost",
+ .id = -1,
+ .resource = tegra_grhost_resources,
+ .num_resources = ARRAY_SIZE(tegra_grhost_resources),
+};
+#endif
+
#ifdef CONFIG_USB_GADGET_TEGRA
static u64 tegra_udc_dma_mask = DMA_BIT_MASK(32);
static struct fsl_usb2_platform_data tegra_udc_platform = {
@@ -234,6 +293,9 @@ static struct platform_device *tegra_devices[] __initdata = {
#ifdef CONFIG_TEGRA_IOVMM_GART
&tegra_gart_device,
#endif
+#ifdef CONFIG_FB_TEGRA_GRHOST
+ &tegra_grhost_device,
+#endif
};
void __init tegra_register_socdev(void)
diff --git a/arch/arm/mach-tegra/clock_nvrm.c b/arch/arm/mach-tegra/clock_nvrm.c
index c94dfd6a54a7..9bd5e7fb571a 100644
--- a/arch/arm/mach-tegra/clock_nvrm.c
+++ b/arch/arm/mach-tegra/clock_nvrm.c
@@ -100,6 +100,12 @@ static int tegra_periph_clk_enable(struct clk *c)
return -ENXIO;
}
+ /* max out emc when 3d is on */
+ if (NVRM_MODULE_ID_MODULE(c->module) == NvRmModuleID_3D) {
+ NvRmPowerBusyHint(s_hRmGlobal, NvRmDfsClockId_Emc, clk_pwr_client,
+ 0xffffffff, NvRmFreqMaximum);
+ }
+
return 0;
}
@@ -107,6 +113,10 @@ static void tegra_periph_clk_disable(struct clk *c)
{
NvError e;
+ if (NVRM_MODULE_ID_MODULE(c->module) == NvRmModuleID_3D) {
+ NvRmPowerBusyHint(s_hRmGlobal, NvRmDfsClockId_Emc, clk_pwr_client, 0, 0);
+ }
+
e = NvRmPowerModuleClockControl(s_hRmGlobal, c->module,
clk_pwr_client, NV_FALSE);
@@ -170,6 +180,16 @@ static unsigned long tegra_periph_clk_get_rate(struct clk *c)
return (unsigned long)freq * 1000;
}
+static long tegra_periph_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ NvRmFreqKHz max;
+ /* TODO: rm reports an unachievable max rate for host */
+ if (c->module == NvRmModuleID_GraphicsHost)
+ max = 111000;
+ else
+ max = NvRmPowerModuleGetMaxFrequency(s_hRmGlobal, c->module);
+ return min(((unsigned long)max) * 1000, rate);
+}
static struct clk_ops tegra_periph_clk_ops = {
.init = tegra_periph_clk_init,
@@ -177,6 +197,7 @@ static struct clk_ops tegra_periph_clk_ops = {
.disable = tegra_periph_clk_disable,
.set_rate = tegra_periph_clk_set_rate,
.get_rate = tegra_periph_clk_get_rate,
+ .round_rate = tegra_periph_clk_round_rate,
};
static unsigned long tegra_clksrc_clk_get_rate(struct clk *c)
@@ -212,11 +233,12 @@ static struct clk_ops dfs_clk_ops = {
#define NvRmModuleID_Afi NvRmPrivModuleID_Afi
#define NvRmModuleID_PcieXclk NvRmPrivModuleID_PcieXclk
-#define PERIPH_CLK(_name, _dev, _modname, _instance, _tol, _min, _pow) \
+#define PERIPH_CLK(_name, _dev, _con, _modname, _instance, _tol, _min, _pow) \
{ \
.name = _name, \
.lookup = { \
.dev_id = _dev, \
+ .con_id = _con, \
}, \
.module = NVRM_MODULE_ID(NvRmModuleID_##_modname, _instance), \
.ops = &tegra_periph_clk_ops, \
@@ -226,19 +248,23 @@ static struct clk_ops dfs_clk_ops = {
}
static struct clk tegra_periph_clk[] = {
- PERIPH_CLK("rtc", "rtc-tegra", Rtc, 0, 0, 0, false),
- PERIPH_CLK("kbc", "tegra-kbc", Kbc, 0, 0, 0, false),
- PERIPH_CLK("uarta", "uart.0", Uart, 0, 5, 0, true),
- PERIPH_CLK("uartb", "uart.1", Uart, 1, 5, 0, true),
- PERIPH_CLK("uartc", "uart.2", Uart, 2, 5, 0, true),
- PERIPH_CLK("uartd", "uart.3", Uart, 3, 5, 0, true),
- PERIPH_CLK("uarte", "uart.4", Uart, 4, 5, 0, true),
- PERIPH_CLK("sdmmc1", "tegra-sdhci.0", Sdio, 0, 0, 400, false),
- PERIPH_CLK("sdmmc2", "tegra-sdhci.1", Sdio, 1, 0, 400, false),
- PERIPH_CLK("sdmmc3", "tegra-sdhci.2", Sdio, 2, 0, 400, false),
- PERIPH_CLK("sdmmc4", "tegra-sdhci.3", Sdio, 3, 0, 400, false),
- PERIPH_CLK("pcie", "tegra_pcie", Pcie, 0, 0, 0, true),
- PERIPH_CLK("pcie_xclk", "tegra_pcie_xclk", PcieXclk, 0, 0, 0, false),
+ PERIPH_CLK("rtc", "rtc-tegra", NULL, Rtc, 0, 0, 0, false),
+ PERIPH_CLK("kbc", "tegra-kbc", NULL, Kbc, 0, 0, 0, false),
+ PERIPH_CLK("uarta", "uart.0", NULL, Uart, 0, 5, 0, true),
+ PERIPH_CLK("uartb", "uart.1", NULL, Uart, 1, 5, 0, true),
+ PERIPH_CLK("uartc", "uart.2", NULL, Uart, 2, 5, 0, true),
+ PERIPH_CLK("uartd", "uart.3", NULL, Uart, 3, 5, 0, true),
+ PERIPH_CLK("uarte", "uart.4", NULL, Uart, 4, 5, 0, true),
+ PERIPH_CLK("sdmmc1", "tegra-sdhci.0", NULL, Sdio, 0, 0, 400, false),
+ PERIPH_CLK("sdmmc2", "tegra-sdhci.1", NULL, Sdio, 1, 0, 400, false),
+ PERIPH_CLK("sdmmc3", "tegra-sdhci.2", NULL, Sdio, 2, 0, 400, false),
+ PERIPH_CLK("sdmmc4", "tegra-sdhci.3", NULL, Sdio, 3, 0, 400, false),
+ PERIPH_CLK("pcie", "tegra_pcie", NULL, Pcie, 0, 0, 0, true),
+ PERIPH_CLK("pcie_xclk", "tegra_pcie_xclk", NULL, PcieXclk, 0, 0, 0, false),
+ PERIPH_CLK("gr3d", "tegra_grhost", "gr3d", 3D, 0, 0, 0, true),
+ PERIPH_CLK("gr2d", "tegra_grhost", "gr2d", 2D, 0, 0, 0, true),
+ PERIPH_CLK("host1x", "tegra_grhost", "host1x", GraphicsHost, 0, 0, 0, true),
+ PERIPH_CLK("epp", "tegra_grhost", "epp", Epp, 0, 0, 0, true),
};
static struct clk tegra_clk_cpu = {
@@ -390,6 +416,16 @@ unsigned long clk_get_rate(struct clk *c)
}
EXPORT_SYMBOL(clk_get_rate);
+long clk_round_rate(struct clk *c, unsigned long rate)
+{
+ if (c->ops && c->ops->round_rate)
+ return c->ops->round_rate(c, rate);
+
+ return -ENOSYS;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+
void __init tegra_init_clock(void)
{
NvError e;
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index ce9decb0dfa2..d6bee1d29bdf 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/cpu.h>
+#include <linux/nvmap.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/cacheflush.h>
@@ -27,7 +28,6 @@
#include <mach/iomap.h>
#include <mach/dma.h>
-#include <mach/nvmem.h>
#include "board.h"
diff --git a/arch/arm/mach-tegra/include/mach/iomap.h b/arch/arm/mach-tegra/include/mach/iomap.h
index bd28f03dd4fc..d5fb089864ad 100644
--- a/arch/arm/mach-tegra/include/mach/iomap.h
+++ b/arch/arm/mach-tegra/include/mach/iomap.h
@@ -26,6 +26,9 @@
#define TEGRA_IRAM_BASE 0x40000000
#define TEGRA_IRAM_SIZE SZ_256K
+#define TEGRA_HOST1X_BASE 0x50000000
+#define TEGRA_HOST1X_SIZE 0x24000
+
#define TEGRA_ARM_PERIF_BASE 0x50040000
#define TEGRA_ARM_PERIF_SIZE SZ_8K
@@ -35,6 +38,15 @@
#define TEGRA_ARM_INT_DIST_BASE 0x50041000
#define TEGRA_ARM_INT_DIST_SIZE SZ_4K
+#define TEGRA_MPE_BASE 0x54040000
+#define TEGRA_MPE_SIZE SZ_256K
+
+#define TEGRA_VI_BASE 0x54080000
+#define TEGRA_VI_SIZE SZ_256K
+
+#define TEGRA_ISP_BASE 0x54100000
+#define TEGRA_ISP_SIZE SZ_256K
+
#define TEGRA_DISPLAY_BASE 0x54200000
#define TEGRA_DISPLAY_SIZE SZ_256K
diff --git a/arch/arm/mach-tegra/include/mach/irqs.h b/arch/arm/mach-tegra/include/mach/irqs.h
index abd8449b72d7..91388d8967fc 100644
--- a/arch/arm/mach-tegra/include/mach/irqs.h
+++ b/arch/arm/mach-tegra/include/mach/irqs.h
@@ -166,9 +166,12 @@
#define INT_QUAD_RES_30 (INT_QUAD_BASE + 30)
#define INT_QUAD_RES_31 (INT_QUAD_BASE + 31)
-#define INT_GPIO_BASE (INT_QUAD_BASE + 32)
-#define INT_GPIO_NR (28 * 8)
+#define INT_SYNCPT_THRESH_BASE (INT_QUAD_BASE + 32)
+#define INT_SYNCPT_THRESH_NR 32
+#define INT_GPIO_BASE (INT_SYNCPT_THRESH_BASE + \
+ INT_SYNCPT_THRESH_NR)
+#define INT_GPIO_NR (28 * 8)
#endif
#define NR_IRQS (INT_GPIO_BASE + INT_GPIO_NR)
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index 687a5474191a..7101a9458b6a 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -33,10 +33,6 @@
#define INT_SYS_SZ (INT_SEC_BASE - INT_PRI_BASE)
#define PPI_NR ((INT_SYS_NR+INT_SYS_SZ-1)/INT_SYS_SZ)
-#define APBDMA_IRQ_STA_CPU 0x14
-#define APBDMA_IRQ_MASK_SET 0x20
-#define APBDMA_IRQ_MASK_CLR 0x24
-
#define ICTLR_CPU_IER 0x20
#define ICTLR_CPU_IER_SET 0x24
#define ICTLR_CPU_IER_CLR 0x28
@@ -46,6 +42,13 @@
#define ICTLR_COP_IER_CLR 0x38
#define ICTLR_COP_IEP_CLASS 0x3c
+#define HOST1X_SYNC_OFFSET 0x3000
+#define HOST1X_SYNC_SIZE 0x800
+enum {
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS = 0x40,
+ HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE = 0x60
+};
+
static void (*gic_mask_irq)(unsigned int irq) = NULL;
static void (*gic_unmask_irq)(unsigned int irq) = NULL;
@@ -84,6 +87,66 @@ static struct irq_chip tegra_irq = {
#endif
};
+static void syncpt_thresh_mask(unsigned int irq)
+{
+ (void)irq;
+}
+
+static void syncpt_thresh_unmask(unsigned int irq)
+{
+ (void)irq;
+}
+
+static void syncpt_thresh_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ void __iomem *sync_regs = get_irq_desc_data(desc);
+ u32 reg;
+ int id;
+
+ desc->chip->ack(irq);
+
+ reg = readl(sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
+
+ while ((id = __fls(reg)) >= 0) {
+ reg ^= BIT(id);
+ generic_handle_irq(id + INT_SYNCPT_THRESH_BASE);
+ }
+
+ desc->chip->unmask(irq);
+}
+
+static struct irq_chip syncpt_thresh_irq = {
+ .name = "syncpt",
+ .mask = syncpt_thresh_mask,
+ .unmask = syncpt_thresh_unmask
+};
+
+void __init syncpt_init_irq(void)
+{
+ void __iomem *sync_regs;
+ unsigned int i;
+
+ sync_regs = ioremap(TEGRA_HOST1X_BASE + HOST1X_SYNC_OFFSET,
+ HOST1X_SYNC_SIZE);
+ BUG_ON(!sync_regs);
+
+ writel(0xffffffffUL,
+ sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
+ writel(0xffffffffUL,
+ sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
+
+ for (i = INT_SYNCPT_THRESH_BASE; i < INT_GPIO_BASE; i++) {
+ set_irq_chip(i, &syncpt_thresh_irq);
+ set_irq_chip_data(i, sync_regs);
+ set_irq_handler(i, handle_simple_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+ if (set_irq_data(INT_HOST1X_MPCORE_SYNCPT, sync_regs))
+ BUG();
+ set_irq_chained_handler(INT_HOST1X_MPCORE_SYNCPT,
+ syncpt_thresh_cascade);
+}
+
void __init tegra_init_irq(void)
{
struct irq_chip *gic;
@@ -105,11 +168,13 @@ void __init tegra_init_irq(void)
tegra_irq.set_affinity = gic->set_affinity;
#endif
- for (i=INT_PRI_BASE; i<INT_GPIO_BASE; i++) {
+ for (i=INT_PRI_BASE; i<INT_SYNCPT_THRESH_BASE; i++) {
set_irq_chip(i, &tegra_irq);
set_irq_handler(i, handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
+
+ syncpt_init_irq();
}
#ifdef CONFIG_PM
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 4390ddf8c7d9..efdc34a86601 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -123,35 +123,6 @@ config BFIN_JTAG_COMM_CONSOLE
bool "Console on Blackfin JTAG"
depends on BFIN_JTAG_COMM=y
-config DEVNVMAP
- bool "/dev/nvmap device support"
- depends on ARCH_TEGRA
- default y
- help
- Say Y here if you want to support the /dev/nvmap device. This device
- is used by NVIDIA Tegra graphics and multimedia drivers for managing
- graphics memory.
-
-config DEVNVMAP_PARANOID
- bool "Validate all user-provided /dev/nvmap object references"
- depends on DEVNVMAP
- default n
- help
- Say Y here to enable additional process-level validations and
- permissions for /dev/nvmap object references provided via ioctls.
- May result in a decrease in performance.
-
-config DEVNVMAP_RECLAIM_UNPINNED_VM
- bool "Allow /dev/nvmap to reclaim unpinned I/O virtual memory"
- depends on DEVNVMAP && TEGRA_IOVMM
- default y
- help
- Say Y here to enable /dev/nvmap to reclaim I/O virtual memory after
- it has been unpinned, and re-use it for other objects. This can
- allow a larger virtual I/O VM space than would normally be
- supported by the hardware, at a slight cost in performance.
-
-
config SERIAL_NONSTANDARD
bool "Non-standard serial port support"
depends on HAS_IOMEM
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 9e54b3d5a56d..ee776cb54ec5 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -9,7 +9,6 @@ FONTMAPFILE = cp437.uni
obj-y += mem.o random.o tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o tty_buffer.o tty_port.o
-obj-$(CONFIG_DEVNVMAP) += nvmap.o
obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
obj-y += misc.o
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index fc6750156402..a559ea047b7f 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1056,19 +1056,6 @@ config FB_RIVA_BACKLIGHT
help
Say Y here if you want to control the backlight of your display.
-config FB_TEGRA
- boolean "NVIDIA Tegra dummy framebuffer driver"
- depends on ARCH_TEGRA && FB
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- default FB
- help
- This driver implements a dummy framebuffer device for systems based
- on the NVIDIA Tegra family of SoCs. Display initialization is
- expected to happen prior to the kernel start-up for this driver to
- function.
-
config FB_I810
tristate "Intel 810/815 support (EXPERIMENTAL)"
depends on EXPERIMENTAL && FB && PCI && X86_32 && AGP_INTEL
@@ -2175,6 +2162,8 @@ config FB_BROADSHEET
source "drivers/video/omap/Kconfig"
+source "drivers/video/tegra/Kconfig"
+
source "drivers/video/backlight/Kconfig"
source "drivers/video/display/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 927664e8bd59..769aff1399a7 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_FB_PM3) += pm3fb.o
obj-$(CONFIG_FB_MATROX) += matrox/
obj-$(CONFIG_FB_RIVA) += riva/
obj-$(CONFIG_FB_NVIDIA) += nvidia/
-obj-$(CONFIG_FB_TEGRA) += tegra-fb.o
obj-$(CONFIG_FB_ATY) += aty/ macmodes.o
obj-$(CONFIG_FB_ATY128) += aty/ macmodes.o
obj-$(CONFIG_FB_RADEON) += aty/
@@ -129,6 +128,7 @@ obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
obj-$(CONFIG_FB_CARMINE) += carminefb.o
obj-$(CONFIG_FB_MB862XX) += mb862xx/
obj-$(CONFIG_FB_MSM) += msm/
+obj-$(CONFIG_FB_TEGRA) += tegra/
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
diff --git a/drivers/video/tegra/Kconfig b/drivers/video/tegra/Kconfig
new file mode 100644
index 000000000000..8d44256bc099
--- /dev/null
+++ b/drivers/video/tegra/Kconfig
@@ -0,0 +1,54 @@
+config FB_TEGRA
+ boolean "NVIDIA Tegra graphics support"
+ depends on FB && ARCH_TEGRA
+ default FB
+ help
+ Graphics services for NVIDIA Tegra products
+
+config FB_TEGRA_DUMMY
+ boolean "Tegra dummy framebuffer driver"
+ depends on FB_TEGRA
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ default FB
+ help
+ This driver implements a dummy framebuffer device for systems based
+ on the NVIDIA Tegra family of SoCs. Display initialization is
+ expected to happen prior to the kernel start-up for this driver to
+ function.
+
+config FB_TEGRA_GRHOST
+ tristate "Tegra graphics host driver"
+ depends on FB_TEGRA && TEGRA_IOVMM
+ default n
+ help
+ Driver for the Tegra graphics host hardware.
+
+config DEVNVMAP
+ bool "/dev/nvmap device support"
+ depends on ARCH_TEGRA
+ default y
+ help
+ Say Y here if you want to support the /dev/nvmap device. This device
+ is used by NVIDIA Tegra graphics and multimedia drivers for managing
+ graphics memory.
+
+config DEVNVMAP_PARANOID
+ bool "Validate all user-provided /dev/nvmap object references"
+ depends on DEVNVMAP
+ default n
+ help
+ Say Y here to enable additional process-level validations and
+ permissions for /dev/nvmap object references provided via ioctls.
+ May result in a decrease in performance.
+
+config DEVNVMAP_RECLAIM_UNPINNED_VM
+ bool "Allow /dev/nvmap to reclaim unpinned I/O virtual memory"
+ depends on DEVNVMAP && TEGRA_IOVMM
+ default y
+ help
+ Say Y here to enable /dev/nvmap to reclaim I/O virtual memory after
+ it has been unpinned, and re-use it for other objects. This can
+ allow a larger virtual I/O VM space than would normally be
+ supported by the hardware, at a slight cost in performance.
diff --git a/drivers/video/tegra/Makefile b/drivers/video/tegra/Makefile
new file mode 100644
index 000000000000..80c35780c6b9
--- /dev/null
+++ b/drivers/video/tegra/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_FB_TEGRA_GRHOST) += host/
+obj-$(CONFIG_FB_TEGRA_DUMMY) += tegra-fb.o
+obj-$(CONFIG_DEVNVMAP) += nvmap.o
diff --git a/drivers/video/tegra/host/Makefile b/drivers/video/tegra/host/Makefile
new file mode 100644
index 000000000000..92d9ba118d1c
--- /dev/null
+++ b/drivers/video/tegra/host/Makefile
@@ -0,0 +1,11 @@
+nvhost-objs = \
+ nvhost_acm.o \
+ nvhost_syncpt.o \
+ nvhost_cdma.o \
+ nvhost_cpuaccess.o \
+ nvhost_intr.o \
+ nvhost_channel.o \
+ nvhost_3dctx.o \
+ nvhost_dev.o
+
+obj-$(CONFIG_FB_TEGRA_GRHOST) += nvhost.o
\ No newline at end of file
diff --git a/drivers/video/tegra/host/nvhost_3dctx.c b/drivers/video/tegra/host/nvhost_3dctx.c
new file mode 100644
index 000000000000..c7c4c30ac9d3
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_3dctx.c
@@ -0,0 +1,490 @@
+/*
+ * drivers/video/tegra/host/nvhost_3dctx.c
+ *
+ * Tegra Graphics Host 3d hardware context
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_hwctx.h"
+#include "nvhost_dev.h"
+
+const struct hwctx_reginfo ctxsave_regs_3d[] = {
+ HWCTX_REGINFO(0xe00, 16, DIRECT),
+ HWCTX_REGINFO(0xe10, 16, DIRECT),
+ HWCTX_REGINFO(0xe20, 1, DIRECT),
+ HWCTX_REGINFO(0xe21, 1, DIRECT),
+ HWCTX_REGINFO(0xe22, 1, DIRECT),
+ HWCTX_REGINFO(0xe25, 1, DIRECT),
+ HWCTX_REGINFO(0xe26, 1, DIRECT),
+ HWCTX_REGINFO(0xe28, 2, DIRECT),
+ HWCTX_REGINFO(0xe2a, 1, DIRECT),
+ HWCTX_REGINFO(0x1, 1, DIRECT),
+ HWCTX_REGINFO(0x2, 1, DIRECT),
+ HWCTX_REGINFO(0xc, 2, DIRECT),
+ HWCTX_REGINFO(0xe, 2, DIRECT),
+ HWCTX_REGINFO(0x10, 2, DIRECT),
+ HWCTX_REGINFO(0x12, 2, DIRECT),
+ HWCTX_REGINFO(0x14, 2, DIRECT),
+ HWCTX_REGINFO(0x100, 32, DIRECT),
+ HWCTX_REGINFO(0x120, 1, DIRECT),
+ HWCTX_REGINFO(0x121, 1, DIRECT),
+ HWCTX_REGINFO(0x124, 1, DIRECT),
+ HWCTX_REGINFO(0x125, 1, DIRECT),
+ HWCTX_REGINFO(0x200, 1, DIRECT),
+ HWCTX_REGINFO(0x201, 1, DIRECT),
+ HWCTX_REGINFO(0x202, 1, DIRECT),
+ HWCTX_REGINFO(0x203, 1, DIRECT),
+ HWCTX_REGINFO(0x204, 1, DIRECT),
+ HWCTX_REGINFO(0x207, 1024, INDIRECT),
+ HWCTX_REGINFO(0x209, 1, DIRECT),
+ HWCTX_REGINFO(0x300, 64, DIRECT),
+ HWCTX_REGINFO(0x343, 1, DIRECT),
+ HWCTX_REGINFO(0x344, 1, DIRECT),
+ HWCTX_REGINFO(0x345, 1, DIRECT),
+ HWCTX_REGINFO(0x346, 1, DIRECT),
+ HWCTX_REGINFO(0x347, 1, DIRECT),
+ HWCTX_REGINFO(0x348, 1, DIRECT),
+ HWCTX_REGINFO(0x349, 1, DIRECT),
+ HWCTX_REGINFO(0x34a, 1, DIRECT),
+ HWCTX_REGINFO(0x34b, 1, DIRECT),
+ HWCTX_REGINFO(0x34c, 1, DIRECT),
+ HWCTX_REGINFO(0x34d, 1, DIRECT),
+ HWCTX_REGINFO(0x34e, 1, DIRECT),
+ HWCTX_REGINFO(0x34f, 1, DIRECT),
+ HWCTX_REGINFO(0x350, 1, DIRECT),
+ HWCTX_REGINFO(0x351, 1, DIRECT),
+ HWCTX_REGINFO(0x352, 1, DIRECT),
+ HWCTX_REGINFO(0x353, 1, DIRECT),
+ HWCTX_REGINFO(0x354, 1, DIRECT),
+ HWCTX_REGINFO(0x355, 1, DIRECT),
+ HWCTX_REGINFO(0x356, 1, DIRECT),
+ HWCTX_REGINFO(0x357, 1, DIRECT),
+ HWCTX_REGINFO(0x358, 1, DIRECT),
+ HWCTX_REGINFO(0x359, 1, DIRECT),
+ HWCTX_REGINFO(0x35a, 1, DIRECT),
+ HWCTX_REGINFO(0x35b, 1, DIRECT),
+ HWCTX_REGINFO(0x363, 1, DIRECT),
+ HWCTX_REGINFO(0x364, 1, DIRECT),
+ HWCTX_REGINFO(0x400, 2, DIRECT),
+ HWCTX_REGINFO(0x402, 1, DIRECT),
+ HWCTX_REGINFO(0x403, 1, DIRECT),
+ HWCTX_REGINFO(0x404, 1, DIRECT),
+ HWCTX_REGINFO(0x405, 1, DIRECT),
+ HWCTX_REGINFO(0x406, 1, DIRECT),
+ HWCTX_REGINFO(0x407, 1, DIRECT),
+ HWCTX_REGINFO(0x408, 1, DIRECT),
+ HWCTX_REGINFO(0x409, 1, DIRECT),
+ HWCTX_REGINFO(0x40a, 1, DIRECT),
+ HWCTX_REGINFO(0x40b, 1, DIRECT),
+ HWCTX_REGINFO(0x40c, 1, DIRECT),
+ HWCTX_REGINFO(0x40d, 1, DIRECT),
+ HWCTX_REGINFO(0x40e, 1, DIRECT),
+ HWCTX_REGINFO(0x40f, 1, DIRECT),
+ HWCTX_REGINFO(0x411, 1, DIRECT),
+ HWCTX_REGINFO(0x500, 1, DIRECT),
+ HWCTX_REGINFO(0x501, 1, DIRECT),
+ HWCTX_REGINFO(0x502, 1, DIRECT),
+ HWCTX_REGINFO(0x503, 1, DIRECT),
+ HWCTX_REGINFO(0x520, 32, DIRECT),
+ HWCTX_REGINFO(0x540, 64, INDIRECT),
+ HWCTX_REGINFO(0x600, 0, INDIRECT_OFFSET),
+ HWCTX_REGINFO(0x602, 16, INDIRECT_DATA),
+ HWCTX_REGINFO(0x603, 128, INDIRECT),
+ HWCTX_REGINFO(0x608, 4, DIRECT),
+ HWCTX_REGINFO(0x60e, 1, DIRECT),
+ HWCTX_REGINFO(0x700, 64, INDIRECT),
+ HWCTX_REGINFO(0x710, 16, DIRECT),
+ HWCTX_REGINFO(0x720, 32, DIRECT),
+ HWCTX_REGINFO(0x740, 1, DIRECT),
+ HWCTX_REGINFO(0x741, 1, DIRECT),
+ HWCTX_REGINFO(0x800, 0, INDIRECT_OFFSET),
+ HWCTX_REGINFO(0x802, 16, INDIRECT_DATA),
+ HWCTX_REGINFO(0x803, 512, INDIRECT),
+ HWCTX_REGINFO(0x805, 64, INDIRECT),
+ HWCTX_REGINFO(0x820, 32, DIRECT),
+ HWCTX_REGINFO(0x900, 64, INDIRECT),
+ HWCTX_REGINFO(0x902, 1, DIRECT),
+ HWCTX_REGINFO(0x903, 1, DIRECT),
+ HWCTX_REGINFO(0xa02, 1, DIRECT),
+ HWCTX_REGINFO(0xa03, 1, DIRECT),
+ HWCTX_REGINFO(0xa04, 1, DIRECT),
+ HWCTX_REGINFO(0xa05, 1, DIRECT),
+ HWCTX_REGINFO(0xa06, 1, DIRECT),
+ HWCTX_REGINFO(0xa07, 1, DIRECT),
+ HWCTX_REGINFO(0xa08, 1, DIRECT),
+ HWCTX_REGINFO(0xa09, 1, DIRECT),
+ HWCTX_REGINFO(0xa0a, 1, DIRECT),
+ HWCTX_REGINFO(0xa0b, 1, DIRECT),
+ HWCTX_REGINFO(0x205, 1024, INDIRECT)
+};
+
+
+/*** restore ***/
+
+static unsigned int context_restore_size = 0;
+
+static void restore_begin(u32 *ptr, u32 waitbase)
+{
+ /* set class to host */
+ ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
+ /* increment sync point base */
+ ptr[1] = nvhost_class_host_incr_syncpt_base(waitbase, 1);
+ /* set class to 3D */
+ ptr[2] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ /* program PSEQ_QUAD_ID */
+ ptr[3] = nvhost_opcode_imm(0x545, 0);
+}
+#define RESTORE_BEGIN_SIZE 4
+
+static void restore_end(u32 *ptr, u32 syncpt_id)
+{
+ /* syncpt increment to track restore gather. */
+ ptr[0] = nvhost_opcode_imm(0x0, ((1UL << 8) | (u8)(syncpt_id & 0xff)));
+}
+#define RESTORE_END_SIZE 1
+
+static void restore_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_incr(start_reg, count);
+}
+#define RESTORE_DIRECT_SIZE 1
+
+static void restore_indoffset(u32 *ptr, u32 offset_reg, u32 offset)
+{
+ ptr[0] = nvhost_opcode_imm(offset_reg, offset);
+}
+#define RESTORE_INDOFFSET_SIZE 1
+
+static void restore_inddata(u32 *ptr, u32 data_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_nonincr(data_reg, count);
+}
+#define RESTORE_INDDATA_SIZE 1
+
+static void restore_registers_from_fifo(u32 *ptr, unsigned int count,
+ struct nvhost_channel *channel,
+ unsigned int *pending)
+{
+ void __iomem *chan_regs = channel->aperture;
+ unsigned int entries = *pending;
+ while (count) {
+ unsigned int num;
+
+ while (!entries) {
+ /* query host for number of entries in fifo */
+ entries = nvhost_channel_fifostat_outfentries(
+ readl(chan_regs + HOST1X_CHANNEL_FIFOSTAT));
+ if (!entries)
+ cpu_relax();
+ /* TODO: [ahowe 2010-06-14] timeout */
+ }
+ num = min(entries, count);
+ entries -= num;
+ count -= num;
+
+ while (num & ~0x3) {
+ u32 arr[4];
+ arr[0] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ arr[1] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ arr[2] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ arr[3] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ memcpy(ptr, arr, 4*sizeof(u32));
+ ptr += 4;
+ num -= 4;
+ }
+ while (num--)
+ *ptr++ = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ }
+ *pending = entries;
+}
+
+static void setup_restore(u32 *ptr, u32 waitbase)
+{
+ const struct hwctx_reginfo *r;
+ const struct hwctx_reginfo *rend;
+
+ restore_begin(ptr, waitbase);
+ ptr += RESTORE_BEGIN_SIZE;
+
+ r = ctxsave_regs_3d;
+ rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
+ for ( ; r != rend; ++r) {
+ u32 offset = r->offset;
+ u32 count = r->count;
+ switch (r->type) {
+ case HWCTX_REGINFO_DIRECT:
+ restore_direct(ptr, offset, count);
+ ptr += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT:
+ restore_indoffset(ptr, offset, 0);
+ ptr += RESTORE_INDOFFSET_SIZE;
+ restore_inddata(ptr, offset + 1, count);
+ ptr += RESTORE_INDDATA_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT_OFFSET:
+ restore_indoffset(ptr, offset, count);
+ ptr += RESTORE_INDOFFSET_SIZE;
+ continue; /* INDIRECT_DATA follows with real count */
+ case HWCTX_REGINFO_INDIRECT_DATA:
+ restore_inddata(ptr, offset, count);
+ ptr += RESTORE_INDDATA_SIZE;
+ break;
+ }
+ ptr += count;
+ }
+
+ restore_end(ptr, NVSYNCPT_3D);
+ wmb();
+}
+
+/*** save ***/
+
+/* the same context save command sequence is used for all contexts. */
+static struct nvmap_handle *context_save_buf = NULL;
+static u32 context_save_phys = 0;
+static u32 *context_save_ptr = NULL;
+static unsigned int context_save_size = 0;
+
+/*
+ * Emit the 7-word preamble of the context save stream: flush the 3D
+ * pipe, signal the CPU-side read thread via a syncpt increment, then
+ * wait until that thread has advanced the wait base before continuing.
+ */
+static void save_begin(u32 *ptr, u32 syncpt_id, u32 waitbase)
+{
+	/* set class to the unit to flush */
+	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+	/*
+	 * Flush pipe and signal context read thread to start reading
+	 * sync point increment
+	 */
+	ptr[1] = nvhost_opcode_imm(0, 0x100 | syncpt_id);
+	ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
+	/* wait for base+1 */
+	ptr[3] = nvhost_class_host_wait_syncpt_base(syncpt_id, waitbase, 1);
+	ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+	ptr[5] = nvhost_opcode_imm(0, syncpt_id);
+	ptr[6] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, 0, 0);
+}
+#define SAVE_BEGIN_SIZE 7
+
+/*
+ * Emit a 3-word indirect-read sequence that streams 'count' directly
+ * addressed 3D registers (starting at start_reg) into the channel FIFO.
+ */
+static void save_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
+	ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+						start_reg, true);
+	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
+}
+#define SAVE_DIRECT_SIZE 3
+
+/*
+ * Emit a 4-word sequence that writes 'offset' into the module's
+ * indirect-offset register, selecting where a following INDDATA
+ * read sequence starts.
+ */
+static void save_indoffset(u32 *ptr, u32 offset_reg, u32 offset)
+{
+	ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
+	ptr[1] = nvhost_class_host_indoff_reg_write(NV_HOST_MODULE_GR3D,
+						offset_reg, true);
+	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, 1);
+	ptr[3] = offset;
+}
+#define SAVE_INDOFFSET_SIZE 4
+
+/*
+ * Emit a 3-word sequence that streams 'count' words of indirect data
+ * (through data_reg, no auto-increment of the host offset) into the FIFO.
+ * NOTE(review): the size macro below is misspelled (INDDDATA, triple D)
+ * but is used consistently throughout this file.
+ */
+static inline void save_inddata(u32 *ptr, u32 data_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
+	ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+						data_reg, false);
+	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
+}
+#define SAVE_INDDDATA_SIZE 3
+
+/*
+ * Emit the 5-word epilogue: wait until the CPU read service has consumed
+ * the FIFO data (base+3), bump the wait base for the next save, and
+ * return the channel to the 3D class.
+ */
+static void save_end(u32 *ptr, u32 syncpt_id, u32 waitbase)
+{
+	/* Wait for context read service */
+	ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
+	ptr[1] = nvhost_class_host_wait_syncpt_base(syncpt_id, waitbase, 3);
+	/* Increment syncpoint base */
+	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
+	ptr[3] = nvhost_class_host_incr_syncpt_base(waitbase, 3);
+	/* set class back to the unit */
+	ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+}
+#define SAVE_END_SIZE 5
+
+/*
+ * Dual-purpose routine: when 'ptr' is NULL this only computes the word
+ * counts of the save and restore streams (returned through words_save /
+ * words_restore); when 'ptr' is non-NULL it also emits the shared save
+ * command stream.  The walk over ctxsave_regs_3d must match
+ * setup_restore() exactly so both streams agree on layout.
+ */
+static void __init setup_save(
+	u32 *ptr, unsigned int *words_save, unsigned int *words_restore,
+	u32 syncpt_id, u32 waitbase)
+{
+	const struct hwctx_reginfo *r;
+	const struct hwctx_reginfo *rend;
+	unsigned int save = SAVE_BEGIN_SIZE + SAVE_END_SIZE;
+	unsigned int restore = RESTORE_BEGIN_SIZE + RESTORE_END_SIZE;
+
+	if (ptr) {
+		save_begin(ptr, syncpt_id, waitbase);
+		ptr += SAVE_BEGIN_SIZE;
+	}
+
+	r = ctxsave_regs_3d;
+	rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
+	for ( ; r != rend; ++r) {
+		u32 offset = r->offset;
+		u32 count = r->count;
+		switch (r->type) {
+		case HWCTX_REGINFO_DIRECT:
+			if (ptr) {
+				save_direct(ptr, offset, count);
+				ptr += SAVE_DIRECT_SIZE;
+			}
+			save += SAVE_DIRECT_SIZE;
+			restore += RESTORE_DIRECT_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT:
+			if (ptr) {
+				save_indoffset(ptr, offset, 0);
+				ptr += SAVE_INDOFFSET_SIZE;
+				save_inddata(ptr, offset + 1, count);
+				ptr += SAVE_INDDDATA_SIZE;
+			}
+			save += SAVE_INDOFFSET_SIZE;
+			restore += RESTORE_INDOFFSET_SIZE;
+			save += SAVE_INDDDATA_SIZE;
+			restore += RESTORE_INDDATA_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT_OFFSET:
+			if (ptr) {
+				save_indoffset(ptr, offset, count);
+				ptr += SAVE_INDOFFSET_SIZE;
+			}
+			save += SAVE_INDOFFSET_SIZE;
+			restore += RESTORE_INDOFFSET_SIZE;
+			continue; /* INDIRECT_DATA follows with real count */
+		case HWCTX_REGINFO_INDIRECT_DATA:
+			if (ptr) {
+				save_inddata(ptr, offset, count);
+				ptr += SAVE_INDDDATA_SIZE;
+			}
+			save += SAVE_INDDDATA_SIZE;
+			restore += RESTORE_INDDATA_SIZE;
+			break;
+		}
+		if (ptr) {
+			/* placeholder words; hardware streams the real
+			 * register contents into the FIFO at save time */
+			memset(ptr, 0, count * 4);
+			ptr += count;
+		}
+		save += count;
+		restore += count;
+	}
+
+	if (ptr)
+		save_end(ptr, syncpt_id, waitbase);
+
+	if (words_save)
+		*words_save = save;
+	if (words_restore)
+		*words_restore = restore;
+	wmb();
+}
+
+/*** ctx3d ***/
+
+/*
+ * Allocate and initialize a per-context restore buffer; the save buffer
+ * and its command stream are shared between all 3D contexts.
+ * Returns 0 on success or a negative errno.
+ */
+static int ctx3d_init(struct nvhost_hwctx *ctx)
+{
+	ctx->restore = nvmap_alloc(context_restore_size * 4, 32,
+				NVMEM_HANDLE_WRITE_COMBINE,
+				(void**)&ctx->save_cpu_data);
+	/*
+	 * nvmap_alloc can return NULL as well as an ERR_PTR; PTR_ERR(NULL)
+	 * is 0, which would falsely report success to the caller.  Map the
+	 * NULL case to -ENOMEM explicitly.
+	 */
+	if (IS_ERR(ctx->restore))
+		return PTR_ERR(ctx->restore);
+	if (ctx->restore == NULL)
+		return -ENOMEM;
+
+	setup_restore(ctx->save_cpu_data, NVWAITBASE_3D);
+	ctx->restore_phys = nvmap_pin_single(ctx->restore);
+	ctx->restore_size = context_restore_size;
+	ctx->save = context_save_buf;
+	ctx->save_phys = context_save_phys;
+	ctx->save_size = context_save_size;
+	ctx->save_incrs = 3;
+	ctx->restore_incrs = 1;
+	ctx->valid = false;
+	return 0;
+}
+
+/* Free a context's private restore buffer (the save buffer is shared). */
+static void ctx3d_deinit(struct nvhost_hwctx *ctx)
+{
+	nvmap_free(ctx->restore, ctx->save_cpu_data);
+}
+
+/*
+ * CPU-side half of a context save: drain the register values that the
+ * save command stream pushed into the channel FIFO, writing them into
+ * the data slots of this context's restore stream.  The walk must skip
+ * opcode words using the same sizes setup_restore() emitted.  Finishes
+ * by incrementing NVSYNCPT_3D to release the waiting channel.
+ */
+static void ctx3d_save_service(struct nvhost_hwctx *ctx)
+{
+	const struct hwctx_reginfo *r;
+	const struct hwctx_reginfo *rend;
+	unsigned int pending = 0;
+	u32 *ptr = (u32 *)ctx->save_cpu_data + RESTORE_BEGIN_SIZE;
+
+	BUG_ON(!ctx->save_cpu_data);
+
+	r = ctxsave_regs_3d;
+	rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
+	for ( ; r != rend; ++r) {
+		u32 count = r->count;
+		switch (r->type) {
+		case HWCTX_REGINFO_DIRECT:
+			ptr += RESTORE_DIRECT_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT:
+			ptr += RESTORE_INDOFFSET_SIZE + RESTORE_INDDATA_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT_OFFSET:
+			ptr += RESTORE_INDOFFSET_SIZE;
+			continue; /* INDIRECT_DATA follows with real count */
+		case HWCTX_REGINFO_INDIRECT_DATA:
+			ptr += RESTORE_INDDATA_SIZE;
+			break;
+		}
+		restore_registers_from_fifo(ptr, count, ctx->channel, &pending);
+		ptr += count;
+	}
+
+	/* verify we consumed exactly the stream setup_restore() laid out */
+	BUG_ON((u32)((ptr + RESTORE_END_SIZE) - (u32*)ctx->save_cpu_data)
+		!= context_restore_size);
+
+	wmb();
+	nvhost_syncpt_cpu_incr(&ctx->channel->dev->syncpt, NVSYNCPT_3D);
+}
+
+
+/*** nvhost_3dctx ***/
+
+/*
+ * One-time init of the shared 3D context save machinery: size the
+ * save/restore streams (first setup_save pass with ptr == NULL),
+ * allocate and pin the shared save buffer, then emit the save stream.
+ * Returns 0 on success or a negative errno.
+ */
+int __init nvhost_3dctx_handler_init(struct nvhost_hwctx_handler *h)
+{
+	setup_save(NULL, &context_save_size, &context_restore_size, 0, 0);
+
+	context_save_buf = nvmap_alloc(context_save_size * 4, 32,
+				NVMEM_HANDLE_WRITE_COMBINE,
+				(void**)&context_save_ptr);
+	/*
+	 * nvmap_alloc can return NULL as well as an ERR_PTR; PTR_ERR(NULL)
+	 * is 0, which would make this init falsely succeed.  Map the NULL
+	 * case to -ENOMEM explicitly.
+	 */
+	if (IS_ERR(context_save_buf))
+		return PTR_ERR(context_save_buf);
+	if (context_save_buf == NULL)
+		return -ENOMEM;
+	context_save_phys = nvmap_pin_single(context_save_buf);
+	setup_save(context_save_ptr, NULL, NULL, NVSYNCPT_3D, NVWAITBASE_3D);
+
+	h->init = ctx3d_init;
+	h->deinit = ctx3d_deinit;
+	h->save_service = ctx3d_save_service;
+	return 0;
+}
+
+/* TODO: [ahatala 2010-05-27] */
+/* Stub: MPE context save/restore is not implemented yet; leaves the
+ * handler callbacks untouched and reports success. */
+int __init nvhost_mpectx_handler_init(struct nvhost_hwctx_handler *h)
+{
+	return 0;
+}
diff --git a/drivers/video/tegra/host/nvhost_acm.c b/drivers/video/tegra/host/nvhost_acm.c
new file mode 100644
index 000000000000..360774b72bb6
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_acm.c
@@ -0,0 +1,148 @@
+/*
+ * drivers/video/tegra/host/nvhost_acm.c
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_acm.h"
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/device.h>
+
+#define ACM_TIMEOUT 1*HZ
+
+/*
+ * Take a busy reference on the module, powering it (and its parent)
+ * up on the 0 -> 1 transition.  Cancels any pending delayed powerdown.
+ * May sleep (mutex, clock enable); not callable from atomic context.
+ */
+void nvhost_module_busy(struct nvhost_module *mod)
+{
+	mutex_lock(&mod->lock);
+	cancel_delayed_work(&mod->powerdown);
+	if ((atomic_inc_return(&mod->refcount) == 1) && !mod->powered) {
+		int i;
+		/* parent must be powered before this module's clocks go on */
+		if (mod->parent)
+			nvhost_module_busy(mod->parent);
+		for (i = 0; i < mod->num_clks; i++)
+			clk_enable(mod->clk[i]);
+		if (mod->func)
+			mod->func(mod, NVHOST_POWER_ACTION_ON);
+		mod->powered = true;
+	}
+	mutex_unlock(&mod->lock);
+}
+
+/*
+ * Delayed-work handler that gates the module's clocks after it has been
+ * idle for ACM_TIMEOUT.  cancel_delayed_work() in nvhost_module_busy()
+ * cannot stop an already-running handler, so the refcount is re-checked
+ * here under the lock before powering down.
+ */
+static void powerdown_handler(struct work_struct *work)
+{
+	struct nvhost_module *mod;
+	mod = container_of(to_delayed_work(work), struct nvhost_module, powerdown);
+	mutex_lock(&mod->lock);
+	BUG_ON(!mod->powered);
+	if (atomic_read(&mod->refcount) == 0) {
+		int i;
+		if (mod->func)
+			mod->func(mod, NVHOST_POWER_ACTION_OFF);
+		for (i = 0; i < mod->num_clks; i++)
+			clk_disable(mod->clk[i]);
+		mod->powered = false;
+		/* release the busy reference taken on our parent at power-on */
+		if (mod->parent)
+			nvhost_module_idle(mod->parent);
+	}
+	mutex_unlock(&mod->lock);
+}
+
+/*
+ * Drop 'refs' busy references.  When the count reaches zero, schedule
+ * the delayed powerdown and wake anyone sleeping in
+ * nvhost_module_suspend() on the idle queue.
+ */
+void nvhost_module_idle_mult(struct nvhost_module *mod, int refs)
+{
+	bool kick = false;
+
+	mutex_lock(&mod->lock);
+	if (atomic_sub_return(refs, &mod->refcount) == 0) {
+		BUG_ON(!mod->powered);
+		schedule_delayed_work(&mod->powerdown, ACM_TIMEOUT);
+		kick = true;
+	}
+	mutex_unlock(&mod->lock);
+
+	if (kick)
+		wake_up(&mod->idle);
+}
+
+/*
+ * Map a module name and clock index to the clock name to request:
+ * index 0 is always the module's own clock; the 2D engine additionally
+ * needs the "epp" clock as its second clock.  Returns NULL when the
+ * module has no clock at that index.
+ */
+static const char *get_module_clk_id(const char *module, int index)
+{
+	switch (index) {
+	case 0:
+		return module;
+	case 1:
+		if (strcmp(module, "gr2d") == 0)
+			return "epp";
+		break;
+	}
+	return NULL;
+}
+
+/*
+ * Initialize a power-managed module: look up its clocks, raise each to
+ * its maximum rate, and set up the delayed-powerdown machinery.  The
+ * module starts powered off with refcount 0.  Always returns 0; a
+ * module may legitimately end up with zero clocks.
+ */
+int nvhost_module_init(struct nvhost_module *mod, const char *name,
+		nvhost_modulef func, struct nvhost_module *parent,
+		struct device *dev)
+{
+	int i = 0;
+
+	mod->name = name;
+
+	while (i < NVHOST_MODULE_MAX_CLOCKS) {
+		long rate;
+		mod->clk[i] = clk_get(dev, get_module_clk_id(name, i));
+		if (IS_ERR_OR_NULL(mod->clk[i]))
+			break;
+		rate = clk_round_rate(mod->clk[i], UINT_MAX);
+		if (rate < 0) {
+			pr_err("%s: can't get maximum rate for %s\n",
+				__func__, name);
+			/* drop the reference taken by clk_get above;
+			 * num_clks won't cover this slot, so deinit
+			 * would never release it */
+			clk_put(mod->clk[i]);
+			break;
+		}
+		clk_set_rate(mod->clk[i], rate);
+		i++;
+	}
+
+	mod->num_clks = i;
+	mod->func = func;
+	mod->parent = parent;
+	mod->powered = false;
+	mutex_init(&mod->lock);
+	init_waitqueue_head(&mod->idle);
+	INIT_DELAYED_WORK(&mod->powerdown, powerdown_handler);
+
+	return 0;
+}
+
+/* Snapshot the busy refcount under the lock; nonzero return = idle. */
+static int is_module_idle(struct nvhost_module *mod)
+{
+	int idle;
+
+	mutex_lock(&mod->lock);
+	idle = (atomic_read(&mod->refcount) == 0);
+	mutex_unlock(&mod->lock);
+	return idle;
+}
+
+/*
+ * Block until the module is idle, then wait for the pending powerdown
+ * work to complete so the module is guaranteed unpowered on return.
+ * Caller must ensure no new busy references are taken meanwhile.
+ */
+void nvhost_module_suspend(struct nvhost_module *mod)
+{
+	wait_event(mod->idle, is_module_idle(mod));
+	flush_delayed_work(&mod->powerdown);
+	BUG_ON(mod->powered);
+}
+
+/* Power the module down and release all clock references. */
+void nvhost_module_deinit(struct nvhost_module *mod)
+{
+	int i;
+	nvhost_module_suspend(mod);
+	for (i = 0; i < mod->num_clks; i++)
+		clk_put(mod->clk[i]);
+}
diff --git a/drivers/video/tegra/host/nvhost_acm.h b/drivers/video/tegra/host/nvhost_acm.h
new file mode 100644
index 000000000000..7cf70e2be098
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_acm.h
@@ -0,0 +1,75 @@
+/*
+ * drivers/video/tegra/host/nvhost_acm.h
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_ACM_H
+#define __NVHOST_ACM_H
+
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+
+#define NVHOST_MODULE_MAX_CLOCKS 2
+
+struct nvhost_module;
+
+/* Power transitions reported to a module's nvhost_modulef callback. */
+enum nvhost_power_action {
+	NVHOST_POWER_ACTION_OFF,
+	NVHOST_POWER_ACTION_ON,
+};
+
+/* Invoked after clocks are enabled (ON) / before they are disabled (OFF). */
+typedef void (*nvhost_modulef)(struct nvhost_module *mod, enum nvhost_power_action action);
+
+struct nvhost_module {
+	const char *name;
+	nvhost_modulef func;		/* power notification callback, may be NULL */
+	struct delayed_work powerdown;	/* deferred clock gating after idle */
+	struct clk *clk[NVHOST_MODULE_MAX_CLOCKS];
+	int num_clks;			/* number of valid entries in clk[] */
+	struct mutex lock;		/* serializes power state transitions */
+	bool powered;			/* true while clocks are enabled */
+	atomic_t refcount;		/* outstanding busy references */
+	wait_queue_head_t idle;		/* woken when refcount drops to zero */
+	struct nvhost_module *parent;	/* held busy while this module is powered */
+};
+
+int nvhost_module_init(struct nvhost_module *mod, const char *name,
+ nvhost_modulef func, struct nvhost_module *parent,
+ struct device *dev);
+void nvhost_module_deinit(struct nvhost_module *mod);
+void nvhost_module_suspend(struct nvhost_module *mod);
+
+void nvhost_module_busy(struct nvhost_module *mod);
+void nvhost_module_idle_mult(struct nvhost_module *mod, int refs);
+
+/* Returns true while the module's clocks are enabled. */
+static inline bool nvhost_module_powered(struct nvhost_module *mod)
+{
+	return mod->powered;
+}
+
+/* Drop a single busy reference (see nvhost_module_idle_mult). */
+static inline void nvhost_module_idle(struct nvhost_module *mod)
+{
+	nvhost_module_idle_mult(mod, 1);
+}
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_cdma.c b/drivers/video/tegra/host/nvhost_cdma.c
new file mode 100644
index 000000000000..1807a1c0760b
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cdma.c
@@ -0,0 +1,597 @@
+/*
+ * drivers/video/tegra/host/nvhost_cdma.c
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_cdma.h"
+#include "nvhost_dev.h"
+#include <asm/cacheflush.h>
+
+/*
+ * TODO:
+ * stats
+ * - for figuring out what to optimize further
+ * resizable push buffer & sync queue
+ * - some channels hardly need any, some channels (3d) could use more
+ */
+
+#define cdma_to_channel(cdma) container_of(cdma, struct nvhost_channel, cdma)
+#define cdma_to_dev(cdma) ((cdma_to_channel(cdma))->dev)
+
+/*
+ * push_buffer
+ *
+ * The push buffer is a circular array of words to be fetched by command DMA.
+ * Note that it works slightly differently to the sync queue; fence == cur
+ * means that the push buffer is full, not empty.
+ */
+
+// 8 bytes per slot. (This number does not include the final RESTART.)
+#define PUSH_BUFFER_SIZE (NVHOST_GATHER_QUEUE_SIZE * 8)
+
+static void destroy_push_buffer(struct push_buffer *pb);
+
+/**
+ * Reset to empty push buffer.
+ * fence == cur means full here, so "empty" is fence one slot (8 bytes)
+ * behind cur, modulo the buffer size.
+ */
+static void reset_push_buffer(struct push_buffer *pb)
+{
+	pb->fence = PUSH_BUFFER_SIZE - 8;
+	pb->cur = 0;
+}
+
+/**
+ * Init push buffer resources.
+ * Allocates PUSH_BUFFER_SIZE + 4 bytes: the extra trailing word holds a
+ * RESTART opcode that wraps command DMA back to the buffer start.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int init_push_buffer(struct push_buffer *pb)
+{
+	pb->mem = NULL;
+	pb->mapped = NULL;
+	pb->phys = 0;
+	reset_push_buffer(pb);
+
+	/* allocate and map pushbuffer memory */
+	pb->mem = nvmap_alloc(PUSH_BUFFER_SIZE + 4, 32,
+			NVMEM_HANDLE_WRITE_COMBINE, (void**)&pb->mapped);
+	if (IS_ERR_OR_NULL(pb->mem)) {
+		pb->mem = NULL;
+		goto fail;
+	}
+
+	/* pin pushbuffer and get physical address */
+	pb->phys = nvmap_pin_single(pb->mem);
+
+	/* put the restart at the end of pushbuffer memory */
+	*(pb->mapped + (PUSH_BUFFER_SIZE >> 2)) = nvhost_opcode_restart(pb->phys);
+
+	return 0;
+
+fail:
+	destroy_push_buffer(pb);
+	return -ENOMEM;
+}
+
+/**
+ * Clean up push buffer resources.
+ * Safe to call on a partially-initialized buffer (init failure path);
+ * unpins only if the buffer was actually pinned.
+ */
+static void destroy_push_buffer(struct push_buffer *pb)
+{
+	if (pb->mem) {
+		if (pb->phys != 0) {
+			nvmap_unpin(&pb->mem, 1);
+			pb->phys = 0;
+		}
+
+		nvmap_free(pb->mem, pb->mapped);
+		pb->mem = NULL;
+		pb->mapped = NULL;
+	}
+}
+
+/**
+ * Push two words to the push buffer.
+ * Caller must ensure push buffer is not full (cur may not catch up
+ * with fence).  'cur' is a byte offset; it advances by one 8-byte slot
+ * and wraps within PUSH_BUFFER_SIZE (power of two).
+ */
+static void push_to_push_buffer(struct push_buffer *pb, u32 op1, u32 op2)
+{
+	u32 cur = pb->cur;
+	/* index the mapping in words instead of casting the pointer
+	 * through u32, which is not portable */
+	u32 *p = pb->mapped + (cur >> 2);
+	BUG_ON(cur == pb->fence);
+	*(p++) = op1;
+	*(p++) = op2;
+	pb->cur = (cur + 8) & (PUSH_BUFFER_SIZE - 1);
+}
+
+/**
+ * Pop a number of two word slots from the push buffer.
+ * Caller must ensure push buffer is not empty.  Advancing 'fence'
+ * frees the slots for reuse by push_to_push_buffer().
+ */
+static void pop_from_push_buffer(struct push_buffer *pb, unsigned int slots)
+{
+	pb->fence = (pb->fence + slots * 8) & (PUSH_BUFFER_SIZE - 1);
+}
+
+/**
+ * Return the number of two word slots free in the push buffer
+ * (distance from cur up to fence, in 8-byte slots).
+ */
+static u32 push_buffer_space(struct push_buffer *pb)
+{
+	return ((pb->fence - pb->cur) & (PUSH_BUFFER_SIZE - 1)) / 8;
+}
+
+/* Physical address of the current write position, for DMAPUT. */
+static u32 push_buffer_putptr(struct push_buffer *pb)
+{
+	return pb->phys + pb->cur;
+}
+
+
+/* Sync Queue
+ *
+ * The sync queue is a circular buffer of u32s interpreted as:
+ * 0: SyncPointID
+ * 1: SyncPointValue
+ * 2: NumSlots (how many pushbuffer slots to free)
+ * 3: NumHandles
+ * 4..: NumHandles * nvmemhandle to unpin
+ *
+ * There's always one word unused, so (accounting for wrap):
+ * - Write == Read => queue empty
+ * - Write + 1 == Read => queue full
+ * The queue must not be left with less than SYNC_QUEUE_MIN_ENTRY words
+ * of space at the end of the array.
+ *
+ * We want to pass contiguous arrays of handles to NrRmMemUnpin, so arrays
+ * that would wrap at the end of the buffer will be split into two (or more)
+ * entries.
+ */
+
+/* Number of words needed to store an entry containing one handle */
+#define SYNC_QUEUE_MIN_ENTRY 5
+
+/**
+ * Reset to empty queue (read == write means empty).
+ */
+static void reset_sync_queue(struct sync_queue *queue)
+{
+	queue->read = 0;
+	queue->write = 0;
+}
+
+/**
+ * Find the number of handles that can be stashed in the sync queue without
+ * waiting.
+ * 0 -> queue is full, must update to wait for some entries to be freed.
+ * Entries never wrap mid-way: each must fit contiguously before the end
+ * of the buffer, which is why only the space up to the end (or up to
+ * read - 1) is counted.
+ */
+static unsigned int sync_queue_space(struct sync_queue *queue)
+{
+	unsigned int read = queue->read;
+	unsigned int write = queue->write;
+	u32 size;
+
+	BUG_ON(read > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
+	BUG_ON(write > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
+
+	/*
+	 * We can use all of the space up to the end of the buffer, unless the
+	 * read position is within that space (the read position may advance
+	 * asynchronously, but that can't take space away once we've seen it).
+	 */
+	if (read > write) {
+		size = (read - 1) - write;
+	} else {
+		size = NVHOST_SYNC_QUEUE_SIZE - write;
+
+		/*
+		 * If the read position is zero, it gets complicated. We can't
+		 * use the last word in the buffer, because that would leave
+		 * the queue empty.
+		 * But also if we use too much we would not leave enough space
+		 * for a single handle packet, and would have to wrap in
+		 * add_to_sync_queue - also leaving write == read == 0,
+		 * an empty queue.
+		 */
+		if (read == 0)
+			size -= SYNC_QUEUE_MIN_ENTRY;
+	}
+
+	/*
+	 * There must be room for an entry header and at least one handle,
+	 * otherwise we report a full queue.
+	 */
+	if (size < SYNC_QUEUE_MIN_ENTRY)
+		return 0;
+	/* Minimum entry stores one handle */
+	return (size - SYNC_QUEUE_MIN_ENTRY) + 1;
+}
+
+/**
+ * Add an entry to the sync queue.
+ * Entry layout: [syncpt id, syncpt value, nr pushbuffer slots to free,
+ * nr handles, handles...].  Caller must have checked sync_queue_space()
+ * first; the entry is written contiguously (never wraps mid-entry).
+ */
+static void add_to_sync_queue(struct sync_queue *queue,
+			u32 sync_point_id, u32 sync_point_value,
+			u32 nr_slots,
+			struct nvmap_handle **handles, u32 nr_handles)
+{
+	u32 write = queue->write;
+	u32 *p = queue->buffer + write;
+	u32 size = 4 + nr_handles;
+
+	BUG_ON(sync_point_id == NVSYNCPT_INVALID);
+	BUG_ON(sync_queue_space(queue) < nr_handles);
+
+	write += size;
+	BUG_ON(write > NVHOST_SYNC_QUEUE_SIZE);
+
+	*p++ = sync_point_id;
+	*p++ = sync_point_value;
+	*p++ = nr_slots;
+	*p++ = nr_handles;
+	if (nr_handles)
+		memcpy(p, handles, nr_handles*sizeof(struct nvmap_handle *));
+
+	/* If there's not enough room for another entry, wrap to the start. */
+	if ((write + SYNC_QUEUE_MIN_ENTRY) > NVHOST_SYNC_QUEUE_SIZE) {
+		/*
+		 * It's an error for the read position to be zero, as that
+		 * would mean we emptied the queue while adding something.
+		 */
+		BUG_ON(queue->read == 0);
+		write = 0;
+	}
+
+	queue->write = write;
+}
+
+/**
+ * Get a pointer to the next entry in the queue, or NULL if the queue is empty.
+ * Doesn't consume the entry.
+ */
+static u32 *sync_queue_head(struct sync_queue *queue)
+{
+	u32 read = queue->read;
+	u32 write = queue->write;
+
+	BUG_ON(read > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
+	BUG_ON(write > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
+
+	if (read == write)
+		return NULL;
+	return queue->buffer + read;
+}
+
+/**
+ * Advances to the next queue entry, if you want to consume it.
+ * Entry size = 4 header words + the handle count stored at offset 3,
+ * mirroring the layout written by add_to_sync_queue().
+ */
+static void
+dequeue_sync_queue_head(struct sync_queue *queue)
+{
+	u32 read = queue->read;
+	u32 size;
+
+	BUG_ON(read == queue->write);
+
+	size = 4 + queue->buffer[read + 3];
+
+	read += size;
+	BUG_ON(read > NVHOST_SYNC_QUEUE_SIZE);
+
+	/* If there's not enough room for another entry, wrap to the start. */
+	if ((read + SYNC_QUEUE_MIN_ENTRY) > NVHOST_SYNC_QUEUE_SIZE)
+		read = 0;
+
+	queue->read = read;
+}
+
+
+/*** Cdma internal stuff ***/
+
+/**
+ * Start channel DMA.
+ * Programs the DMA window over all of memory, resets GET, then enables
+ * fetching from the current PUT position.  No-op if already running.
+ * NOTE(review): callers in this file hold cdma->lock — confirm any new
+ * caller does the same.
+ */
+static void start_cdma(struct nvhost_cdma *cdma)
+{
+	void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+	if (cdma->running)
+		return;
+
+	cdma->last_put = push_buffer_putptr(&cdma->push_buffer);
+
+	writel(nvhost_channel_dmactrl(true, false, false),
+		chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+	/* set base, put, end pointer (all of memory) */
+	writel(0, chan_regs + HOST1X_CHANNEL_DMASTART);
+	writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT);
+	writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND);
+
+	/* reset GET */
+	writel(nvhost_channel_dmactrl(true, true, true),
+		chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+	/* start the command DMA */
+	writel(nvhost_channel_dmactrl(false, false, false),
+		chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+	cdma->running = true;
+}
+
+/**
+ * Kick channel DMA into action by writing its PUT offset (if it has changed).
+ * The wmb() orders the pushbuffer writes before the DMAPUT write so the
+ * DMA engine never fetches stale opcodes.
+ */
+static void kick_cdma(struct nvhost_cdma *cdma)
+{
+	u32 put = push_buffer_putptr(&cdma->push_buffer);
+	if (put != cdma->last_put) {
+		void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+		wmb();
+		writel(put, chan_regs + HOST1X_CHANNEL_DMAPUT);
+		cdma->last_put = put;
+	}
+}
+
+/**
+ * Return the status of the cdma's sync queue or push buffer for the given
+ * event:
+ * - sq empty: 1 when the sync queue is empty, 0 otherwise
+ * - sq space: number of handles that can be stored in the queue
+ * - pb space: number of free slots in the channel's push buffer
+ * Any other event yields 0.  Must be called with the cdma lock held.
+ */
+static unsigned int cdma_status(struct nvhost_cdma *cdma, enum cdma_event event)
+{
+	if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+		return sync_queue_head(&cdma->sync_queue) ? 0 : 1;
+	if (event == CDMA_EVENT_SYNC_QUEUE_SPACE)
+		return sync_queue_space(&cdma->sync_queue);
+	if (event == CDMA_EVENT_PUSH_BUFFER_SPACE)
+		return push_buffer_space(&cdma->push_buffer);
+	return 0;
+}
+
+/**
+ * Sleep (if necessary) until the requested event happens
+ * - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
+ * - Returns 1
+ * - CDMA_EVENT_SYNC_QUEUE_SPACE : there is space in the sync queue.
+ * - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
+ * - Return the amount of space (> 0)
+ * Must be called with the cdma lock held.
+ * The loop re-checks the condition after each wakeup; update_cdma()
+ * clears cdma->event and ups the semaphore when the event occurs.
+ */
+static unsigned int wait_cdma(struct nvhost_cdma *cdma, enum cdma_event event)
+{
+	for (;;) {
+		unsigned int space = cdma_status(cdma, event);
+		if (space)
+			return space;
+
+		/* only one waiter per cdma is supported at a time */
+		BUG_ON(cdma->event != CDMA_EVENT_NONE);
+		cdma->event = event;
+
+		/* drop the lock while sleeping so the updater can run */
+		mutex_unlock(&cdma->lock);
+		down(&cdma->sem);
+		mutex_lock(&cdma->lock);
+	}
+}
+
+/**
+ * For all sync queue entries that have already finished according to the
+ * current sync point registers:
+ * - unpin & unref their mems
+ * - pop their push buffer slots
+ * - remove them from the sync queue
+ * This is normally called from the host code's worker thread, but can be
+ * called manually if necessary.
+ * Must be called with the cdma lock held.
+ */
+static void update_cdma(struct nvhost_cdma *cdma)
+{
+	bool signal = false;
+	struct nvhost_dev *dev = cdma_to_dev(cdma);
+
+	BUG_ON(!cdma->running);
+
+	/*
+	 * Walk the sync queue, reading the sync point registers as necessary,
+	 * to consume as many sync queue entries as possible without blocking
+	 */
+	for (;;) {
+		u32 syncpt_id, syncpt_val;
+		unsigned int nr_slots, nr_handles;
+		struct nvmap_handle **handles;
+		u32 *sync;
+
+		sync = sync_queue_head(&cdma->sync_queue);
+		if (!sync) {
+			if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+				signal = true;
+			break;
+		}
+
+		/* entry layout is defined by add_to_sync_queue() */
+		syncpt_id = sync[0];
+		syncpt_val = sync[1];
+
+		BUG_ON(syncpt_id == NVSYNCPT_INVALID);
+
+		/* Check whether this syncpt has completed, and bail if not */
+		if (!nvhost_syncpt_min_cmp(&dev->syncpt, syncpt_id, syncpt_val))
+			break;
+
+		nr_slots = sync[2];
+		nr_handles = sync[3];
+		handles = (struct nvmap_handle **)(sync + 4);
+
+		/* Unpin the memory */
+		if (nr_handles)
+			nvmap_unpin(handles, nr_handles);
+
+		/* Pop push buffer slots */
+		if (nr_slots) {
+			pop_from_push_buffer(&cdma->push_buffer, nr_slots);
+			if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
+				signal = true;
+		}
+
+		dequeue_sync_queue_head(&cdma->sync_queue);
+		if (cdma->event == CDMA_EVENT_SYNC_QUEUE_SPACE)
+			signal = true;
+	}
+
+	/* Wake up CdmaWait() if the requested event happened */
+	if (signal) {
+		cdma->event = CDMA_EVENT_NONE;
+		up(&cdma->sem);
+	}
+}
+
+/**
+ * Create a cdma: init locking, allocate the push buffer, and reset the
+ * sync queue.  Returns 0 on success or a negative errno from the push
+ * buffer allocation.
+ */
+int nvhost_cdma_init(struct nvhost_cdma *cdma)
+{
+	int err;
+
+	mutex_init(&cdma->lock);
+	sema_init(&cdma->sem, 0);
+	cdma->event = CDMA_EVENT_NONE;
+	cdma->running = false;
+	err = init_push_buffer(&cdma->push_buffer);
+	if (err)
+		return err;
+	reset_sync_queue(&cdma->sync_queue);
+	return 0;
+}
+
+/**
+ * Destroy a cdma.  Caller must have stopped it first
+ * (nvhost_cdma_stop); destroying a running cdma is a bug.
+ */
+void nvhost_cdma_deinit(struct nvhost_cdma *cdma)
+{
+	BUG_ON(cdma->running);
+	destroy_push_buffer(&cdma->push_buffer);
+}
+
+/*
+ * Stop command DMA: wait for all queued work to drain (sync queue
+ * empty), then halt DMA fetching.  May block.
+ */
+void nvhost_cdma_stop(struct nvhost_cdma *cdma)
+{
+	void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+	mutex_lock(&cdma->lock);
+	if (cdma->running) {
+		wait_cdma(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
+		writel(nvhost_channel_dmactrl(true, false, false),
+			chan_regs + HOST1X_CHANNEL_DMACTRL);
+		cdma->running = false;
+	}
+	mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Begin a cdma submit.
+ * Returns with cdma->lock HELD; the matching nvhost_cdma_end() releases
+ * it, so begin/end must bracket every submit on the same thread.
+ */
+void nvhost_cdma_begin(struct nvhost_cdma *cdma)
+{
+	mutex_lock(&cdma->lock);
+	if (!cdma->running)
+		start_cdma(cdma);
+	cdma->slots_free = 0;
+	cdma->slots_used = 0;
+}
+
+/**
+ * Push two words into a push buffer slot.
+ * Blocks as necessary if the push buffer is full.  slots_free caches
+ * the space reported by the last wait so we don't re-query the buffer
+ * on every push.  Must be called between cdma_begin and cdma_end.
+ */
+void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2)
+{
+	u32 slots_free = cdma->slots_free;
+	if (slots_free == 0) {
+		/* start DMA on what we have so far, then wait for space */
+		kick_cdma(cdma);
+		slots_free = wait_cdma(cdma, CDMA_EVENT_PUSH_BUFFER_SPACE);
+	}
+	cdma->slots_free = slots_free - 1;
+	cdma->slots_used++;
+	push_to_push_buffer(&cdma->push_buffer, op1, op2);
+}
+
+/**
+ * End a cdma submit
+ * Kick off DMA, add a contiguous block of memory handles to the sync queue,
+ * and a number of slots to be freed from the pushbuffer.
+ * Blocks as necessary if the sync queue is full.
+ * The handles for a submit must all be pinned at the same time, but they
+ * can be unpinned in smaller chunks.
+ * Releases the cdma lock taken by nvhost_cdma_begin().
+ */
+void nvhost_cdma_end(struct nvhost_cdma *cdma,
+		u32 sync_point_id, u32 sync_point_value,
+		struct nvmap_handle **handles, unsigned int nr_handles)
+{
+	kick_cdma(cdma);
+
+	/* loop also runs once with zero handles so slots_used is recorded */
+	while (nr_handles || cdma->slots_used) {
+		unsigned int count;
+		/*
+		 * Wait until there's enough room in the
+		 * sync queue to write something.
+		 */
+		count = wait_cdma(cdma, CDMA_EVENT_SYNC_QUEUE_SPACE);
+
+		/* Add reloc entries to sync queue (as many as will fit) */
+		if (count > nr_handles)
+			count = nr_handles;
+		add_to_sync_queue(&cdma->sync_queue,
+				sync_point_id, sync_point_value,
+				cdma->slots_used, handles, count);
+		/* NumSlots only goes in the first packet */
+		cdma->slots_used = 0;
+		handles += count;
+		nr_handles -= count;
+	}
+
+	mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Update cdma state according to current sync point values
+ * (locked wrapper around update_cdma).
+ */
+void nvhost_cdma_update(struct nvhost_cdma *cdma)
+{
+	mutex_lock(&cdma->lock);
+	update_cdma(cdma);
+	mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Manually spin until all CDMA has finished. Used if an async update
+ * cannot be scheduled for any reason.
+ * Drops the lock and yields between polls so the syncpoints can
+ * advance and other threads can make progress.
+ */
+void nvhost_cdma_flush(struct nvhost_cdma *cdma)
+{
+	mutex_lock(&cdma->lock);
+	while (sync_queue_head(&cdma->sync_queue)) {
+		update_cdma(cdma);
+		mutex_unlock(&cdma->lock);
+		schedule();
+		mutex_lock(&cdma->lock);
+	}
+	mutex_unlock(&cdma->lock);
+}
diff --git a/drivers/video/tegra/host/nvhost_cdma.h b/drivers/video/tegra/host/nvhost_cdma.h
new file mode 100644
index 000000000000..f0f40928fa4e
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cdma.h
@@ -0,0 +1,99 @@
+/*
+ * drivers/video/tegra/host/nvhost_cdma.h
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CDMA_H
+#define __NVHOST_CDMA_H
+
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/nvhost.h>
+#include <linux/nvmap.h>
+
+#include "nvhost_acm.h"
+
+/*
+ * cdma
+ *
+ * This is in charge of a host command DMA channel.
+ * Sends ops to a push buffer, and takes responsibility for unpinning
+ * (& possibly freeing) of memory after those ops have completed.
+ * Producer:
+ * begin
+ * push - send ops to the push buffer
+ * end - start command DMA and enqueue handles to be unpinned
+ * Consumer:
+ * update - call to update sync queue and push buffer, unpin memory
+ */
+
+/* Size of the sync queue. If it is too small, we won't be able to queue up
+ * many command buffers. If it is too large, we waste memory. */
+#define NVHOST_SYNC_QUEUE_SIZE 8192
+
+/* Number of gathers we allow to be queued up per channel. Must be a
+ power of two. Currently sized such that pushbuffer is 4KB (512*8B). */
+#define NVHOST_GATHER_QUEUE_SIZE 512
+
+struct push_buffer {
+ struct nvmap_handle *mem; /* handle to pushbuffer memory */
+ u32 *mapped; /* mapped pushbuffer memory */
+ u32 phys; /* physical address of pushbuffer */
+ u32 fence; /* index we've written */
+ u32 cur; /* index to write to */
+};
+
+struct sync_queue {
+ unsigned int read; /* read position within buffer */
+ unsigned int write; /* write position within buffer */
+ u32 buffer[NVHOST_SYNC_QUEUE_SIZE]; /* queue data */
+};
+
+enum cdma_event {
+ CDMA_EVENT_NONE, /* not waiting for any event */
+ CDMA_EVENT_SYNC_QUEUE_EMPTY, /* wait for empty sync queue */
+ CDMA_EVENT_SYNC_QUEUE_SPACE, /* wait for space in sync queue */
+ CDMA_EVENT_PUSH_BUFFER_SPACE /* wait for space in push buffer */
+};
+
+struct nvhost_cdma {
+ struct mutex lock; /* controls access to shared state */
+ struct semaphore sem; /* signalled when event occurs */
+ enum cdma_event event; /* event that sem is waiting for */
+ unsigned int slots_used; /* pb slots used in current submit */
+ unsigned int slots_free; /* pb slots free in current submit */
+ unsigned int last_put; /* last value written to DMAPUT */
+ struct push_buffer push_buffer; /* channel's push buffer */
+ struct sync_queue sync_queue; /* channel's sync queue */
+ bool running;
+};
+
+int nvhost_cdma_init(struct nvhost_cdma *cdma);
+void nvhost_cdma_deinit(struct nvhost_cdma *cdma);
+void nvhost_cdma_stop(struct nvhost_cdma *cdma);
+void nvhost_cdma_begin(struct nvhost_cdma *cdma);
+void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2);
+void nvhost_cdma_end(struct nvhost_cdma *cdma,
+ u32 sync_point_id, u32 sync_point_value,
+ struct nvmap_handle **handles, unsigned int nr_handles);
+void nvhost_cdma_update(struct nvhost_cdma *cdma);
+void nvhost_cdma_flush(struct nvhost_cdma *cdma);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_channel.c b/drivers/video/tegra/host/nvhost_channel.c
new file mode 100644
index 000000000000..7215e597699b
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_channel.c
@@ -0,0 +1,247 @@
+/*
+ * drivers/video/tegra/host/nvhost_channel.c
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_channel.h"
+#include "nvhost_dev.h"
+#include "nvhost_hwctx.h"
+#include <linux/platform_device.h>
+
+#define NVMODMUTEX_2D_FULL (1)
+#define NVMODMUTEX_2D_SIMPLE (2)
+#define NVMODMUTEX_2D_SB_A (3)
+#define NVMODMUTEX_2D_SB_B (4)
+#define NVMODMUTEX_3D (5)
+#define NVMODMUTEX_DISPLAYA (6)
+#define NVMODMUTEX_DISPLAYB (7)
+#define NVMODMUTEX_VI (8)
+#define NVMODMUTEX_DSI (9)
+
+static void power_2d(struct nvhost_module *mod, enum nvhost_power_action action);
+static void power_3d(struct nvhost_module *mod, enum nvhost_power_action action);
+static void power_mpe(struct nvhost_module *mod, enum nvhost_power_action action);
+
+/*
+ * Static description of each host1x channel: the syncpoints, wait
+ * bases and module mutexes the unit owns, its engine class and power
+ * callback.  Array index == channel number (see nvhost_channel_init).
+ */
+static const struct nvhost_channeldesc channelmap[] = {
+{
+	/* channel 0 */
+	.name	       = "display",
+	.syncpts       = BIT(NVSYNCPT_DISP0) | BIT(NVSYNCPT_DISP1) |
+			 BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
+	.modulemutexes = BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
+},
+{
+	/* channel 1 */
+	.name	       = "gr3d",
+	.syncpts       = BIT(NVSYNCPT_3D),
+	.waitbases     = BIT(NVWAITBASE_3D),
+	.modulemutexes = BIT(NVMODMUTEX_3D),
+	.class	       = NV_GRAPHICS_3D_CLASS_ID,
+	.power         = power_3d,
+},
+{
+	/* channel 2 */
+	.name	       = "gr2d",
+	.syncpts       = BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
+	.waitbases     = BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
+	.modulemutexes = BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
+			 BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
+	.power	       = power_2d,
+},
+{
+	/* channel 3 */
+	.name	       = "isp",
+	.syncpts       = 0,
+},
+{
+	/* channel 4 */
+	.name	       = "vi",
+	.syncpts       = BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
+			 BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
+			 BIT(NVSYNCPT_VI_ISP_4) | BIT(NVSYNCPT_VI_ISP_5),
+	.modulemutexes = BIT(NVMODMUTEX_VI),
+	.exclusive     = true,
+},
+{
+	/* channel 5 */
+	.name	       = "mpe",
+	.syncpts       = BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
+			 BIT(NVSYNCPT_MPE_WR_SAFE),
+	.waitbases     = BIT(NVWAITBASE_MPE),
+	.class	       = NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+	.power	       = power_mpe,
+	.exclusive     = true,
+},
+{
+	/* channel 6 */
+	.name	       = "dsi",
+	.syncpts       = BIT(NVSYNCPT_DSI),
+	.modulemutexes = BIT(NVMODMUTEX_DSI),
+}};
+
+/* Register aperture of channel @ndx inside the host1x MMIO window. */
+static inline void __iomem *channel_aperture(void __iomem *p, int ndx)
+{
+	return p + NV_HOST1X_CHANNEL0_BASE +
+		(ndx + NVHOST_CHANNEL_BASE) * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES;
+}
+
+/*
+ * One-time setup of channel @index: bind it to its static descriptor
+ * and register aperture, then register the hw context handler named
+ * after the channel.  Returns the handler-init result.
+ */
+int __init nvhost_channel_init(struct nvhost_channel *ch,
+			struct nvhost_dev *dev, int index)
+{
+	/* channelmap must describe every hardware channel */
+	BUILD_BUG_ON(NVHOST_NUMCHANNELS != ARRAY_SIZE(channelmap));
+
+	ch->dev = dev;
+	ch->desc = &channelmap[index];
+	ch->aperture = channel_aperture(dev->aperture, index);
+	mutex_init(&ch->reflock);
+	mutex_init(&ch->submitlock);
+
+	return nvhost_hwctx_handler_init(&ch->ctxhandler, ch->desc->name);
+}
+
+/*
+ * Take a reference on @ch.  The first reference powers up the module
+ * and initializes command DMA.  Exclusive channels (vi, mpe) allow at
+ * most one concurrent user.  Returns @ch, or ERR_PTR on failure.
+ */
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch)
+{
+	int err = 0;
+	mutex_lock(&ch->reflock);
+	if (ch->refcount == 0) {
+		/* first user: bring up power management, then cdma */
+		err = nvhost_module_init(&ch->mod, ch->desc->name,
+					ch->desc->power, &ch->dev->mod,
+					&ch->dev->pdev->dev);
+		if (!err) {
+			err = nvhost_cdma_init(&ch->cdma);
+			if (err)
+				nvhost_module_deinit(&ch->mod);
+		}
+	} else if (ch->desc->exclusive) {
+		err = -EBUSY;
+	}
+	if (!err) {
+		ch->refcount++;
+	}
+	mutex_unlock(&ch->reflock);
+
+	return err ? ERR_PTR(err) : ch;
+}
+
+/*
+ * Drop a reference taken by nvhost_getchannel().  If @ctx is the
+ * channel's current hw context it is detached first.  Dropping the
+ * last reference tears down power management and command DMA.
+ */
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx)
+{
+	if (ctx) {
+		mutex_lock(&ch->submitlock);
+		if (ch->cur_ctx == ctx)
+			ch->cur_ctx = NULL;
+		mutex_unlock(&ch->submitlock);
+	}
+
+	mutex_lock(&ch->reflock);
+	if (ch->refcount == 1) {
+		nvhost_module_deinit(&ch->mod);
+		/* cdma may already be stopped, that's ok */
+		nvhost_cdma_stop(&ch->cdma);
+		nvhost_cdma_deinit(&ch->cdma);
+	}
+	ch->refcount--;
+	mutex_unlock(&ch->reflock);
+}
+
+/*
+ * Quiesce the channel for suspend.  The engine must already be powered
+ * down; command DMA is stopped only if the channel has users.
+ */
+void nvhost_channel_suspend(struct nvhost_channel *ch)
+{
+	mutex_lock(&ch->reflock);
+	BUG_ON(nvhost_module_powered(&ch->mod));
+	if (ch->refcount)
+		nvhost_cdma_stop(&ch->cdma);
+	mutex_unlock(&ch->reflock);
+}
+
+/*
+ * Push a submit into the channel's command DMA stream:
+ *  - schedule the requested CPU interrupts against @syncpt_id,
+ *  - push all opcode pairs between cdma begin/end,
+ *  - hand the pinned handles to cdma for unpinning once @syncpt_val
+ *    is reached.
+ * Caller must hold ch->submitlock (see nvhost_ioctl_channel_flush and
+ * power_3d).
+ */
+void nvhost_channel_submit(
+	struct nvhost_channel *ch,
+	struct nvhost_op_pair *ops,
+	int num_pairs,
+	struct nvhost_cpuinterrupt *intrs,
+	int num_intrs,
+	struct nvmap_handle **unpins,
+	int num_unpins,
+	u32 syncpt_id,
+	u32 syncpt_val)
+{
+	int i;
+	struct nvhost_op_pair* p;
+
+	/* schedule interrupts */
+	for (i = 0; i < num_intrs; i++) {
+		nvhost_intr_add_action(&ch->dev->intr, syncpt_id, intrs[i].syncpt_val,
+				NVHOST_INTR_ACTION_CTXSAVE, intrs[i].intr_data, NULL);
+	}
+
+	/* begin a CDMA submit */
+	nvhost_cdma_begin(&ch->cdma);
+
+	/* push ops */
+	for (i = 0, p = ops; i < num_pairs; i++, p++)
+		nvhost_cdma_push(&ch->cdma, p->op1, p->op2);
+
+	/* end CDMA submit & stash pinned hMems into sync queue for later cleanup */
+	nvhost_cdma_end(&ch->cdma, syncpt_id, syncpt_val, unpins, num_unpins);
+}
+
+/* Power callback for the 2D engine; currently only placeholder work. */
+static void power_2d(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+	/* TODO: [ahatala 2010-06-17] reimplement EPP hang war */
+	if (action == NVHOST_POWER_ACTION_OFF) {
+		/* TODO: [ahatala 2010-06-17] reset EPP */
+	}
+}
+
+/*
+ * Power callback for the 3D engine.  Before power-off, if a hw context
+ * is resident on the engine, submit a context-save gather, mark the
+ * context valid, and synchronously wait for the save to reach its
+ * syncpoint so the engine can be safely powered down.
+ */
+static void power_3d(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+	struct nvhost_channel *ch = container_of(mod, struct nvhost_channel, mod);
+
+	if (action == NVHOST_POWER_ACTION_OFF) {
+		mutex_lock(&ch->submitlock);
+		if (ch->cur_ctx) {
+			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+			struct nvhost_op_pair save;
+			struct nvhost_cpuinterrupt ctxsw;
+			u32 syncval;
+			/* reserve the syncpt increments the save will make */
+			syncval = nvhost_syncpt_incr_max(&ch->dev->syncpt,
+							NVSYNCPT_3D,
+							ch->cur_ctx->save_incrs);
+			save.op1 = nvhost_opcode_gather(0, ch->cur_ctx->save_size);
+			save.op2 = ch->cur_ctx->save_phys;
+			ctxsw.intr_data = ch->cur_ctx;
+			/* ctx-save interrupt fires one increment before completion */
+			ctxsw.syncpt_val = syncval - 1;
+			nvhost_channel_submit(ch, &save, 1, &ctxsw, 1, NULL, 0, NVSYNCPT_3D, syncval);
+			ch->cur_ctx->last_access_id = NVSYNCPT_3D;
+			ch->cur_ctx->last_access_value = syncval;
+			ch->cur_ctx->valid = true;
+			ch->cur_ctx = NULL;
+			/* block until the save has completed on hardware */
+			nvhost_intr_add_action(&ch->dev->intr, NVSYNCPT_3D, syncval,
+					NVHOST_INTR_ACTION_WAKEUP, &wq, NULL);
+			wait_event(wq, nvhost_syncpt_min_cmp(&ch->dev->syncpt, NVSYNCPT_3D, syncval));
+			nvhost_cdma_update(&ch->cdma);
+		}
+		mutex_unlock(&ch->submitlock);
+	}
+}
+
+/* Power callback for MPE; no special handling required yet. */
+static void power_mpe(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+}
diff --git a/drivers/video/tegra/host/nvhost_channel.h b/drivers/video/tegra/host/nvhost_channel.h
new file mode 100644
index 000000000000..a32f97266c02
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_channel.h
@@ -0,0 +1,94 @@
+/*
+ * drivers/video/tegra/host/nvhost_channel.h
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CHANNEL_H
+#define __NVHOST_CHANNEL_H
+
+#include "nvhost_cdma.h"
+#include "nvhost_acm.h"
+#include "nvhost_hwctx.h"
+
+#include <linux/cdev.h>
+#include <linux/io.h>
+
+#define NVHOST_CHANNEL_BASE 0
+#define NVHOST_NUMCHANNELS (NV_HOST1X_CHANNELS - 1)
+#define NVHOST_MAX_GATHERS 512
+#define NVHOST_MAX_HANDLES 1280
+
+struct nvhost_dev;
+
+/* Static per-channel hardware description (see channelmap[]). */
+struct nvhost_channeldesc {
+	const char *name;
+	nvhost_modulef power;	/* power management callback */
+	u32 syncpts;		/* bitmask of owned syncpoints */
+	u32 waitbases;		/* bitmask of owned wait bases */
+	u32 modulemutexes;	/* bitmask of owned module mutexes */
+	u32 class;		/* engine class id, 0 if none */
+	bool exclusive;		/* at most one user at a time */
+};
+
+/* Runtime state of one host1x channel. */
+struct nvhost_channel {
+	int refcount;		/* open count, guarded by reflock */
+	struct mutex reflock;
+	struct mutex submitlock; /* serializes submits & ctx switching */
+	void __iomem *aperture;
+	struct nvhost_dev *dev;
+	const struct nvhost_channeldesc *desc;
+	struct nvhost_hwctx *cur_ctx; /* hw context resident on engine */
+	struct device *node;
+	struct cdev cdev;
+	struct nvhost_hwctx_handler ctxhandler;
+	struct nvhost_module mod;
+	struct nvhost_cdma cdma;
+};
+
+/* One opcode pair pushed to the command FIFO. */
+struct nvhost_op_pair {
+	u32 op1;
+	u32 op2;
+};
+
+/* CPU interrupt to raise when syncpt_val is reached. */
+struct nvhost_cpuinterrupt {
+	u32 syncpt_val;
+	void *intr_data;
+};
+
+int nvhost_channel_init(
+ struct nvhost_channel *ch,
+ struct nvhost_dev *dev, int index);
+
+void nvhost_channel_submit(
+ struct nvhost_channel *ch,
+ struct nvhost_op_pair *ops,
+ int num_pairs,
+ struct nvhost_cpuinterrupt *intrs,
+ int num_intrs,
+ struct nvmap_handle **unpins,
+ int num_unpins,
+ u32 syncpt_id,
+ u32 syncpt_val);
+
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch);
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx);
+void nvhost_channel_suspend(struct nvhost_channel *ch);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_cpuaccess.c b/drivers/video/tegra/host/nvhost_cpuaccess.c
new file mode 100644
index 000000000000..efc4c07ba12d
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cpuaccess.c
@@ -0,0 +1,139 @@
+/*
+ * drivers/video/tegra/host/nvhost_cpuaccess.c
+ *
+ * Tegra Graphics Host Cpu Register Access
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_cpuaccess.h"
+#include "nvhost_dev.h"
+#include <linux/string.h>
+
+#define cpuaccess_to_dev(ctx) container_of(ctx, struct nvhost_dev, cpuaccess)
+
+/* Platform resource names; order must match enum nvhost_module_id. */
+static const char *module_resource_names[NVHOST_MODULE_NUM] = {
+	"display",
+	"display2",
+	"vi",
+	"isp",
+	"mpe"
+};
+
+/*
+ * Claim and map the register apertures of all client modules.  The two
+ * display apertures are optional; their slots are left NULL if the
+ * platform does not provide them.  Returns 0 or -ENXIO.
+ *
+ * NOTE(review): on a mid-loop failure, resources acquired for earlier
+ * modules are left in place -- presumably the caller is expected to
+ * invoke nvhost_cpuaccess_deinit() on error; confirm at the call site.
+ */
+int nvhost_cpuaccess_init(struct nvhost_cpuaccess *ctx,
+			struct platform_device *pdev)
+{
+	int i;
+	for (i = 0; i < NVHOST_MODULE_NUM; i++) {
+		struct resource *mem;
+		mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						module_resource_names[i]);
+		if (!mem) {
+			/* display apertures are optional */
+			if (i == NVHOST_MODULE_DISPLAY_A ||
+			    i == NVHOST_MODULE_DISPLAY_B) {
+				ctx->reg_mem[i] = NULL;
+				ctx->regs[i] = NULL;
+				continue;
+			}
+			dev_err(&pdev->dev, "missing module memory resource\n");
+			return -ENXIO;
+		}
+		ctx->reg_mem[i] = request_mem_region(mem->start,
+						resource_size(mem), pdev->name);
+		if (!ctx->reg_mem[i]) {
+			dev_err(&pdev->dev, "failed to get module memory\n");
+			return -ENXIO;
+		}
+		ctx->regs[i] = ioremap(mem->start, resource_size(mem));
+		if (!ctx->regs[i]) {
+			dev_err(&pdev->dev, "failed to map module registers\n");
+			return -ENXIO;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Undo nvhost_cpuaccess_init().  Safe on a partially initialized
+ * context (unclaimed slots are NULL) and safe to call twice: each
+ * entry is cleared after release.
+ */
+void nvhost_cpuaccess_deinit(struct nvhost_cpuaccess *ctx)
+{
+	int i;
+	for (i = 0; i < NVHOST_MODULE_NUM; i++) {
+		if (ctx->regs[i]) {
+			iounmap(ctx->regs[i]);
+			ctx->regs[i] = NULL;
+		}
+		if (ctx->reg_mem[i]) {
+			/*
+			 * request_mem_region() pairs with
+			 * release_mem_region(); release_resource() alone
+			 * leaks the struct resource allocated for the
+			 * region.
+			 */
+			release_mem_region(ctx->reg_mem[i]->start,
+					resource_size(ctx->reg_mem[i]));
+			ctx->reg_mem[i] = NULL;
+		}
+	}
+}
+
+/*
+ * Try to take hardware module mutex @idx.  Keeps the host module
+ * powered while the lock is held; the matching power drop happens in
+ * nvhost_mutex_unlock().  Returns 0 on success, -ERESTARTSYS if the
+ * lock is currently owned (caller may retry).
+ */
+int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx)
+{
+	struct nvhost_dev *dev = cpuaccess_to_dev(ctx);
+	void __iomem *sync_regs = dev->sync_aperture;
+	u32 reg;
+
+	/* mlock registers returns 0 when the lock is aquired.
+	 * writing 0 clears the lock. */
+	nvhost_module_busy(&dev->mod);
+	reg = readl(sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
+	if (reg) {
+		nvhost_module_idle(&dev->mod);
+		return -ERESTARTSYS;
+	}
+	return 0;
+}
+
+/*
+ * Release hardware module mutex @idx (write 0 clears the mlock) and
+ * drop the power reference taken in nvhost_mutex_try_lock().
+ */
+void nvhost_mutex_unlock(struct nvhost_cpuaccess *ctx, unsigned int idx)
+{
+	struct nvhost_dev *dev = cpuaccess_to_dev(ctx);
+
+	writel(0, dev->sync_aperture + (HOST1X_SYNC_MLOCK_0 + idx * 4));
+	nvhost_module_idle(&dev->mod);
+}
+
+/*
+ * Read @size bytes of @module registers starting at @offset into
+ * @values.  @size must be a multiple of 4.  The module is kept powered
+ * for the duration of the transfer.
+ */
+void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+			u32 offset, size_t size, void *values)
+{
+	struct nvhost_dev *dev = cpuaccess_to_dev(ctx);
+	void __iomem *p = ctx->regs[module] + offset;
+	u32* out = (u32*)values;
+	BUG_ON(size & 3);
+	size >>= 2;
+	nvhost_module_busy(&dev->mod);
+	while (size--) {
+		*(out++) = readl(p);
+		p += 4;
+	}
+	/* order the reads before power is allowed to drop */
+	rmb();
+	nvhost_module_idle(&dev->mod);
+}
+
+/*
+ * Write @size bytes from @values to @module registers starting at
+ * @offset.  @size must be a multiple of 4.  The module is kept powered
+ * for the duration of the transfer.
+ */
+void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+			u32 offset, size_t size, const void *values)
+{
+	struct nvhost_dev *dev = cpuaccess_to_dev(ctx);
+	void __iomem *p = ctx->regs[module] + offset;
+	const u32* in = (const u32*)values;
+	BUG_ON(size & 3);
+	size >>= 2;
+	nvhost_module_busy(&dev->mod);
+	while (size--) {
+		writel(*(in++), p);
+		p += 4;
+	}
+	/* order the writes before power is allowed to drop */
+	wmb();
+	nvhost_module_idle(&dev->mod);
+}
diff --git a/drivers/video/tegra/host/nvhost_cpuaccess.h b/drivers/video/tegra/host/nvhost_cpuaccess.h
new file mode 100644
index 000000000000..919e47c97b9e
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cpuaccess.h
@@ -0,0 +1,71 @@
+/*
+ * drivers/video/tegra/host/nvhost_cpuaccess.h
+ *
+ * Tegra Graphics Host Cpu Register Access
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CPUACCESS_H
+#define __NVHOST_CPUACCESS_H
+
+#include "nvhost_hardware.h"
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/* Client modules whose register apertures host can access directly. */
+enum nvhost_module_id {
+	NVHOST_MODULE_DISPLAY_A = 0,
+	NVHOST_MODULE_DISPLAY_B,
+	NVHOST_MODULE_VI,
+	NVHOST_MODULE_ISP,
+	NVHOST_MODULE_MPE,
+#if 0
+	/* TODO: [ahatala 2010-07-02] find out if these are needed */
+	NVHOST_MODULE_FUSE,
+	NVHOST_MODULE_APB_MISC,
+	NVHOST_MODULE_CLK_RESET,
+#endif
+	NVHOST_MODULE_NUM
+};
+
+/* Claimed memory regions and CPU mappings, indexed by module id. */
+struct nvhost_cpuaccess {
+	struct resource *reg_mem[NVHOST_MODULE_NUM];
+	void __iomem *regs[NVHOST_MODULE_NUM];
+};
+
+int nvhost_cpuaccess_init(struct nvhost_cpuaccess *ctx,
+ struct platform_device *pdev);
+
+void nvhost_cpuaccess_deinit(struct nvhost_cpuaccess *ctx);
+
+int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx);
+
+void nvhost_mutex_unlock(struct nvhost_cpuaccess *ctx, unsigned int idx);
+
+/* True if @module's registers were mapped (display slots may be NULL). */
+static inline bool nvhost_access_module_regs(
+	struct nvhost_cpuaccess *ctx, u32 module)
+{
+	return (module < NVHOST_MODULE_NUM) && ctx->reg_mem[module];
+}
+
+void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, void *values);
+
+void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, const void *values);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_dev.c b/drivers/video/tegra/host/nvhost_dev.c
new file mode 100644
index 000000000000..6a5dbcec58ea
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_dev.c
@@ -0,0 +1,779 @@
+/*
+ * drivers/video/tegra/host/nvhost_dev.c
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_dev.h"
+
+#include <linux/nvhost.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+#include <asm/io.h>
+
+#define DRIVER_NAME "tegra_grhost"
+#define IFACE_NAME "nvhost"
+
+static int nvhost_major = NVHOST_MAJOR;
+static int nvhost_minor = NVHOST_CHANNEL_BASE;
+
+/* Per-open state of a channel device node. */
+struct nvhost_channel_userctx {
+	struct nvhost_channel *ch;
+	struct nvhost_hwctx *hwctx;	/* hw context, NULL if not needed */
+	struct file *nvmapctx;		/* nvmap file used to pin handles */
+	/*
+	 * The next four fields are filled by a single copy_from_user of
+	 * a struct nvhost_submit_hdr in nvhost_channelwrite() -- their
+	 * order and types must match that struct's layout.
+	 */
+	u32 syncpt_id;
+	u32 syncpt_incrs;
+	u32 cmdbufs_pending;
+	u32 relocs_pending;
+	struct nvmap_handle *gather_mem;
+	struct nvhost_op_pair *gathers;	/* CPU mapping of gather_mem */
+	int num_gathers;
+	int pinarray_size;
+	struct nvmap_pinarray_elem pinarray[NVHOST_MAX_HANDLES];
+	struct nvmap_handle *unpinarray[NVHOST_MAX_HANDLES];
+	/* hw context (if needed) */
+};
+
+/* Per-open state of the control device node. */
+struct nvhost_ctrl_userctx {
+	struct nvhost_dev *dev;
+	u32 mod_locks[NV_HOST1X_NB_MLOCKS]; /* locks held by this fd */
+};
+
+/*
+ * Release a channel fd: drop the channel reference, wait for the last
+ * hardware access of a valid hw context before destroying it, then
+ * free the gather buffer and the nvmap file reference.
+ */
+static int nvhost_channelrelease(struct inode *inode, struct file *filp)
+{
+	struct nvhost_channel_userctx *priv = filp->private_data;
+	filp->private_data = NULL;
+
+	nvhost_putchannel(priv->ch, priv->hwctx);
+	if (priv->hwctx) {
+		if (priv->hwctx->valid)
+			nvhost_syncpt_wait(&priv->ch->dev->syncpt,
+					priv->hwctx->last_access_id,
+					priv->hwctx->last_access_value);
+		priv->ch->ctxhandler.deinit(priv->hwctx);
+	}
+	if (priv->gather_mem)
+		nvmap_free(priv->gather_mem, priv->gathers);
+	if (priv->nvmapctx)
+		fput(priv->nvmapctx);
+	kfree(priv);
+	return 0;
+}
+
+/*
+ * Open a channel device node: take a channel reference, allocate the
+ * per-fd context plus (when the channel has a ctx handler) an embedded
+ * nvhost_hwctx in the same allocation, and allocate the gather buffer.
+ *
+ * NOTE(review): every failure path returns -ENOMEM, including a
+ * ctxhandler.init() failure whose real error code is discarded.
+ */
+static int nvhost_channelopen(struct inode *inode, struct file *filp)
+{
+	struct nvhost_channel_userctx *priv;
+	struct nvhost_channel *ch;
+	size_t hwctx_mem = 0;
+	size_t alloc_size = 0;
+
+	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
+	ch = nvhost_getchannel(ch);
+	if (IS_ERR(ch))
+		return PTR_ERR(ch);
+
+	alloc_size += sizeof(*priv);
+	if (ch->ctxhandler.init) {
+		/* hw context lives at the tail of the same allocation */
+		hwctx_mem = alloc_size;
+		alloc_size += sizeof(struct nvhost_hwctx);
+	}
+	priv = kzalloc(alloc_size, GFP_KERNEL);
+	if (!priv) {
+		nvhost_putchannel(ch, NULL);
+		return -ENOMEM;
+	}
+	filp->private_data = priv;
+	priv->ch = ch;
+	priv->gather_mem = nvmap_alloc(
+		sizeof(struct nvhost_op_pair) * NVHOST_MAX_GATHERS, 32,
+		NVMEM_HANDLE_CACHEABLE, (void**)&priv->gathers);
+	if (IS_ERR_OR_NULL(priv->gather_mem))
+		goto fail;
+	if (ch->ctxhandler.init) {
+		priv->hwctx = (struct nvhost_hwctx *)(((u8*)priv) + hwctx_mem);
+		priv->hwctx->channel = ch;
+		if (ch->ctxhandler.init(priv->hwctx) < 0) {
+			priv->hwctx = NULL;
+			goto fail;
+		}
+	}
+
+	return 0;
+fail:
+	/* release undoes everything allocated so far */
+	nvhost_channelrelease(inode, filp);
+	return -ENOMEM;
+}
+
+/*
+ * Record a GATHER opcode at gather slot @idx and queue a pin request
+ * that will patch the buffer's physical address into the opcode's
+ * second word (op2) when the pin array is processed at flush time.
+ */
+static void add_gather(struct nvhost_channel_userctx *ctx, int idx,
+		struct nvmap_handle *mem, u32 words, u32 offset)
+{
+	struct nvmap_pinarray_elem *pin;
+	pin = &ctx->pinarray[ctx->pinarray_size++];
+	pin->patch_mem = ctx->gather_mem;
+	pin->patch_offset = (idx * sizeof(struct nvhost_op_pair)) +
+		offsetof(struct nvhost_op_pair, op2);
+	pin->pin_mem = mem;
+	pin->pin_offset = offset;
+	ctx->gathers[idx].op1 = nvhost_opcode_gather(0, words);
+}
+
+/* Abandon an in-progress submit so write() expects a fresh header. */
+static void reset_submit(struct nvhost_channel_userctx *ctx)
+{
+	ctx->cmdbufs_pending = 0;
+	ctx->relocs_pending = 0;
+}
+
+/*
+ * write() implements a streaming submit protocol:
+ *   1. a struct nvhost_submit_hdr announcing syncpt id/incrs and the
+ *      number of cmdbufs and relocs to follow,
+ *   2. that many struct nvhost_cmdbuf entries (each becomes a gather),
+ *   3. that many struct nvhost_reloc entries (appended to pinarray).
+ * State persists across write() calls; the actual submit happens in
+ * the FLUSH ioctl.  Returns bytes consumed, or a negative error (any
+ * error resets the in-progress submit).
+ */
+static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
+				size_t count, loff_t *offp)
+{
+	struct nvhost_channel_userctx *priv = filp->private_data;
+	size_t remaining = count;
+	int err = 0;
+
+	while (remaining) {
+		size_t consumed;
+		if (!priv->relocs_pending && !priv->cmdbufs_pending) {
+			/* expecting a submit header */
+			consumed = sizeof(struct nvhost_submit_hdr);
+			if (remaining < consumed)
+				break;
+			/* header fields land directly in priv (see struct) */
+			if (copy_from_user(&priv->syncpt_id, buf, consumed)) {
+				err = -EFAULT;
+				break;
+			}
+			/* a submit with no cmdbufs is malformed */
+			if (!priv->cmdbufs_pending) {
+				err = -EFAULT;
+				break;
+			}
+			/* leave room for ctx switch */
+			priv->num_gathers = 2;
+			priv->pinarray_size = 0;
+		} else if (priv->cmdbufs_pending) {
+			struct nvhost_cmdbuf cmdbuf;
+			consumed = sizeof(cmdbuf);
+			if (remaining < consumed)
+				break;
+			if (copy_from_user(&cmdbuf, buf, consumed)) {
+				err = -EFAULT;
+				break;
+			}
+			add_gather(priv, priv->num_gathers++,
+				(struct nvmap_handle *)cmdbuf.mem,
+				cmdbuf.words, cmdbuf.offset);
+			priv->cmdbufs_pending--;
+		} else if (priv->relocs_pending) {
+			/* relocs can be consumed in bulk */
+			int numrelocs = remaining / sizeof(struct nvhost_reloc);
+			if (!numrelocs)
+				break;
+			numrelocs = min_t(int, numrelocs, priv->relocs_pending);
+			consumed = numrelocs * sizeof(struct nvhost_reloc);
+			if (copy_from_user(&priv->pinarray[priv->pinarray_size],
+					buf, consumed)) {
+				err = -EFAULT;
+				break;
+			}
+			priv->pinarray_size += numrelocs;
+			priv->relocs_pending -= numrelocs;
+		} else {
+			err = -EFAULT;
+			break;
+		}
+		remaining -= consumed;
+		buf += consumed;
+	}
+
+	if (err < 0) {
+		dev_err(&priv->ch->dev->pdev->dev, "channel write error\n");
+		reset_submit(priv);
+		return err;
+	}
+
+	return (count - remaining);
+}
+
+/*
+ * NVHOST_IOCTL_CHANNEL_FLUSH: submit the gathers accumulated by
+ * nvhost_channelwrite().  Pins all referenced memory handles, prepends
+ * context save/restore gathers (and a setclass where the engine needs
+ * one), pushes everything through command DMA and schedules a
+ * submit-complete interrupt.  On success args->value holds the
+ * absolute syncpoint value marking completion of this submit.
+ */
+static int nvhost_ioctl_channel_flush(
+	struct nvhost_channel_userctx *ctx,
+	struct nvhost_get_param_args *args)
+{
+	struct nvhost_cpuinterrupt ctxsw;
+	int gather_idx = 2;
+	int num_intrs = 0;
+	u32 syncval;
+	int num_unpin;
+	int err;
+
+	if (ctx->relocs_pending || ctx->cmdbufs_pending) {
+		reset_submit(ctx);
+		dev_err(&ctx->ch->dev->pdev->dev, "channel submit out of sync\n");
+		return -EFAULT;
+	}
+	if (!ctx->nvmapctx) {
+		dev_err(&ctx->ch->dev->pdev->dev, "no nvmap context set\n");
+		return -EFAULT;
+	}
+	/* nothing queued beyond the two slots reserved for ctx switch */
+	if (ctx->num_gathers <= 2)
+		return 0;
+
+	/* keep module powered */
+	nvhost_module_busy(&ctx->ch->mod);
+
+	/* pin mem handles and patch physical addresses */
+	err = nvmap_pin_array(ctx->nvmapctx, ctx->pinarray, ctx->pinarray_size,
+			ctx->unpinarray, &num_unpin, true);
+	if (err) {
+		dev_warn(&ctx->ch->dev->pdev->dev, "nvmap_pin_array failed: %d\n", err);
+		nvhost_module_idle(&ctx->ch->mod);
+		return err;
+	}
+
+	/* get submit lock */
+	err = mutex_lock_interruptible(&ctx->ch->submitlock);
+	if (err) {
+		nvmap_unpin(ctx->unpinarray, num_unpin);
+		nvhost_module_idle(&ctx->ch->mod);
+		return err;
+	}
+
+	/* context switch: restore ours, schedule save of the outgoing one */
+	if (ctx->ch->cur_ctx != ctx->hwctx) {
+		struct nvhost_hwctx *hw = ctx->hwctx;
+		if (hw && hw->valid) {
+			gather_idx--;
+			ctx->gathers[gather_idx].op1 =
+				nvhost_opcode_gather(0, hw->restore_size);
+			ctx->gathers[gather_idx].op2 = hw->restore_phys;
+			ctx->syncpt_incrs += hw->restore_incrs;
+		}
+		hw = ctx->ch->cur_ctx;
+		if (hw) {
+			gather_idx--;
+			ctx->gathers[gather_idx].op1 =
+				nvhost_opcode_gather(0, hw->save_size);
+			ctx->gathers[gather_idx].op2 = hw->save_phys;
+			ctx->syncpt_incrs += hw->save_incrs;
+			num_intrs = 1;
+			ctxsw.syncpt_val = hw->save_incrs - 1;
+			ctxsw.intr_data = hw;
+		}
+	}
+
+	/* add a setclass for modules that require it */
+	if (gather_idx == 2 && ctx->ch->desc->class) {
+		gather_idx--;
+		ctx->gathers[gather_idx].op1 =
+			nvhost_opcode_setclass(ctx->ch->desc->class, 0, 0);
+		ctx->gathers[gather_idx].op2 = NVHOST_OPCODE_NOOP;
+	}
+
+	/* get absolute sync value */
+	if (BIT(ctx->syncpt_id) & NVSYNCPTS_CLIENT_MANAGED)
+		syncval = nvhost_syncpt_set_max(&ctx->ch->dev->syncpt,
+						ctx->syncpt_id, ctx->syncpt_incrs);
+	else
+		syncval = nvhost_syncpt_incr_max(&ctx->ch->dev->syncpt,
+						ctx->syncpt_id, ctx->syncpt_incrs);
+
+	/*
+	 * Patch absolute syncpt value into the interrupt trigger -- but
+	 * only when a context save was actually scheduled above;
+	 * otherwise ctxsw is uninitialized and must not be read.
+	 */
+	if (num_intrs)
+		ctxsw.syncpt_val += syncval - ctx->syncpt_incrs;
+
+	nvhost_channel_submit(ctx->ch, &ctx->gathers[gather_idx],
+			ctx->num_gathers - gather_idx, &ctxsw, num_intrs,
+			ctx->unpinarray, num_unpin, ctx->syncpt_id, syncval);
+
+	/* schedule a submit complete interrupt */
+	nvhost_intr_add_action(&ctx->ch->dev->intr, ctx->syncpt_id, syncval,
+			NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ctx->ch, NULL);
+
+	/* update current context */
+	if (ctx->ch->cur_ctx != ctx->hwctx) {
+		struct nvhost_hwctx *hw = ctx->ch->cur_ctx;
+		if (hw) {
+			hw->last_access_id = ctx->syncpt_id;
+			hw->last_access_value = syncval;
+			hw->valid = true;
+		}
+		hw = ctx->hwctx;
+		hw->last_access_id = ctx->syncpt_id;
+		hw->last_access_value = syncval;
+		ctx->ch->cur_ctx = hw;
+	}
+	mutex_unlock(&ctx->ch->submitlock);
+	args->value = syncval;
+	return 0;
+}
+
+/*
+ * ioctl dispatcher for channel fds.  Arguments are staged through a
+ * stack buffer: copied in for _IOC_WRITE commands and copied back out
+ * for _IOC_READ commands after the handler runs.
+ */
+static long nvhost_channelctl(struct file *filp,
+	unsigned int cmd, unsigned long arg)
+{
+	struct nvhost_channel_userctx *priv = filp->private_data;
+	u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
+	int err = 0;
+
+	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+	    (_IOC_NR(cmd) == 0) ||
+	    (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST))
+		return -EFAULT;
+
+	BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE);
+
+	if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	switch (cmd) {
+	case NVHOST_IOCTL_CHANNEL_FLUSH:
+		err = nvhost_ioctl_channel_flush(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
+		((struct nvhost_get_param_args *)buf)->value =
+			priv->ch->desc->syncpts;
+		break;
+	case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
+		((struct nvhost_get_param_args *)buf)->value =
+			priv->ch->desc->waitbases;
+		break;
+	case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
+		((struct nvhost_get_param_args *)buf)->value =
+			priv->ch->desc->modulemutexes;
+		break;
+	case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
+	{
+		/* swap in a new nvmap context; fd 0 clears it */
+		int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
+		struct file *newctx = NULL;
+		if (fd) {
+			newctx = fget(fd);
+			if (!newctx) {
+				err = -EFAULT;
+				break;
+			}
+			err = nvmap_validate_file(newctx);
+			if (err) {
+				fput(newctx);
+				break;
+			}
+		}
+		if (priv->nvmapctx)
+			fput(priv->nvmapctx);
+		priv->nvmapctx = newctx;
+		break;
+	}
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	/*
+	 * copy_to_user returns the number of bytes NOT copied; map any
+	 * nonzero remainder to -EFAULT instead of leaking the raw count
+	 * back to user space as a bogus positive return value.
+	 */
+	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
+		err = copy_to_user((void __user *)arg, buf,
+				_IOC_SIZE(cmd)) ? -EFAULT : 0;
+
+	return err;
+}
+
+/* File operations for per-channel device nodes (/dev/nvhost-<name>). */
+static struct file_operations nvhost_channelops = {
+	.owner = THIS_MODULE,
+	.release = nvhost_channelrelease,
+	.open = nvhost_channelopen,
+	.write = nvhost_channelwrite,
+	.unlocked_ioctl = nvhost_channelctl
+};
+
+/*
+ * Release a control fd: drop any module locks still held by this fd
+ * (slot 0 is the host power reference, others are hardware mlocks).
+ */
+static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
+{
+	struct nvhost_ctrl_userctx *priv = filp->private_data;
+	int i;
+
+	filp->private_data = NULL;
+	if (priv->mod_locks[0])
+		nvhost_module_idle(&priv->dev->mod);
+	for (i = 1; i < NV_HOST1X_NB_MLOCKS; i++)
+		if (priv->mod_locks[i])
+			nvhost_mutex_unlock(&priv->dev->cpuaccess, i);
+	kfree(priv);
+	return 0;
+}
+
+/* Open the control device node: allocate per-fd lock-tracking state. */
+static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
+{
+	struct nvhost_dev *host;
+	struct nvhost_ctrl_userctx *priv;
+
+	host = container_of(inode->i_cdev, struct nvhost_dev, cdev);
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (priv == NULL)
+		return -ENOMEM;
+
+	priv->dev = host;
+	filp->private_data = priv;
+	return 0;
+}
+
+/* NVHOST_IOCTL_CTRL_SYNCPT_READ: return current value of a syncpoint. */
+static int nvhost_ioctl_ctrl_syncpt_read(
+	struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_syncpt_read_args *args)
+{
+	if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+		return -EINVAL;
+	args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
+	return 0;
+}
+
+/* NVHOST_IOCTL_CTRL_SYNCPT_INCR: CPU-increment a syncpoint. */
+static int nvhost_ioctl_ctrl_syncpt_incr(
+	struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_syncpt_incr_args *args)
+{
+	if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+		return -EINVAL;
+	nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
+	return 0;
+}
+
+/*
+ * NVHOST_IOCTL_CTRL_SYNCPT_WAIT: block until a syncpoint reaches the
+ * given threshold, with a millisecond timeout (NVHOST_NO_TIMEOUT waits
+ * indefinitely).
+ */
+static int nvhost_ioctl_ctrl_syncpt_wait(
+	struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_syncpt_wait_args *args)
+{
+	u32 timeout;
+	if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+		return -EINVAL;
+	if (args->timeout == NVHOST_NO_TIMEOUT)
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	else
+		timeout = (u32)msecs_to_jiffies(args->timeout);
+
+	return nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
+					args->thresh, timeout);
+}
+
+/*
+ * NVHOST_IOCTL_CTRL_MODULE_MUTEX: acquire or release a module lock on
+ * behalf of user space.  Index 0 is special-cased as a "keep host
+ * powered" reference; other indices map to hardware mlocks.  Locks
+ * held by a fd are tracked in ctx->mod_locks and dropped on release.
+ */
+static int nvhost_ioctl_ctrl_module_mutex(
+	struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_module_mutex_args *args)
+{
+	int err = 0;
+	/*
+	 * Bound-check against the number of module locks, not the
+	 * number of syncpoints: ctx->mod_locks has NV_HOST1X_NB_MLOCKS
+	 * entries, so the previous NV_HOST1X_SYNCPT_NB_PTS test allowed
+	 * an out-of-bounds index when there are more syncpts than
+	 * mlocks.
+	 */
+	if (args->id >= NV_HOST1X_NB_MLOCKS ||
+	    args->lock > 1)
+		return -EINVAL;
+
+	if (args->lock && !ctx->mod_locks[args->id]) {
+		if (args->id == 0)
+			nvhost_module_busy(&ctx->dev->mod);
+		else
+			err = nvhost_mutex_try_lock(&ctx->dev->cpuaccess,
+						args->id);
+		if (!err)
+			ctx->mod_locks[args->id] = 1;
+	} else if (!args->lock && ctx->mod_locks[args->id]) {
+		if (args->id == 0)
+			nvhost_module_idle(&ctx->dev->mod);
+		else
+			nvhost_mutex_unlock(&ctx->dev->cpuaccess, args->id);
+		ctx->mod_locks[args->id] = 0;
+	}
+	return err;
+}
+
+/*
+ * NVHOST_IOCTL_CTRL_MODULE_REGRDWR: read or write blocks of client
+ * module registers, batching user data through a bounce buffer on the
+ * kernel stack.
+ */
+static int nvhost_ioctl_ctrl_module_regrdwr(
+	struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_module_regrdwr_args *args)
+{
+	u32 num_offsets = args->num_offsets;
+	u32 *offsets = args->offsets;
+	void *values = args->values;
+	u32 vals[64];
+
+	if (!nvhost_access_module_regs(&ctx->dev->cpuaccess, args->id) ||
+	    (num_offsets == 0))
+		return -EINVAL;
+
+	while (num_offsets--) {
+		u32 remaining = args->block_size;
+		u32 offs;
+		if (get_user(offs, offsets))
+			return -EFAULT;
+		offsets++;
+		while (remaining) {
+			/*
+			 * min_t avoids the u32 vs size_t type mismatch
+			 * that the kernel's strictly-typed min() rejects;
+			 * cap each transfer at the bounce buffer size.
+			 */
+			u32 batch = min_t(u32, remaining, sizeof(vals));
+			if (args->write) {
+				if (copy_from_user(vals, values, batch))
+					return -EFAULT;
+				nvhost_write_module_regs(&ctx->dev->cpuaccess,
+						args->id, offs, batch, vals);
+			} else {
+				nvhost_read_module_regs(&ctx->dev->cpuaccess,
+						args->id, offs, batch, vals);
+				if (copy_to_user(values, vals, batch))
+					return -EFAULT;
+			}
+			remaining -= batch;
+			offs += batch;
+			values += batch;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ioctl dispatcher for the control fd.  Arguments are staged through a
+ * stack buffer: copied in for _IOC_WRITE commands and copied back out
+ * for _IOC_READ commands after the handler runs.
+ */
+static long nvhost_ctrlctl(struct file *filp,
+	unsigned int cmd, unsigned long arg)
+{
+	struct nvhost_ctrl_userctx *priv = filp->private_data;
+	u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
+	int err = 0;
+
+	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+	    (_IOC_NR(cmd) == 0) ||
+	    (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST))
+		return -EFAULT;
+
+	BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE);
+
+	if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	switch (cmd) {
+	case NVHOST_IOCTL_CTRL_SYNCPT_READ:
+		err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
+		err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
+		err = nvhost_ioctl_ctrl_syncpt_wait(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
+		err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
+		err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
+		break;
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	/*
+	 * copy_to_user returns the number of bytes NOT copied; map any
+	 * nonzero remainder to -EFAULT instead of leaking the raw count
+	 * back to user space as a bogus positive return value.
+	 */
+	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
+		err = copy_to_user((void __user *)arg, buf,
+				_IOC_SIZE(cmd)) ? -EFAULT : 0;
+
+	return err;
+}
+
+/* File operations for the control device node (/dev/nvhost-ctrl). */
+static struct file_operations nvhost_ctrlops = {
+	.owner = THIS_MODULE,
+	.release = nvhost_ctrlrelease,
+	.open = nvhost_ctrlopen,
+	.unlocked_ioctl = nvhost_ctrlctl
+};
+
/*
 * Power callback for the host1x module itself.
 * On power-on: restart interrupt handling and restore saved syncpt values
 * to hardware. On power-off: suspend every channel, save syncpt state for
 * the next power-on, then stop interrupt handling.
 */
static void power_host(struct nvhost_module *mod, enum nvhost_power_action action)
{
	struct nvhost_dev *dev = container_of(mod, struct nvhost_dev, mod);

	if (action == NVHOST_POWER_ACTION_ON) {
		/* assumes clk[0] is the host1x clock whose rate feeds the
		 * hardware usec timer -- TODO confirm against nvhost_acm */
		nvhost_intr_start(&dev->intr, clk_get_rate(mod->clk[0]));
		nvhost_syncpt_reset(&dev->syncpt);
	} else if (action == NVHOST_POWER_ACTION_OFF) {
		int i;
		for (i = 0; i < NVHOST_NUMCHANNELS; i++)
			nvhost_channel_suspend(&dev->channels[i]);
		nvhost_syncpt_save(&dev->syncpt);
		nvhost_intr_stop(&dev->intr);
	}
}
+
+static int __init nvhost_user_init(struct nvhost_dev *host)
+{
+ int i, err, devno;
+
+ host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
+ if (IS_ERR(host->nvhost_class)) {
+ err = PTR_ERR(host->nvhost_class);
+ dev_err(&host->pdev->dev, "failed to create class\n");
+ goto fail;
+ }
+
+ if (nvhost_major) {
+ devno = MKDEV(nvhost_major, nvhost_minor);
+ err = register_chrdev_region(devno, NVHOST_NUMCHANNELS + 1, IFACE_NAME);
+ } else {
+ err = alloc_chrdev_region(&devno, nvhost_minor,
+ NVHOST_NUMCHANNELS + 1, IFACE_NAME);
+ nvhost_major = MAJOR(devno);
+ }
+ if (err < 0) {
+ dev_err(&host->pdev->dev, "failed to reserve chrdev region\n");
+ goto fail;
+ }
+
+ for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
+ struct nvhost_channel *ch = &host->channels[i];
+
+ if (!strcmp(ch->desc->name, "display") &&
+ !nvhost_access_module_regs(&host->cpuaccess,
+ NVHOST_MODULE_DISPLAY_A))
+ continue;
+
+ cdev_init(&ch->cdev, &nvhost_channelops);
+ ch->cdev.owner = THIS_MODULE;
+
+ devno = MKDEV(nvhost_major, nvhost_minor + i);
+ err = cdev_add(&ch->cdev, devno, 1);
+ if (err < 0) {
+ dev_err(&host->pdev->dev, "failed to add chan %i cdev\n", i);
+ goto fail;
+ }
+ ch->node = device_create(host->nvhost_class, NULL, devno, NULL,
+ IFACE_NAME "-%s", ch->desc->name);
+ if (IS_ERR(ch->node)) {
+ err = PTR_ERR(ch->node);
+ dev_err(&host->pdev->dev, "failed to create chan %i device\n", i);
+ goto fail;
+ }
+ }
+
+ cdev_init(&host->cdev, &nvhost_ctrlops);
+ host->cdev.owner = THIS_MODULE;
+ devno = MKDEV(nvhost_major, nvhost_minor + NVHOST_NUMCHANNELS);
+ err = cdev_add(&host->cdev, devno, 1);
+ if (err < 0)
+ goto fail;
+ host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
+ IFACE_NAME "-ctrl");
+ if (IS_ERR(host->ctrl)) {
+ err = PTR_ERR(host->ctrl);
+ dev_err(&host->pdev->dev, "failed to create ctrl device\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return err;
+}
+
+static int __init nvhost_probe(struct platform_device *pdev)
+{
+ struct nvhost_dev *host;
+ struct resource *regs, *intr0, *intr1;
+ int i, err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ intr0 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ intr1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+
+ if (!regs || !intr0 || !intr1) {
+ dev_err(&pdev->dev, "missing required platform resources\n");
+ return -ENXIO;
+ }
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->pdev = pdev;
+
+ host->reg_mem = request_mem_region(regs->start,
+ resource_size(regs), pdev->name);
+ if (!host->reg_mem) {
+ dev_err(&pdev->dev, "failed to get host register memory\n");
+ err = -ENXIO;
+ goto fail;
+ }
+ host->aperture = ioremap(regs->start, resource_size(regs));
+ if (!host->aperture) {
+ dev_err(&pdev->dev, "failed to remap host registers\n");
+ err = -ENXIO;
+ goto fail;
+ }
+ host->sync_aperture = host->aperture +
+ (NV_HOST1X_CHANNEL0_BASE +
+ HOST1X_CHANNEL_SYNC_REG_BASE);
+
+ for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
+ struct nvhost_channel *ch = &host->channels[i];
+ err = nvhost_channel_init(ch, host, i);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to init channel %d\n", i);
+ goto fail;
+ }
+ }
+
+ err = nvhost_cpuaccess_init(&host->cpuaccess, pdev);
+ if (err) goto fail;
+ err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
+ if (err) goto fail;
+ err = nvhost_user_init(host);
+ if (err) goto fail;
+ err = nvhost_module_init(&host->mod, "host1x", power_host, NULL, &pdev->dev);
+ if (err) goto fail;
+
+ platform_set_drvdata(pdev, host);
+
+ dev_info(&pdev->dev, "initialized\n");
+ return 0;
+
+fail:
+ /* TODO: [ahatala 2010-05-04] */
+ kfree(host);
+ return err;
+}
+
/* Module removal: device teardown is not implemented yet. */
static int __exit nvhost_remove(struct platform_device *pdev)
{
	return 0;
}

/* Suspend hook: drives the host1x module into its suspended state. */
static int nvhost_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct nvhost_dev *host = platform_get_drvdata(pdev);
	dev_info(&pdev->dev, "suspending\n");
	nvhost_module_suspend(&host->mod);
	dev_info(&pdev->dev, "suspended\n");
	return 0;
}

/* No .probe member here: nvhost_mod_init() registers through
 * platform_driver_probe(), which passes the probe function directly
 * and lets it stay in __init memory. */
static struct platform_driver nvhost_driver = {
	.remove = __exit_p(nvhost_remove),
	.suspend = nvhost_suspend,
	.driver = {
		.owner = THIS_MODULE,
		.name = DRIVER_NAME
	}
};

static int __init nvhost_mod_init(void)
{
	return platform_driver_probe(&nvhost_driver, nvhost_probe);
}

static void __exit nvhost_mod_exit(void)
{
	platform_driver_unregister(&nvhost_driver);
}
+
+module_init(nvhost_mod_init);
+module_exit(nvhost_mod_exit);
+
+MODULE_AUTHOR("NVIDIA");
+MODULE_DESCRIPTION("Graphics host driver for Tegra products");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform-nvhost");
diff --git a/drivers/video/tegra/host/nvhost_dev.h b/drivers/video/tegra/host/nvhost_dev.h
new file mode 100644
index 000000000000..385dad8d867f
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_dev.h
@@ -0,0 +1,50 @@
+/*
+ * drivers/video/tegra/host/nvhost_dev.h
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_DEV_H
+#define __NVHOST_DEV_H
+
+#include "nvhost_acm.h"
+#include "nvhost_syncpt.h"
+#include "nvhost_intr.h"
+#include "nvhost_cpuaccess.h"
+#include "nvhost_channel.h"
+#include "nvhost_hardware.h"
+
#define NVHOST_MAJOR 0 /* dynamic major by default */

/* Top-level per-device driver state; embeds every subsystem. */
struct nvhost_dev {
	void __iomem *aperture;		/* mapped host1x register window */
	void __iomem *sync_aperture;	/* sync registers inside 'aperture' */
	struct resource *reg_mem;	/* claimed MMIO region */
	struct platform_device *pdev;
	struct class *nvhost_class;	/* class for all user-visible nodes */
	struct cdev cdev;		/* char device behind the ctrl node */
	struct device *ctrl;		/* the "-ctrl" device node */
	struct nvhost_syncpt syncpt;
	struct nvhost_cpuaccess cpuaccess;
	struct nvhost_intr intr;
	struct nvhost_module mod;	/* power management for host1x itself */
	struct nvhost_channel channels[NVHOST_NUMCHANNELS];
};
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_hardware.h b/drivers/video/tegra/host/nvhost_hardware.h
new file mode 100644
index 000000000000..661f775e6be3
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_hardware.h
@@ -0,0 +1,219 @@
+/*
+ * drivers/video/tegra/host/nvhost_hardware.h
+ *
+ * Tegra Graphics Host Register Offsets
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HARDWARE_H
+#define __NVHOST_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
/* class ids */
enum {
	NV_HOST1X_CLASS_ID = 0x1,
	NV_VIDEO_ENCODE_MPEG_CLASS_ID = 0x20,
	NV_GRAPHICS_3D_CLASS_ID = 0x60
};


/* channel registers */
#define NV_HOST1X_CHANNELS 8
#define NV_HOST1X_CHANNEL0_BASE 0
#define NV_HOST1X_CHANNEL_MAP_SIZE_BYTES 16384

/* per-channel register offsets, in bytes from the channel base */
enum {
	HOST1X_CHANNEL_FIFOSTAT = 0x00,
	HOST1X_CHANNEL_INDDATA = 0x0c,
	HOST1X_CHANNEL_DMASTART = 0x14,
	HOST1X_CHANNEL_DMAPUT = 0x18,
	HOST1X_CHANNEL_DMAEND = 0x20,
	HOST1X_CHANNEL_DMACTRL = 0x24
};
+
/* Number of entries in the channel's output FIFO (FIFOSTAT bits 28:24). */
static inline unsigned nvhost_channel_fifostat_outfentries(u32 reg)
{
	unsigned entries = reg >> 24;
	return entries & 0x1f;
}

/* Compose a DMACTRL register value from its three flag bits
 * (bit 0: stop, bit 1: get-reset, bit 2: init-get). */
static inline u32 nvhost_channel_dmactrl(bool stop, bool get_rst, bool init_get)
{
	u32 ctrl = 0;
	if (stop)
		ctrl |= 1;
	if (get_rst)
		ctrl |= 2;
	if (init_get)
		ctrl |= 4;
	return ctrl;
}
+
+
/* sync registers */
#define NV_HOST1X_SYNCPT_NB_PTS 32
#define NV_HOST1X_SYNCPT_NB_BASES 8
#define NV_HOST1X_NB_MLOCKS 16
#define HOST1X_CHANNEL_SYNC_REG_BASE 12288

/* sync register offsets, in bytes from HOST1X_CHANNEL_SYNC_REG_BASE;
 * the _0-suffixed entries start per-syncpt/per-mlock register arrays */
enum {
	HOST1X_SYNC_INTMASK = 0x4,
	HOST1X_SYNC_INTC0MASK = 0x8,
	HOST1X_SYNC_HINTSTATUS = 0x20,
	HOST1X_SYNC_HINTMASK = 0x24,
	HOST1X_SYNC_HINTSTATUS_EXT = 0x28,
	HOST1X_SYNC_HINTMASK_EXT = 0x2c,
	HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS = 0x40,
	HOST1X_SYNC_SYNCPT_THRESH_CPU1_INT_STATUS = 0x48,
	HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE = 0x60,
	HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0 = 0x68,
	HOST1X_SYNC_USEC_CLK = 0x1a4,
	HOST1X_SYNC_CTXSW_TIMEOUT_CFG = 0x1a8,
	HOST1X_SYNC_IP_BUSY_TIMEOUT = 0x1bc,
	HOST1X_SYNC_IP_READ_TIMEOUT_ADDR = 0x1c0,
	HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR = 0x1c4,
	HOST1X_SYNC_MLOCK_0 = 0x2c0,
	HOST1X_SYNC_MLOCK_OWNER_0 = 0x340,
	HOST1X_SYNC_SYNCPT_0 = 0x400,
	HOST1X_SYNC_SYNCPT_INT_THRESH_0 = 0x500,
	HOST1X_SYNC_SYNCPT_BASE_0 = 0x600,
	HOST1X_SYNC_SYNCPT_CPU_INCR = 0x700
};
+
/* HINTSTATUS_EXT bit 30: an IP read transaction timed out. */
static inline bool nvhost_sync_hintstatus_ext_ip_read_int(u32 reg)
{
	return ((reg >> 30) & 1u) != 0u;
}

/* HINTSTATUS_EXT bit 31: an IP write transaction timed out. */
static inline bool nvhost_sync_hintstatus_ext_ip_write_int(u32 reg)
{
	return ((reg >> 31) & 1u) != 0u;
}

/* MLOCK_OWNER bit 0: the mlock is owned by a channel. */
static inline bool nvhost_sync_mlock_owner_ch_owns(u32 reg)
{
	return (reg & 1u) != 0u;
}

/* MLOCK_OWNER bit 1: the mlock is owned by a CPU. */
static inline bool nvhost_sync_mlock_owner_cpu_owns(u32 reg)
{
	return ((reg >> 1) & 1u) != 0u;
}

/* MLOCK_OWNER bits 11:8: id of the owning channel. */
static inline unsigned int nvhost_sync_mlock_owner_owner_chid(u32 reg)
{
	unsigned int chid = reg >> 8;
	return chid & 0xfu;
}
+
+
/* host class */
/* method ids of the host1x class (NV_HOST1X_CLASS_ID) */
enum {
	NV_CLASS_HOST_INCR_SYNCPT = 0x0,
	NV_CLASS_HOST_WAIT_SYNCPT = 0x8,
	NV_CLASS_HOST_WAIT_SYNCPT_BASE = 0x9,
	NV_CLASS_HOST_INCR_SYNCPT_BASE = 0xc,
	NV_CLASS_HOST_INDOFF = 0x2d,
	NV_CLASS_HOST_INDDATA = 0x2e
};
+
/* Argument word for the WAIT_SYNCPT_BASE host method:
 * syncpt index in bits 31:24, base index in bits 23:16, offset below. */
static inline u32 nvhost_class_host_wait_syncpt_base(
	unsigned indx, unsigned base_indx, unsigned offset)
{
	u32 word = offset;
	word |= base_indx << 16;
	word |= indx << 24;
	return word;
}

/* Argument word for the INCR_SYNCPT_BASE host method:
 * base index in bits 31:24, increment amount below. */
static inline u32 nvhost_class_host_incr_syncpt_base(
	unsigned base_indx, unsigned offset)
{
	u32 word = offset;
	word |= base_indx << 24;
	return word;
}
+
/* module ids used for indirect register access through the host */
enum {
	NV_HOST_MODULE_HOST1X = 0,
	NV_HOST_MODULE_MPE = 1,
	NV_HOST_MODULE_GR3D = 6
};
+
/* INDOFF value for an indirect register write through the host
 * (module id in bits 25:18, word offset in bits 17:2). */
static inline u32 nvhost_class_host_indoff_reg_write(
	unsigned mod_id, unsigned offset, bool auto_inc)
{
	u32 indoff = 0xfu << 28;
	indoff |= mod_id << 18;
	indoff |= offset << 2;
	if (auto_inc)
		indoff |= 1u << 27;
	return indoff;
}

/* INDOFF value for an indirect register read through the host
 * (bit 0 distinguishes this from the write form above). */
static inline u32 nvhost_class_host_indoff_reg_read(
	unsigned mod_id, unsigned offset, bool auto_inc)
{
	u32 indoff = 1u;
	indoff |= mod_id << 18;
	indoff |= offset << 2;
	if (auto_inc)
		indoff |= 1u << 27;
	return indoff;
}
+
+
/*
 * CDMA opcode encoders. Every command word carries the opcode number in
 * bits 31:28; the meaning of the low bits is opcode-specific.
 */

/* opcode 0: select 'class_id', with first method 'offset' and write mask */
static inline u32 nvhost_opcode_setclass(
	unsigned class_id, unsigned offset, unsigned mask)
{
	u32 op = offset << 16;
	op |= class_id << 6;
	op |= mask;
	return op;
}

/* opcode 1: write 'count' words to auto-incrementing method 'offset' */
static inline u32 nvhost_opcode_incr(unsigned offset, unsigned count)
{
	u32 op = 1u << 28;
	return op | (offset << 16) | count;
}

/* opcode 2: write 'count' words, all to the same method 'offset' */
static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count)
{
	u32 op = 2u << 28;
	return op | (offset << 16) | count;
}

/* opcode 3: write to the methods selected by 'mask', starting at 'offset' */
static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask)
{
	u32 op = 3u << 28;
	return op | (offset << 16) | mask;
}

/* opcode 4: write the immediate 16-bit 'value' to method 'offset' */
static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value)
{
	u32 op = 4u << 28;
	return op | (offset << 16) | value;
}

/* opcode 5: restart DMA fetch at 'address' (16-byte aligned) */
static inline u32 nvhost_opcode_restart(unsigned address)
{
	return (5u << 28) | (address >> 4);
}

/* opcode 6: fetch 'count' words from an indirect buffer */
static inline u32 nvhost_opcode_gather(unsigned offset, unsigned count)
{
	return (6u << 28) | (offset << 16) | count;
}

/* gather with the insert bit (15) set: write fetched words to 'offset' */
static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count)
{
	return nvhost_opcode_gather(offset, count) | (1u << 15);
}

/* as above, plus the auto-increment bit (14) */
static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count)
{
	return nvhost_opcode_gather_nonincr(offset, count) | (1u << 14);
}

#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0)
+
+#endif /* __NVHOST_HARDWARE_H */
diff --git a/drivers/video/tegra/host/nvhost_hwctx.h b/drivers/video/tegra/host/nvhost_hwctx.h
new file mode 100644
index 000000000000..d1a8930e0ecb
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_hwctx.h
@@ -0,0 +1,86 @@
+/*
+ * drivers/video/tegra/host/nvhost_hwctx.h
+ *
+ * Tegra Graphics Host Hardware Context Interface
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HWCTX_H
+#define __NVHOST_HWCTX_H
+
+#include <linux/string.h>
+#include <linux/nvhost.h>
+#include <linux/nvmap.h>
+
struct nvhost_channel;

/* Saved/restorable hardware state of a client module for one context. */
struct nvhost_hwctx {
	struct nvhost_channel *channel;	/* channel this context runs on */
	u32 last_access_id;
	u32 last_access_value;
	bool valid;	/* presumably: restore image holds real state -- confirm */

	/* context-save command buffer (nvmap-backed) */
	struct nvmap_handle *save;
	u32 save_phys;
	u32 save_size;
	u32 save_incrs;		/* syncpt increments performed by the save */
	void *save_cpu_data;

	/* context-restore command buffer (nvmap-backed) */
	struct nvmap_handle *restore;
	u32 restore_phys;
	u32 restore_size;
	u32 restore_incrs;	/* syncpt increments performed by the restore */
};

/* Per-module hooks for context setup/teardown and save servicing. */
struct nvhost_hwctx_handler {
	int (*init) (struct nvhost_hwctx *ctx);
	void (*deinit) (struct nvhost_hwctx *ctx);
	void (*save_service) (struct nvhost_hwctx *ctx);
};

int nvhost_3dctx_handler_init(struct nvhost_hwctx_handler *h);
int nvhost_mpectx_handler_init(struct nvhost_hwctx_handler *h);

/* Select the context handler for a module by name; modules without
 * hardware contexts leave 'h' untouched and report success. */
static inline int nvhost_hwctx_handler_init(
	struct nvhost_hwctx_handler *h,
	const char *module)
{
	if (strcmp(module, "gr3d") == 0)
		return nvhost_3dctx_handler_init(h);
	else if (strcmp(module, "mpe") == 0)
		return nvhost_mpectx_handler_init(h);

	return 0;
}

/* Describes one register range inside a context save/restore image. */
struct hwctx_reginfo {
	unsigned int offset:12;		/* first register offset */
	unsigned int count:16;		/* number of registers */
	unsigned int type:2;		/* HWCTX_REGINFO_* access kind */
};

enum {
	HWCTX_REGINFO_DIRECT = 0,
	HWCTX_REGINFO_INDIRECT,
	HWCTX_REGINFO_INDIRECT_OFFSET,
	HWCTX_REGINFO_INDIRECT_DATA
};

#define HWCTX_REGINFO(offset, count, type) {offset, count, HWCTX_REGINFO_##type}
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_intr.c b/drivers/video/tegra/host/nvhost_intr.c
new file mode 100644
index 000000000000..3d101b221b71
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_intr.c
@@ -0,0 +1,561 @@
+/*
+ * drivers/video/tegra/host/nvhost_intr.c
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_intr.h"
+#include "nvhost_dev.h"
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#define intr_to_dev(x) container_of(x, struct nvhost_dev, intr)
+
+
/*** HW host sync management ***/

/* NOTE(review): these non-static helpers are not declared in
 * nvhost_intr.h; consider making them static if no other file uses them. */

void init_host_sync(void __iomem *sync_regs)
{
	/* disable the ip_busy_timeout. this prevents write drops, etc.
	 * there's no real way to recover from a hung client anyway.
	 */
	writel(0, sync_regs + HOST1X_SYNC_IP_BUSY_TIMEOUT);

	/* increase the auto-ack timeout to the maximum value. 2d will hang
	 * otherwise on ap20.
	 */
	writel(0xff, sync_regs + HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
}

void set_host_clocks_per_microsecond(void __iomem *sync_regs, u32 cpm)
{
	/* write microsecond clock register */
	writel(cpm, sync_regs + HOST1X_SYNC_USEC_CLK);
}

/* program the interrupt threshold for one sync point (16-bit compare) */
static void set_syncpt_threshold(void __iomem *sync_regs, u32 id, u32 thresh)
{
	thresh &= 0xffff;
	writel(thresh, sync_regs + (HOST1X_SYNC_SYNCPT_INT_THRESH_0 + id * 4));
}

/* route threshold interrupts for one sync point to CPU0 */
static void enable_syncpt_interrupt(void __iomem *sync_regs, u32 id)
{
	writel(BIT(id), sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0);
}

void disable_all_syncpt_interrupts(void __iomem *sync_regs)
{
	/* disable interrupts for both cpu's */
	writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);

	/* clear status for both cpu's */
	writel(0xfffffffful, sync_regs +
		HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
	writel(0xfffffffful, sync_regs +
		HOST1X_SYNC_SYNCPT_THRESH_CPU1_INT_STATUS);
}
+
+
/*** Wait list management ***/

/* One waiter: an action to run when its sync point passes 'thresh'. */
struct nvhost_waitlist {
	struct list_head list;		/* entry in a per-syncpt wait queue */
	struct kref refcount;		/* queue ref, plus one per caller ref */
	u32 thresh;			/* threshold that triggers the action */
	enum nvhost_intr_action action;
	atomic_t state;			/* enum waitlist_state */
	void *data;			/* action-specific payload */
	int count;			/* consolidated completion count */
};

/* Waiter life cycle. The transitions rely on the numeric ordering:
 * atomic_inc moves PENDING->REMOVED (picked up by the irq thread) or
 * CANCELLED->HANDLED (completion raced with a cancel); see
 * remove_completed_waiters() and nvhost_intr_put_ref(). */
enum waitlist_state
{
	WLS_PENDING,
	WLS_REMOVED,
	WLS_CANCELLED,
	WLS_HANDLED
};

/* kref release: the waiter is off every list by now */
static void waiter_release(struct kref *kref)
{
	kfree(container_of(kref, struct nvhost_waitlist, refcount));
}
+
/**
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 */
static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
				struct list_head *queue)
{
	struct nvhost_waitlist *pos;
	u32 thresh = waiter->thresh;

	/* walk from the tail and insert after the last entry whose
	 * threshold is <= ours; the signed difference keeps the compare
	 * correct across 32-bit syncpt wraparound */
	list_for_each_entry_reverse(pos, queue, list)
		if ((s32)(pos->thresh - thresh) <= 0) {
			list_add(&waiter->list, &pos->list);
			return false;
		}

	list_add(&waiter->list, queue);
	return true;
}
+
/**
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
			struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
	struct list_head *dest;
	struct nvhost_waitlist *waiter, *next, *prev;

	list_for_each_entry_safe(waiter, next, head, list) {
		/* queue is sorted: the first un-passed threshold ends the
		 * scan (wrap-safe signed compare) */
		if ((s32)(waiter->thresh - sync) > 0)
			break;

		dest = completed + waiter->action;

		/* consolidate submit cleanups: fold this waiter into the
		 * previous one for the same channel (dest = NULL marks it
		 * as absorbed) */
		if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
			&& !list_empty(dest)) {
			prev = list_entry(dest->prev,
					struct nvhost_waitlist, list);
			if (prev->data == waiter->data) {
				prev->count++;
				dest = NULL;
			}
		}

		/* PENDING->REMOVED or CANCELLED->HANDLED */
		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
			/* cancelled or consolidated: drop the queue's ref */
			list_del(&waiter->list);
			kref_put(&waiter->refcount, waiter_release);
		} else {
			list_move_tail(&waiter->list, dest);
		}
	}
}

/* re-arm the threshold interrupt for the first (lowest-threshold) waiter */
void reset_threshold_interrupt(struct list_head *head,
			unsigned int id, void __iomem *sync_regs)
{
	u32 thresh = list_first_entry(head,
				struct nvhost_waitlist, list)->thresh;

	set_syncpt_threshold(sync_regs, id, thresh);
	enable_syncpt_interrupt(sync_regs, id);
}
+
+
/* submit complete: advance the channel's push buffer bookkeeping and
 * drop one power reference per completed submit */
static void action_submit_complete(struct nvhost_waitlist *waiter)
{
	struct nvhost_channel *channel = waiter->data;
	int nr_completed = waiter->count;

	nvhost_cdma_update(&channel->cdma);
	nvhost_module_idle_mult(&channel->mod, nr_completed);
}

/* ctxsave: hand the context to its module-specific save handler */
static void action_ctxsave(struct nvhost_waitlist *waiter)
{
	struct nvhost_hwctx *hwctx = waiter->data;
	struct nvhost_channel *channel = hwctx->channel;

	channel->ctxhandler.save_service(hwctx);
}

static void action_wakeup(struct nvhost_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up(wq);
}

static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up_interruptible(wq);
}

typedef void (*action_handler)(struct nvhost_waitlist *waiter);

/* indexed by enum nvhost_intr_action: the order must match */
static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
	action_submit_complete,
	action_ctxsave,
	action_wakeup,
	action_wakeup_interruptible,
};

/* run and release every waiter gathered by remove_completed_waiters() */
static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
	struct list_head *head = completed;
	int i;

	for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
		action_handler handler = action_handlers[i];
		struct nvhost_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next, head, list) {
			list_del(&waiter->list);
			handler(waiter);
			/* publish HANDLED before dropping the ref so a
			 * racing nvhost_intr_put_ref() stops spinning */
			atomic_set(&waiter->state, WLS_HANDLED);
			smp_wmb();
			kref_put(&waiter->refcount, waiter_release);
		}
	}
}
+
/**
 * Remove & handle all waiters that have completed for the given syncpt.
 * Returns nonzero when the wait list is now empty.
 */
int process_wait_list(struct nvhost_intr_syncpt *syncpt,
			u32 threshold, void __iomem *sync_regs)
{
	struct list_head completed[NVHOST_INTR_ACTION_COUNT];
	unsigned int i;
	int empty;

	for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
		INIT_LIST_HEAD(completed + i);

	spin_lock(&syncpt->lock);

	remove_completed_waiters(&syncpt->wait_head, threshold, completed);

	/* if waiters remain, re-arm the interrupt for the next threshold */
	empty = list_empty(&syncpt->wait_head);
	if (!empty)
		reset_threshold_interrupt(&syncpt->wait_head,
					syncpt->id, sync_regs);

	spin_unlock(&syncpt->lock);

	/* actions may sleep or take other locks: run them unlocked */
	run_handlers(completed);

	return empty;
}
+
+
/*** host syncpt interrupt service functions ***/

/**
 * Sync point threshold interrupt service function
 * Handles sync point threshold triggers, in interrupt context
 */
static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
	struct nvhost_intr_syncpt *syncpt = dev_id;
	unsigned int id = syncpt->id;
	struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
						syncpt[id]);
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

	/* mask and ack this syncpt's interrupt; the threaded handler
	 * re-enables it if waiters remain */
	writel(BIT(id),
		sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
	writel(BIT(id),
		sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);

	return IRQ_WAKE_THREAD;
}

/**
 * Sync point threshold interrupt service thread function
 * Handles sync point threshold triggers, in thread context
 */
static irqreturn_t syncpt_thresh_fn(int irq, void *dev_id)
{
	struct nvhost_intr_syncpt *syncpt = dev_id;
	unsigned int id = syncpt->id;
	struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
						syncpt[id]);
	struct nvhost_dev *dev = intr_to_dev(intr);

	/* process every waiter the current syncpt value has passed */
	(void)process_wait_list(syncpt,
		nvhost_syncpt_update_min(&dev->syncpt, id),
		dev->sync_aperture);

	return IRQ_HANDLED;
}

/**
 * lazily request a syncpt's irq
 */
static int request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
	int err;

	/* rechecked here because the caller had to drop the syncpt
	 * spinlock; serialized by intr->mutex */
	if (syncpt->irq_requested)
		return 0;

	err = request_threaded_irq(syncpt->irq,
				syncpt_thresh_isr, syncpt_thresh_fn,
				0, syncpt->thresh_irq_name, syncpt);
	if (err)
		return err;

	syncpt->irq_requested = 1;
	return 0;
}

/**
 * free a syncpt's irq. syncpt interrupt should be disabled first.
 */
static void free_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
	if (syncpt->irq_requested) {
		free_irq(syncpt->irq, syncpt);
		syncpt->irq_requested = 0;
	}
}
+
+
/*** host general interrupt service functions ***/

/**
 * Host general interrupt service function
 * Handles read / write failures
 */
static irqreturn_t host1x_isr(int irq, void *dev_id)
{
	struct nvhost_intr *intr = dev_id;
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
	u32 stat;
	u32 ext_stat;
	u32 addr;

	stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS);
	ext_stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);

	if (nvhost_sync_hintstatus_ext_ip_read_int(ext_stat)) {
		addr = readl(sync_regs + HOST1X_SYNC_IP_READ_TIMEOUT_ADDR);
		pr_err("Host read timeout at address %x\n", addr);
	}

	if (nvhost_sync_hintstatus_ext_ip_write_int(ext_stat)) {
		addr = readl(sync_regs + HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR);
		pr_err("Host write timeout at address %x\n", addr);
	}

	/* ack exactly the status bits we observed */
	writel(ext_stat, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
	writel(stat, sync_regs + HOST1X_SYNC_HINTSTATUS);

	return IRQ_HANDLED;
}

/* request the general (non-syncpt) host irq and unmask its sources;
 * no-op when already requested. Callers hold intr->mutex. */
static int request_host_general_irq(struct nvhost_intr *intr)
{
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
	int err;

	if (intr->host_general_irq_requested)
		return 0;

	/* master disable for general (not syncpt) host interrupts */
	writel(0, sync_regs + HOST1X_SYNC_INTMASK);

	/* clear status & extstatus */
	writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
	writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS);

	err = request_irq(intr->host_general_irq, host1x_isr, 0,
			"host_status", intr);
	if (err)
		return err;

	/* enable extra interrupt sources IP_READ_INT and IP_WRITE_INT */
	writel(BIT(30) | BIT(31), sync_regs + HOST1X_SYNC_HINTMASK_EXT);

	/* enable extra interrupt sources */
	writel(BIT(31), sync_regs + HOST1X_SYNC_HINTMASK);

	/* enable host module interrupt to CPU0 */
	writel(BIT(0), sync_regs + HOST1X_SYNC_INTC0MASK);

	/* master enable for general (not syncpt) host interrupts */
	writel(BIT(0), sync_regs + HOST1X_SYNC_INTMASK);

	intr->host_general_irq_requested = true;

	return err;
}

/* mask and release the general host irq, if requested */
static void free_host_general_irq(struct nvhost_intr *intr)
{
	if (intr->host_general_irq_requested) {
		void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

		/* master disable for general (not syncpt) host interrupts */
		writel(0, sync_regs + HOST1X_SYNC_INTMASK);

		free_irq(intr->host_general_irq, intr);
		intr->host_general_irq_requested = false;
	}
}
+
+
/*** Main API ***/

/* Schedule 'action' to run when syncpt 'id' passes 'thresh'; see the
 * contract in nvhost_intr.h. Non-blocking except for the first waiter
 * on a syncpt, which lazily requests its irq. */
int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
			enum nvhost_intr_action action, void *data,
			void **ref)
{
	struct nvhost_waitlist *waiter;
	struct nvhost_intr_syncpt *syncpt;
	void __iomem *sync_regs;
	int queue_was_empty;
	int err;

	/* create and initialize a new waiter */
	waiter = kmalloc(sizeof(*waiter), GFP_KERNEL);
	if (!waiter)
		return -ENOMEM;
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	/* a second reference is handed back to the caller through *ref */
	if (ref)
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	BUG_ON(id >= NV_HOST1X_SYNCPT_NB_PTS);
	syncpt = intr->syncpt + id;
	sync_regs = intr_to_dev(intr)->sync_aperture;

	spin_lock(&syncpt->lock);

	/* lazily request irq for this sync point */
	if (!syncpt->irq_requested) {
		/* must drop the spinlock: request_threaded_irq() sleeps;
		 * request_syncpt_irq() rechecks under intr->mutex */
		spin_unlock(&syncpt->lock);

		mutex_lock(&intr->mutex);
		err = request_syncpt_irq(syncpt);
		mutex_unlock(&intr->mutex);

		if (err) {
			kfree(waiter);
			return err;
		}

		spin_lock(&syncpt->lock);
	}

	queue_was_empty = list_empty(&syncpt->wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
		/* added at head of list - new threshold value */
		set_syncpt_threshold(sync_regs, id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			enable_syncpt_interrupt(sync_regs, id);
	}

	spin_unlock(&syncpt->lock);

	if (ref)
		*ref = waiter;
	return 0;
}

/* Cancel (or wait out) a waiter and drop the caller's reference. */
void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
{
	struct nvhost_waitlist *waiter = ref;

	/* WLS_REMOVED means the irq thread owns the waiter right now;
	 * yield until it marks the waiter HANDLED, then drop our ref */
	while (atomic_cmpxchg(&waiter->state,
			WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
		schedule();

	kref_put(&waiter->refcount, waiter_release);
}
+
+
/*** Init & shutdown ***/

/* Software-only setup; no hardware is touched until nvhost_intr_start().
 * 'irq_sync' is the first of NV_HOST1X_SYNCPT_NB_PTS contiguous syncpt
 * irqs, 'irq_gen' the general host irq. */
int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
{
	unsigned int id;
	struct nvhost_intr_syncpt *syncpt;

	mutex_init(&intr->mutex);
	intr->host_general_irq = irq_gen;
	intr->host_general_irq_requested = false;

	for (id = 0, syncpt = intr->syncpt;
	     id < NV_HOST1X_SYNCPT_NB_PTS;
	     ++id, ++syncpt) {
		syncpt->id = id;
		syncpt->irq = irq_sync + id;
		syncpt->irq_requested = 0;
		spin_lock_init(&syncpt->lock);
		INIT_LIST_HEAD(&syncpt->wait_head);
		snprintf(syncpt->thresh_irq_name,
			sizeof(syncpt->thresh_irq_name),
			"host_sp_%02d", id);
	}

	return 0;
}

void nvhost_intr_deinit(struct nvhost_intr *intr)
{
	nvhost_intr_stop(intr);
}

/* Enable interrupt handling; 'hz' is the current host1x clock rate,
 * used to program the hardware microsecond timer. */
void nvhost_intr_start(struct nvhost_intr *intr, u32 hz)
{
	struct nvhost_dev *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;

	mutex_lock(&intr->mutex);

	init_host_sync(sync_regs);
	/* round the clocks-per-usec ratio up */
	set_host_clocks_per_microsecond(sync_regs, (hz + 1000000 - 1)/1000000);

	request_host_general_irq(intr);

	mutex_unlock(&intr->mutex);
}

/* Disable and release all interrupts; every wait queue must be empty. */
void nvhost_intr_stop(struct nvhost_intr *intr)
{
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
	unsigned int id;
	struct nvhost_intr_syncpt *syncpt;

	mutex_lock(&intr->mutex);

	disable_all_syncpt_interrupts(sync_regs);

	for (id = 0, syncpt = intr->syncpt;
	     id < NV_HOST1X_SYNCPT_NB_PTS;
	     ++id, ++syncpt) {
		BUG_ON(!list_empty(&syncpt->wait_head));
		free_syncpt_irq(syncpt);
	}

	free_host_general_irq(intr);

	mutex_unlock(&intr->mutex);
}
diff --git a/drivers/video/tegra/host/nvhost_intr.h b/drivers/video/tegra/host/nvhost_intr.h
new file mode 100644
index 000000000000..fb3e613d70da
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_intr.h
@@ -0,0 +1,104 @@
+/*
+ * drivers/video/tegra/host/nvhost_intr.h
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_INTR_H
+#define __NVHOST_INTR_H
+
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+
+#include "nvhost_hardware.h"
+
+struct nvhost_channel;
+
enum nvhost_intr_action {
	/**
	 * Perform cleanup after a submit has completed.
	 * 'data' points to a channel
	 */
	NVHOST_INTR_ACTION_SUBMIT_COMPLETE = 0,

	/**
	 * Save a HW context.
	 * 'data' points to a context
	 */
	NVHOST_INTR_ACTION_CTXSAVE,

	/**
	 * Wake up a task.
	 * 'data' points to a wait_queue_head_t
	 */
	NVHOST_INTR_ACTION_WAKEUP,

	/**
	 * Wake up an interruptible task.
	 * 'data' points to a wait_queue_head_t
	 */
	NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,

	NVHOST_INTR_ACTION_COUNT
};
+
/* Per-syncpoint interrupt bookkeeping. */
struct nvhost_intr_syncpt {
	u8 id;				/* sync point id */
	u8 irq_requested;		/* irq requested (lazily) yet? */
	u16 irq;			/* linux irq number */
	spinlock_t lock;		/* protects wait_head */
	struct list_head wait_head;	/* waiters, sorted by threshold */
	char thresh_irq_name[12];	/* "host_sp_NN" */
};

struct nvhost_intr {
	struct nvhost_intr_syncpt syncpt[NV_HOST1X_SYNCPT_NB_PTS];
	struct mutex mutex;		/* serializes irq request/free */
	int host_general_irq;
	bool host_general_irq_requested;
};
+
+/**
+ * Schedule an action to be taken when a sync point reaches the given threshold.
+ *
+ * @id the sync point
+ * @thresh the threshold
+ * @action the action to take
+ * @data a pointer to extra data depending on action, see above
+ * @ref must be passed if cancellation is possible, else NULL
+ *
+ * This is a non-blocking api.
+ */
+int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
+ enum nvhost_intr_action action, void *data,
+ void **ref);
+
+/**
+ * Unreference an action submitted to nvhost_intr_add_action().
+ * You must call this if you passed non-NULL as ref.
+ * @ref the ref returned from nvhost_intr_add_action()
+ */
+void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref);
+
+int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync);
+void nvhost_intr_deinit(struct nvhost_intr *intr);
+void nvhost_intr_start(struct nvhost_intr *intr, u32 hz);
+void nvhost_intr_stop(struct nvhost_intr *intr);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_mpectx.c b/drivers/video/tegra/host/nvhost_mpectx.c
new file mode 100644
index 000000000000..a5812e7469a3
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_mpectx.c
@@ -0,0 +1,23 @@
+/*
+ * drivers/video/tegra/host/nvhost_mpectx.c
+ *
+ * Tegra Graphics Host MPE HW Context
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* Placeholder */
diff --git a/drivers/video/tegra/host/nvhost_syncpt.c b/drivers/video/tegra/host/nvhost_syncpt.c
new file mode 100644
index 000000000000..e2474df830b4
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_syncpt.c
@@ -0,0 +1,252 @@
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.c
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_syncpt.h"
+#include "nvhost_dev.h"
+
+#define client_managed(id) (BIT(id) & NVSYNCPTS_CLIENT_MANAGED)
+#define syncpt_to_dev(sp) container_of(sp, struct nvhost_dev, syncpt)
+#define SYNCPT_CHECK_PERIOD 2*HZ
+
+/**
+ * Returns true if 'real' does not exceed the sw shadow of the max value
+ * for sync point 'id'. Client-managed sync points have no host-side max
+ * bookkeeping, so any value is accepted for them. The comparison is done
+ * as a wrapping s32 difference so it stays correct across u32 rollover.
+ */
+static bool check_max(struct nvhost_syncpt *sp, u32 id, u32 real)
+{
+	u32 max;
+	if (client_managed(id))
+		return true;
+	smp_rmb();	/* pairs with smp_wmb in nvhost_syncpt_set_max */
+	max = (u32)atomic_read(&sp->max_val[id]);
+	return ((s32)(max - real) >= 0);
+}
+
+/**
+ * Write the sw shadow of the current syncpoint value back to hw.
+ * Used after power-gating to restore hw state; syncpoint registers
+ * are laid out at HOST1X_SYNC_SYNCPT_0 with a 4-byte stride.
+ */
+static void reset_syncpt(struct nvhost_syncpt *sp, u32 id)
+{
+	struct nvhost_dev *dev = syncpt_to_dev(sp);
+	int min;
+	smp_rmb();	/* make sure we see the latest min_val shadow */
+	min = atomic_read(&sp->min_val[id]);
+	writel(min, dev->sync_aperture + (HOST1X_SYNC_SYNCPT_0 + id * 4));
+}
+
+/**
+ * Write the sw shadow of a waitbase value back to hw
+ * (HOST1X_SYNC_SYNCPT_BASE_0 block, 4-byte stride).
+ */
+static void reset_syncpt_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+	struct nvhost_dev *dev = syncpt_to_dev(sp);
+	writel(sp->base_val[id],
+		dev->sync_aperture + (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
+}
+
+/**
+ * Refresh the sw shadow of a waitbase value from hw.
+ * Counterpart of reset_syncpt_wait_base(); called before power-gating.
+ */
+static void read_syncpt_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+	struct nvhost_dev *dev = syncpt_to_dev(sp);
+	sp->base_val[id] = readl(dev->sync_aperture +
+			(HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
+}
+
+/**
+ * Restores all syncpoint and waitbase hw registers from the sw shadows.
+ * Caller must ensure host is powered. The trailing wmb makes the writes
+ * visible before any subsequent hw activity relies on them.
+ */
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
+{
+	u32 i;
+	for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++)
+		reset_syncpt(sp, i);
+	for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++)
+		reset_syncpt_wait_base(sp, i);
+	wmb();
+}
+
+/**
+ * Captures hw syncpoint/waitbase state into the sw shadows before the
+ * host is powered down. Client-managed points can advance without host
+ * knowledge, so their min is re-read; host-managed points must already
+ * have min == max (all submitted work retired) — anything else means
+ * state would be lost, hence the BUG_ON.
+ */
+void nvhost_syncpt_save(struct nvhost_syncpt *sp)
+{
+	u32 i;
+
+	for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
+		if (client_managed(i))
+			nvhost_syncpt_update_min(sp, i);
+		else
+			BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
+	}
+
+	for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++)
+		read_syncpt_wait_base(sp, i);
+}
+
+/**
+ * Updates the sw shadow of the last syncpoint value read from hardware
+ * and returns the live value. The cmpxchg loop makes the shadow update
+ * lock-free: if another cpu raced us and installed a different 'old',
+ * we re-read the register and retry. Caller must ensure host is powered.
+ */
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
+{
+	struct nvhost_dev *dev = syncpt_to_dev(sp);
+	void __iomem *sync_regs = dev->sync_aperture;
+	u32 old, live;
+
+	do {
+		smp_rmb();
+		old = (u32)atomic_read(&sp->min_val[id]);
+		live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
+	} while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);
+
+	/* hw value beyond the promised max means bookkeeping is corrupt */
+	BUG_ON(!check_max(sp, id, live));
+
+	return live;
+}
+
+/**
+ * Reads the current syncpoint value from hw, powering the host module
+ * up around the register access (busy/idle pair).
+ */
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
+{
+	u32 val;
+
+	nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+	val = nvhost_syncpt_update_min(sp, id);
+	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+	return val;
+}
+
+/**
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache. Caller is responsible for host being powered.
+ * Incrementing a host-managed point past its promised max would corrupt
+ * bookkeeping, hence the BUG_ON when min already equals max.
+ */
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
+{
+	struct nvhost_dev *dev = syncpt_to_dev(sp);
+	BUG_ON(!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id));
+	/* CPU_INCR takes a bitmask of sync point ids */
+	writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
+	wmb();
+}
+
+/**
+ * Increment syncpoint value from cpu, updating the sw cache.
+ * Max is raised *before* the hw write so check_max never observes the
+ * live value ahead of the shadow.
+ */
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
+{
+	nvhost_syncpt_incr_max(sp, id, 1);
+	nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+	nvhost_syncpt_cpu_incr(sp, id);
+	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+}
+
+/**
+ * Main entrypoint for syncpoint value waits.
+ *
+ * Waits until sync point 'id' reaches 'thresh', or 'timeout' jiffies
+ * elapse, or the task is signalled. Returns 0 on success, -EAGAIN on
+ * timeout (or for a zero timeout that is not already satisfied), or a
+ * negative errno from the interrupt machinery / signal delivery.
+ * Pass NVHOST_NO_TIMEOUT to wait forever.
+ */
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
+			u32 thresh, u32 timeout)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+	void *ref;
+	int err = 0;
+
+	BUG_ON(!check_max(sp, id, thresh));
+
+	/* first check cache */
+	if (nvhost_syncpt_min_cmp(sp, id, thresh))
+		return 0;
+
+	/* keep host alive */
+	nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+
+	if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
+		/* try to read from register */
+		u32 val = nvhost_syncpt_update_min(sp, id);
+		if ((s32)(val - thresh) >= 0)
+			goto done;
+	}
+
+	if (!timeout) {
+		err = -EAGAIN;
+		goto done;
+	}
+
+	/* schedule a wakeup when the syncpoint value is reached */
+	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
+				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq, &ref);
+	if (err)
+		goto done;
+
+	/* wait for the syncpoint, or timeout, or signal */
+	while (timeout) {
+		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
+		err = wait_event_interruptible_timeout(wq,
+						nvhost_syncpt_min_cmp(sp, id, thresh),
+						check);
+		if (err != 0)
+			break;
+		/* subtract only the jiffies actually waited ('check'), not
+		 * the full check period: timeout is unsigned, so deducting
+		 * SYNCPT_CHECK_PERIOD from a smaller remainder would wrap
+		 * around and wait almost forever */
+		if (timeout != NVHOST_NO_TIMEOUT)
+			timeout -= check;
+		if (timeout) {
+			dev_warn(&syncpt_to_dev(sp)->pdev->dev,
+				"syncpoint id %d (%s) stuck waiting %d\n",
+				id, nvhost_syncpt_name(id), thresh);
+			nvhost_syncpt_debug(sp);
+		}
+	}
+	if (err > 0)
+		err = 0;	/* woken with condition true */
+	else if (err == 0)
+		err = -EAGAIN;	/* timed out */
+	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);
+
+done:
+	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+	return err;
+}
+
+/* Human-readable names for the 32 hw sync points; unnamed ids map to "". */
+static const char *s_syncpt_names[32] = {
+	"", "", "", "", "", "", "", "", "", "", "", "",
+	"vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4", "vi_isp_5",
+	"2d_0", "2d_1",
+	"", "",
+	"3d", "mpe", "disp0", "disp1", "vblank0", "vblank1", "mpe_ebm_eof", "mpe_wr_safe",
+	"2d_tinyblt", "dsi"
+};
+
+/**
+ * Returns the name of sync point 'id' (may be the empty string).
+ * Valid ids are 0..ARRAY_SIZE-1, so reject id == ARRAY_SIZE too:
+ * the original 'id > ARRAY_SIZE' check was off by one and allowed an
+ * out-of-bounds read at index 32.
+ */
+const char *nvhost_syncpt_name(u32 id)
+{
+	BUG_ON(id >= ARRAY_SIZE(s_syncpt_names));
+	return s_syncpt_names[id];
+}
+
+/**
+ * Dump min/max state of every in-use sync point (max != 0) to the log.
+ * Intended for diagnosing stuck waits; reads live hw values via
+ * nvhost_syncpt_update_min, so the host must be powered.
+ */
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
+{
+	u32 i;
+	for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
+		u32 max = nvhost_syncpt_read_max(sp, i);
+		if (!max)
+			continue;
+		dev_info(&syncpt_to_dev(sp)->pdev->dev,
+			"id %d (%s) min %d max %d\n",
+			i, nvhost_syncpt_name(i),
+			nvhost_syncpt_update_min(sp, i), max);
+
+	}
+}
diff --git a/drivers/video/tegra/host/nvhost_syncpt.h b/drivers/video/tegra/host/nvhost_syncpt.h
new file mode 100644
index 000000000000..f161f2051406
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_syncpt.h
@@ -0,0 +1,150 @@
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.h
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_SYNCPT_H
+#define __NVHOST_SYNCPT_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+
+#include "nvhost_hardware.h"
+
+#define NVSYNCPT_VI_ISP_0 (12)
+#define NVSYNCPT_VI_ISP_1 (13)
+#define NVSYNCPT_VI_ISP_2 (14)
+#define NVSYNCPT_VI_ISP_3 (15)
+#define NVSYNCPT_VI_ISP_4 (16)
+#define NVSYNCPT_VI_ISP_5 (17)
+#define NVSYNCPT_2D_0 (18)
+#define NVSYNCPT_2D_1 (19)
+#define NVSYNCPT_3D (22)
+#define NVSYNCPT_MPE (23)
+#define NVSYNCPT_DISP0 (24)
+#define NVSYNCPT_DISP1 (25)
+#define NVSYNCPT_VBLANK0 (26)
+#define NVSYNCPT_VBLANK1 (27)
+#define NVSYNCPT_MPE_EBM_EOF (28)
+#define NVSYNCPT_MPE_WR_SAFE (29)
+#define NVSYNCPT_DSI (31)
+#define NVSYNCPT_INVALID (-1)
+
+/*#define NVSYNCPT_2D_CHANNEL2_0 (20) */
+/*#define NVSYNCPT_2D_CHANNEL2_1 (21) */
+/*#define NVSYNCPT_2D_TINYBLT_WAR (30)*/
+/*#define NVSYNCPT_2D_TINYBLT_RESTORE_CLASS_ID (30)*/
+
+/* sync points that are wholly managed by the client */
+#define NVSYNCPTS_CLIENT_MANAGED ( \
+ BIT(NVSYNCPT_DISP0) | BIT(NVSYNCPT_DISP1) | BIT(NVSYNCPT_DSI) | \
+ BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_2) | \
+ BIT(NVSYNCPT_VI_ISP_3) | BIT(NVSYNCPT_VI_ISP_4) | BIT(NVSYNCPT_VI_ISP_5) | \
+ BIT(NVSYNCPT_MPE_EBM_EOF) | BIT(NVSYNCPT_MPE_WR_SAFE) | \
+ BIT(NVSYNCPT_2D_1))
+
+#define NVWAITBASE_2D_0 (1)
+#define NVWAITBASE_2D_1 (2)
+#define NVWAITBASE_3D (3)
+#define NVWAITBASE_MPE (4)
+
+/* Software shadows of the host1x sync point state. */
+struct nvhost_syncpt {
+	atomic_t min_val[NV_HOST1X_SYNCPT_NB_PTS];	/* last value read from hw */
+	atomic_t max_val[NV_HOST1X_SYNCPT_NB_PTS];	/* highest value promised to hw */
+	u32 base_val[NV_HOST1X_SYNCPT_NB_BASES];	/* waitbase register shadows */
+};
+
+/**
+ * Atomically advances the max shadow by 'incrs' and returns the new
+ * value (the value the hw will eventually reach).
+ */
+static inline u32 nvhost_syncpt_incr_max(struct nvhost_syncpt *sp,
+					u32 id, u32 incrs)
+{
+	return (u32)atomic_add_return(incrs, &sp->max_val[id]);
+}
+
+/**
+ * Overwrites the max shadow with 'val' and publishes it (smp_wmb pairs
+ * with the smp_rmb in readers such as check_max). Returns 'val'.
+ */
+static inline u32 nvhost_syncpt_set_max(struct nvhost_syncpt *sp,
+					u32 id, u32 val)
+{
+	atomic_set(&sp->max_val[id], val);
+	smp_wmb();
+	return val;
+}
+
+/* Returns the current max shadow for sync point 'id'. */
+static inline u32 nvhost_syncpt_read_max(struct nvhost_syncpt *sp, u32 id)
+{
+	smp_rmb();	/* pairs with smp_wmb in nvhost_syncpt_set_max */
+	return (u32)atomic_read(&sp->max_val[id]);
+}
+
+/**
+ * Returns true if the cached syncpoint value has reached 'thresh'.
+ * Wrapping s32 comparison keeps the result valid across u32 rollover;
+ * only consults the sw shadow, never hw.
+ */
+static inline bool nvhost_syncpt_min_cmp(struct nvhost_syncpt *sp,
+					u32 id, u32 thresh)
+{
+	u32 cur;
+	smp_rmb();
+	cur = (u32)atomic_read(&sp->min_val[id]);
+	return ((s32)(cur - thresh) >= 0);
+}
+
+/**
+ * Returns true if syncpoint min == max, i.e. all work promised to hw
+ * has (as far as the sw shadows know) been retired.
+ */
+static inline bool nvhost_syncpt_min_eq_max(struct nvhost_syncpt *sp, u32 id)
+{
+	int min, max;
+	smp_rmb();
+	min = atomic_read(&sp->min_val[id]);
+	max = atomic_read(&sp->max_val[id]);
+	return (min == max);
+}
+
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id);
+
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id);
+
+void nvhost_syncpt_save(struct nvhost_syncpt *sp);
+
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp);
+
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id);
+
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id);
+
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id, u32 thresh,
+ u32 timeout);
+
+/* Wait without an explicit deadline.
+ * NOTE(review): MAX_SCHEDULE_TIMEOUT is a long; it is truncated to u32
+ * here and is not NVHOST_NO_TIMEOUT, so this is a very long finite wait
+ * rather than a true infinite one — confirm this is intended. */
+static inline int nvhost_syncpt_wait(struct nvhost_syncpt *sp, u32 id, u32 thresh)
+{
+	return nvhost_syncpt_wait_timeout(sp, id, thresh, MAX_SCHEDULE_TIMEOUT);
+}
+
+
+const char *nvhost_syncpt_name(u32 id);
+
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp);
+
+#endif
diff --git a/drivers/char/nvmap.c b/drivers/video/tegra/nvmap.c
index bc9edb7aa88d..448ac8f34771 100644
--- a/drivers/char/nvmap.c
+++ b/drivers/video/tegra/nvmap.c
@@ -41,9 +41,9 @@
#include <linux/rbtree.h>
#include <linux/proc_fs.h>
#include <linux/ctype.h>
+#include <linux/nvmap.h>
#include <asm/tlbflush.h>
#include <mach/iovmm.h>
-#include <mach/nvmem.h>
#include "nvcommon.h"
#include "nvrm_memmgr.h"
#include "nvbootargs.h"
@@ -101,6 +101,9 @@ static struct backing_dev_info nvmap_bdi = {
#define NVMAP_END (NVMAP_BASE + NVMAP_SIZE)
#define NVMAP_PAGES (NVMAP_SIZE >> PAGE_SHIFT)
+/* private nvmap_handle flag for pinning duplicate detection */
+#define NVMEM_HANDLE_VISITED (0x1ul << 31)
+
/* Heaps to use for kernel allocs when no heap list supplied */
#define NVMAP_KERNEL_DEFAULT_HEAPS (NVMEM_HEAP_SYSMEM | NVMEM_HEAP_CARVEOUT_GENERIC)
@@ -251,7 +254,7 @@ static unsigned long _nvmap_carveout_blockstat(struct nvmap_carveout *co,
val = max_t(unsigned long, val, co->blocks[idx].size);
idx = co->blocks[idx].next_free;
break;
- }
+ }
}
spin_unlock(&co->lock);
@@ -1102,10 +1105,10 @@ static int nvmap_pagealloc(struct nvmap_handle *h, bool contiguous)
for (i=0; i<cnt; i++) {
pages[i] = alloc_page(nvmap_gfp);
if (!pages[i]) {
- pr_err("failed to allocate %u pages after %u entries\n",
- cnt, i);
- goto fail;
- }
+ pr_err("failed to allocate %u pages after %u entries\n",
+ cnt, i);
+ goto fail;
+ }
}
}
@@ -2567,7 +2570,7 @@ static void _nvmap_create_heap_attrs(struct nvmap_carveout_node *n)
pr_err("%s: failed to create heap-%s device\n",
__func__, n->carveout.name);
return;
- }
+ }
if (sysfs_create_group(&n->dev.kobj, &nvmap_heap_defattr_group))
pr_err("%s: failed to create attribute group for heap-%s "
"device\n", __func__, n->carveout.name);
@@ -2871,7 +2874,7 @@ static int nvmap_split_carveout_heap(struct nvmap_carveout *co, size_t size,
/* NvRmMemMgr APIs implemented on top of nvmap */
-#ifdef CONFIG_TEGRA_NVRM
+#if defined(CONFIG_TEGRA_NVRM)
#include <linux/freezer.h>
NvU32 NvRmMemGetAddress(NvRmMemHandle hMem, NvU32 Offset)
@@ -3298,4 +3301,229 @@ NvError NvRmMemHandlePreserveHandle(NvRmMemHandle hMem, NvU32 *pKey)
{
return NvError_NotSupported;
}
+
#endif
+
+/* Returns the device-visible base address of an allocated handle:
+ * physical address of the first page for contiguous page allocs,
+ * the iovmm start for discontiguous allocs, or the carveout base.
+ * Handle must already be allocated (and pinned, for the iovmm case —
+ * TODO confirm: pgalloc.area is BUG_ON-checked, not error-returned). */
+static u32 nvmap_get_physaddr(struct nvmap_handle *h)
+{
+	u32 addr;
+
+	if (h->heap_pgalloc && h->pgalloc.contig) {
+		addr = page_to_phys(h->pgalloc.pages[0]);
+	} else if (h->heap_pgalloc) {
+		BUG_ON(!h->pgalloc.area);
+		addr = h->pgalloc.area->iovm_start;
+	} else {
+		addr = h->carveout.base;
+	}
+
+	return addr;
+}
+
+/**
+ * Kernel-internal allocation: creates a handle, allocates backing from
+ * the default kernel heaps, and (when 'map' is non-NULL) also maps the
+ * memory into kernel space, storing the mapping in *map and kern_map.
+ * Returns the handle or an ERR_PTR; on any failure after creation the
+ * handle is freed before returning.
+ */
+struct nvmap_handle *nvmap_alloc(
+	size_t size, size_t align,
+	unsigned int flags, void **map)
+{
+	struct nvmap_handle_ref *r = NULL;
+	struct nvmap_handle *h;
+	int err;
+
+	err = _nvmap_do_create(&nvmap_context.init_data,
+		NVMEM_IOC_CREATE, (unsigned long)size, true, &r);
+	if (err || !r)
+		return ERR_PTR(err);
+	h = r->h;
+
+	err = _nvmap_do_alloc(&nvmap_context.init_data,
+		(unsigned long)h, NVMAP_KERNEL_DEFAULT_HEAPS,
+		align, flags);
+	if (err) {
+		_nvmap_do_free(&nvmap_context.init_data, (unsigned long)h);
+		return ERR_PTR(err);
+	}
+
+	if (!map)
+		return h;
+
+	if (h->heap_pgalloc) {
+		*map = vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT, -1,
+			_nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
+	} else {
+		size_t mapaddr = h->carveout.base;
+		size_t mapsize = h->size;
+
+		/* page-align the region before ioremap */
+		mapsize += (mapaddr & ~PAGE_MASK);
+		mapaddr &= PAGE_MASK;
+		mapsize = (mapsize + PAGE_SIZE - 1) & PAGE_MASK;
+
+		/* TODO: [ahatala 2010-06-21] honor coherency flag? */
+		*map = ioremap_wc(mapaddr, mapsize);
+		if (*map)
+			/* re-add the sub-page offset of the carveout base */
+			*map += (h->carveout.base - mapaddr);
+	}
+	if (!*map) {
+		_nvmap_do_free(&nvmap_context.init_data, (unsigned long)h);
+		return ERR_PTR(-ENOMEM);
+	}
+	/* TODO: [ahatala 2010-06-22] get rid of kern_map */
+	h->kern_map = *map;
+	return h;
+}
+
+/**
+ * Release a handle obtained from nvmap_alloc(); 'map' must be the
+ * kernel mapping returned through nvmap_alloc's map argument (or NULL
+ * if none was requested). Unmaps first, then drops the handle.
+ */
+void nvmap_free(struct nvmap_handle *h, void *map)
+{
+	if (map) {
+		BUG_ON(h->kern_map != map);
+
+		if (h->heap_pgalloc) {
+			vm_unmap_ram(map, h->size >> PAGE_SHIFT);
+		} else {
+			unsigned long addr = (unsigned long)map;
+			/* nvmap_alloc returned the ioremap cookie plus the
+			 * sub-page offset of the carveout base; mask the
+			 * offset *off* (addr &= PAGE_MASK) to recover the
+			 * base iounmap expects. The original '&= ~PAGE_MASK'
+			 * kept only the offset and unmapped garbage. */
+			addr &= PAGE_MASK;
+			iounmap((void *)addr);
+		}
+		h->kern_map = NULL;
+	}
+	_nvmap_do_free(&nvmap_context.init_data, (unsigned long)h);
+}
+
+/**
+ * Pin a single handle and return its device-visible address.
+ * Retries forever on pin failure (logging each attempt) — this is a
+ * deliberate busy-retry; callers cannot tolerate pin failure here.
+ */
+u32 nvmap_pin_single(struct nvmap_handle *h)
+{
+	int ret;
+	do {
+		ret = _nvmap_handle_pin_fast(1, &h);
+		if (ret) {
+			pr_err("%s: failed to pin handle\n", __func__);
+			dump_stack();
+		}
+	} while (ret);
+
+	return nvmap_get_physaddr(h);
+}
+
+/**
+ * Pin every distinct handle referenced by 'arr' and patch each element's
+ * patch_mem at patch_offset with the pinned address of pin_mem plus
+ * pin_offset. The distinct pinned handles are returned in 'unique_arr'
+ * (count in *num_unique) for the caller to later nvmap_unpin().
+ * 'wait' selects blocking vs non-blocking pin. Returns 0 or -errno;
+ * on failure everything pinned so far is unpinned.
+ */
+int nvmap_pin_array(struct file *filp,
+		struct nvmap_pinarray_elem *arr, int num_elems,
+		struct nvmap_handle **unique_arr, int *num_unique, bool wait)
+{
+	struct nvmap_pinarray_elem *elem;
+	struct nvmap_file_priv *priv = filp->private_data;
+	int i, unique_idx = 0;
+	unsigned long pfn = 0;
+	void *pteaddr = NULL;
+	int ret = 0;
+
+	mutex_lock(&nvmap_pin_lock);
+
+	/* find unique handles, pin them and collect into unpin array;
+	 * NVMEM_HANDLE_VISITED marks duplicates within this call */
+	for (elem = arr, i = num_elems; i && !ret; i--, elem++) {
+		struct nvmap_handle *to_pin = elem->pin_mem;
+		if (to_pin->poison != NVDA_POISON) {
+			pr_err("%s: handle is poisoned\n", __func__);
+			ret = -EFAULT;
+		}
+		else if (!(to_pin->flags & NVMEM_HANDLE_VISITED)) {
+			/* non-superuser callers may only pin handles they
+			 * own a reference to, unless the handle is global */
+			if (!priv->su && !to_pin->global) {
+				struct nvmap_handle_ref *r;
+				spin_lock(&priv->ref_lock);
+				r = _nvmap_ref_lookup_locked(priv,
+					(unsigned long)to_pin);
+				spin_unlock(&priv->ref_lock);
+				if (!r) {
+					pr_err("%s: handle access failure\n", __func__);
+					ret = -EPERM;
+					break;
+				}
+			}
+			if (wait) {
+				ret = wait_event_interruptible(
+					nvmap_pin_wait,
+					!_nvmap_handle_pin_locked(to_pin));
+			}
+			else
+				ret = _nvmap_handle_pin_locked(to_pin);
+			if (!ret) {
+				to_pin->flags |= NVMEM_HANDLE_VISITED;
+				unique_arr[unique_idx++] = to_pin;
+			}
+		}
+	}
+
+	/* clear visited flags before releasing mutex */
+	i = unique_idx;
+	while (i--)
+		unique_arr[i]->flags &= ~NVMEM_HANDLE_VISITED;
+
+	mutex_unlock(&nvmap_pin_lock);
+
+	/* scratch pte used below to patch pages without a kernel mapping */
+	if (!ret)
+		ret = nvmap_map_pte(pfn, pgprot_kernel, &pteaddr);
+
+	if (unlikely(ret)) {
+		/* undo every pin taken above */
+		int do_wake = 0;
+		i = unique_idx;
+		while (i--)
+			do_wake |= _nvmap_handle_unpin(unique_arr[i]);
+		if (do_wake)
+			wake_up(&nvmap_pin_wait);
+		return ret;
+	}
+
+	for (elem = arr, i = num_elems; i; i--, elem++) {
+		struct nvmap_handle *h_patch = elem->patch_mem;
+		struct nvmap_handle *h_pin = elem->pin_mem;
+		struct page *page = NULL;
+		u32* patch_addr;
+
+		/* commit iovmm mapping */
+		if (h_pin->heap_pgalloc && h_pin->pgalloc.dirty)
+			_nvmap_handle_iovmm_map(h_pin);
+
+		/* patch: use the kernel mapping when one exists, otherwise
+		 * temporarily map the target page through the scratch pte */
+		if (h_patch->kern_map) {
+			patch_addr = (u32*)((unsigned long)h_patch->kern_map +
+				elem->patch_offset);
+		} else {
+			unsigned long phys, new_pfn;
+			if (h_patch->heap_pgalloc) {
+				page = h_patch->pgalloc.pages[elem->patch_offset >> PAGE_SHIFT];
+				get_page(page);
+				phys = page_to_phys(page) + (elem->patch_offset & ~PAGE_MASK);
+			} else {
+				phys = h_patch->carveout.base + elem->patch_offset;
+			}
+			new_pfn = __phys_to_pfn(phys);
+			/* only rewrite the pte when the target page changes */
+			if (new_pfn != pfn) {
+				_nvmap_set_pte_at((unsigned long)pteaddr, new_pfn,
+					_nvmap_flag_to_pgprot(h_patch->flags, pgprot_kernel));
+				pfn = new_pfn;
+			}
+			patch_addr = (u32*)((unsigned long)pteaddr + (phys & ~PAGE_MASK));
+		}
+
+		*patch_addr = nvmap_get_physaddr(h_pin) + elem->pin_offset;
+
+		if (page)
+			put_page(page);
+	}
+	nvmap_unmap_pte(pteaddr);
+	*num_unique = unique_idx;
+	return 0;
+}
+
+/* Unpin an array of handles; wake pin waiters if any unpin freed a slot. */
+void nvmap_unpin(struct nvmap_handle **h, int num_handles)
+{
+	int wake = 0;
+	int i;
+
+	for (i = 0; i < num_handles; i++) {
+		BUG_ON(!h[i]);
+		wake |= _nvmap_handle_unpin(h[i]);
+	}
+
+	if (wake)
+		wake_up(&nvmap_pin_wait);
+}
+
+/* Returns 0 iff 'f' refers to an nvmap device node (user or kernel fops). */
+int nvmap_validate_file(struct file *f)
+{
+	const struct file_operations *ops = f->f_op;
+
+	if (ops == &knvmap_fops || ops == &nvmap_fops)
+		return 0;
+
+	return -EFAULT;
+}
diff --git a/drivers/video/tegra-fb.c b/drivers/video/tegra/tegra-fb.c
index 68e4506aad3e..68e4506aad3e 100644
--- a/drivers/video/tegra-fb.c
+++ b/drivers/video/tegra/tegra-fb.c
diff --git a/include/linux/nvhost.h b/include/linux/nvhost.h
new file mode 100644
index 000000000000..26a1a4e00faf
--- /dev/null
+++ b/include/linux/nvhost.h
@@ -0,0 +1,123 @@
+/*
+ * include/linux/nvhost.h
+ *
+ * Tegra graphics host driver, userspace interface
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_H
+#define __NVHOST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#define NVHOST_NO_TIMEOUT (-1)
+#define NVHOST_IOCTL_MAGIC 'H'
+
+struct nvhost_submit_hdr {
+ __u32 syncpt_id;
+ __u32 syncpt_incrs;
+ __u32 num_cmdbufs;
+ __u32 num_relocs;
+};
+
+struct nvhost_cmdbuf {
+ __u32 mem;
+ __u32 offset;
+ __u32 words;
+};
+
+struct nvhost_reloc {
+ __u32 cmdbuf_mem;
+ __u32 cmdbuf_offset;
+ __u32 target;
+ __u32 target_offset;
+};
+
+struct nvhost_get_param_args {
+ __u32 value;
+};
+
+struct nvhost_set_nvmap_fd_args {
+ __u32 fd;
+};
+
+#define NVHOST_IOCTL_CHANNEL_FLUSH \
+ _IOR(NVHOST_IOCTL_MAGIC, 1, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS \
+ _IOR(NVHOST_IOCTL_MAGIC, 2, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_WAITBASES \
+ _IOR(NVHOST_IOCTL_MAGIC, 3, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES \
+ _IOR(NVHOST_IOCTL_MAGIC, 4, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD \
+ _IOW(NVHOST_IOCTL_MAGIC, 5, struct nvhost_set_nvmap_fd_args)
+#define NVHOST_IOCTL_CHANNEL_LAST \
+ _IOC_NR(NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD)
+#define NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE sizeof(struct nvhost_get_param_args)
+
+struct nvhost_ctrl_syncpt_read_args {
+ __u32 id;
+ __u32 value;
+};
+
+struct nvhost_ctrl_syncpt_incr_args {
+ __u32 id;
+};
+
+struct nvhost_ctrl_syncpt_wait_args {
+ __u32 id;
+ __u32 thresh;
+ __s32 timeout;
+};
+
+struct nvhost_ctrl_module_mutex_args {
+ __u32 id;
+ __u32 lock;
+};
+
+struct nvhost_ctrl_module_regrdwr_args {
+ __u32 id;
+ __u32 num_offsets;
+ __u32 block_size;
+ __u32 *offsets;
+ __u32 *values;
+ __u32 write;
+};
+
+#define NVHOST_IOCTL_CTRL_SYNCPT_READ \
+ _IOWR(NVHOST_IOCTL_MAGIC, 1, struct nvhost_ctrl_syncpt_read_args)
+#define NVHOST_IOCTL_CTRL_SYNCPT_INCR \
+ _IOW(NVHOST_IOCTL_MAGIC, 2, struct nvhost_ctrl_syncpt_incr_args)
+#define NVHOST_IOCTL_CTRL_SYNCPT_WAIT \
+ _IOW(NVHOST_IOCTL_MAGIC, 3, struct nvhost_ctrl_syncpt_wait_args)
+
+#define NVHOST_IOCTL_CTRL_MODULE_MUTEX \
+ _IOWR(NVHOST_IOCTL_MAGIC, 4, struct nvhost_ctrl_module_mutex_args)
+#define NVHOST_IOCTL_CTRL_MODULE_REGRDWR \
+ _IOWR(NVHOST_IOCTL_MAGIC, 5, struct nvhost_ctrl_module_regrdwr_args)
+
+#define NVHOST_IOCTL_CTRL_LAST \
+ _IOC_NR(NVHOST_IOCTL_CTRL_MODULE_REGRDWR)
+#define NVHOST_IOCTL_CTRL_MAX_ARG_SIZE sizeof(struct nvhost_ctrl_module_regrdwr_args)
+
+#endif
diff --git a/arch/arm/mach-tegra/include/mach/nvmem.h b/include/linux/nvmap.h
index 3be28fff8856..ff1f65491025 100644
--- a/arch/arm/mach-tegra/include/mach/nvmem.h
+++ b/include/linux/nvmap.h
@@ -1,5 +1,5 @@
/*
- * arch/arm/mach-tegra/include/linux/nvmem_ioctl.h
+ * include/linux/nvmap.h
*
* structure declarations for nvmem and nvmap user-space ioctls
*
@@ -21,13 +21,14 @@
*/
#include <linux/ioctl.h>
+#include <linux/file.h>
#if !defined(__KERNEL__)
#define __user
#endif
-#ifndef _MACH_TEGRA_NVMEM_IOCTL_H_
-#define _MACH_TEGRA_NVMEM_IOCTL_H_
+#ifndef __NVMAP_H
+#define __NVMAP_H
struct nvmem_create_handle {
union {
@@ -152,7 +153,31 @@ struct nvmem_cache_op {
#define NVMEM_IOC_MAXNR (_IOC_NR(NVMEM_IOC_GET_ID))
+#if defined(__KERNEL__)
+
+struct nvmap_handle;
+
+struct nvmap_pinarray_elem {
+ struct nvmap_handle *patch_mem;
+ u32 patch_offset;
+ struct nvmap_handle *pin_mem;
+ u32 pin_offset;
+};
+
+int nvmap_validate_file(struct file *filep);
+struct nvmap_handle *nvmap_alloc(
+ size_t size, size_t align,
+ unsigned int flags, void **map);
+void nvmap_free(struct nvmap_handle *h, void *map);
+u32 nvmap_pin_single(struct nvmap_handle *h);
+int nvmap_pin_array(struct file *filp,
+ struct nvmap_pinarray_elem *arr, int num_elems,
+ struct nvmap_handle **unique_arr, int *num_unique, bool wait);
+void nvmap_unpin(struct nvmap_handle **h, int num_handles);
+
int nvmap_add_carveout_heap(unsigned long base, size_t size,
const char *name, unsigned int bitmask);
#endif
+
+#endif