path: root/drivers/mxc/amd-gpu/platform
author    Ian Wisbon <ian.wisbon@timesys.com>    2011-02-10 17:15:15 -0500
committer Ian Wisbon <ian.wisbon@timesys.com>    2011-02-14 14:29:06 -0500
commit    14a4057959f8ee0a2249eb2abd64fd6b1f571d98 (patch)
tree      6df655a665a5a56bd6b7cae5e2689fc5a1b6c965 /drivers/mxc/amd-gpu/platform
parent    0eb553bf96e2c990d3bfccaa07da0863624c89ab (diff)
Digi Release Code - 02142011 Missing Files Fix (tag: 2.6.31-digi-201102141503)
Diffstat (limited to 'drivers/mxc/amd-gpu/platform')
-rw-r--r--  drivers/mxc/amd-gpu/platform/hal/linux/gsl_hal.c            579
-rw-r--r--  drivers/mxc/amd-gpu/platform/hal/linux/gsl_hwaccess.h       142
-rw-r--r--  drivers/mxc/amd-gpu/platform/hal/linux/gsl_kmod.c           964
-rw-r--r--  drivers/mxc/amd-gpu/platform/hal/linux/gsl_kmod_cleanup.c   269
-rw-r--r--  drivers/mxc/amd-gpu/platform/hal/linux/gsl_kmod_cleanup.h    90
-rw-r--r--  drivers/mxc/amd-gpu/platform/hal/linux/gsl_linux_map.c      221
-rw-r--r--  drivers/mxc/amd-gpu/platform/hal/linux/gsl_linux_map.h       46
-rw-r--r--  drivers/mxc/amd-gpu/platform/hal/linux/misc.c               129
8 files changed, 2440 insertions, 0 deletions
diff --git a/drivers/mxc/amd-gpu/platform/hal/linux/gsl_hal.c b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_hal.c
new file mode 100644
index 000000000000..8d452830cc08
--- /dev/null
+++ b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_hal.c
@@ -0,0 +1,579 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+/*
+ * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+#include "gsl_hal.h"
+#include "gsl_halconfig.h"
+#include "gsl_linux_map.h"
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include <asm/atomic.h>
+#include <linux/uaccess.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+#define GSL_HAL_MEM1 0
+#define GSL_HAL_MEM2 1
+#define GSL_HAL_MEM3 2
+
+/* #define GSL_HAL_DEBUG */
+
+extern phys_addr_t gpu_2d_regbase;
+extern int gpu_2d_regsize;
+extern phys_addr_t gpu_3d_regbase;
+extern int gpu_3d_regsize;
+extern int gmem_size;
+extern phys_addr_t gpu_reserved_mem;
+extern int gpu_reserved_mem_size;
+extern int gpu_2d_irq, gpu_3d_irq;
+
+
+KGSLHAL_API int
+kgsl_hal_allocphysical(unsigned int virtaddr, unsigned int numpages, unsigned int scattergatterlist[])
+{
+ /* allocate physically contiguous memory */
+
+ int i;
+ void *va;
+
+ va = gsl_linux_map_alloc(virtaddr, numpages*PAGE_SIZE);
+
+ if (!va)
+ return GSL_FAILURE_OUTOFMEM;
+
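+ /* record the physical address of each backing page in the caller's scatter-gather list */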
+ for (i = 0; i < numpages; i++) {
+ scattergatterlist[i] = page_to_phys(vmalloc_to_page(va));
+ va += PAGE_SIZE;
+ }
+
+ return GSL_SUCCESS;
+}
+
+/* --------------------------------------------------------------------------- */
+
+KGSLHAL_API int
+kgsl_hal_freephysical(unsigned int virtaddr, unsigned int numpages, unsigned int scattergatterlist[])
+{
+ /* free physical memory */
+
+ gsl_linux_map_free(virtaddr);
+
+ return GSL_SUCCESS;
+}
+
+/* ---------------------------------------------------------------------------- */
+
+KGSLHAL_API int
+kgsl_hal_init(void)
+{
+ gsl_hal_t *hal;
+ unsigned long totalsize, mem1size;
+ unsigned int va, pa;
+
+ if (gsl_driver.hal) {
+ return GSL_FAILURE_ALREADYINITIALIZED;
+ }
+
+ gsl_driver.hal = (void *)kos_malloc(sizeof(gsl_hal_t));
+
+ if (!gsl_driver.hal) {
+ return GSL_FAILURE_OUTOFMEM;
+ }
+
+ kos_memset(gsl_driver.hal, 0, sizeof(gsl_hal_t));
+
+
+ /* overlay structure on hal memory */
+ hal = (gsl_hal_t *) gsl_driver.hal;
+
+ if (gpu_3d_regbase && gpu_3d_regsize && gpu_3d_irq) {
+ hal->has_z430 = 1;
+ } else {
+ hal->has_z430 = 0;
+ }
+
+ if (gpu_2d_regbase && gpu_2d_regsize && gpu_2d_irq) {
+ hal->has_z160 = 1;
+ } else {
+ hal->has_z160 = 0;
+ }
+
+ /* enabling the MMU still causes problems, so leave it disabled for now */
+ gsl_driver.enable_mmu = 0;
+
+ /* setup register space */
+ if (hal->has_z430) {
+ hal->z430_regspace.mmio_phys_base = gpu_3d_regbase;
+ hal->z430_regspace.sizebytes = gpu_3d_regsize;
+ hal->z430_regspace.mmio_virt_base = (unsigned char *)ioremap(hal->z430_regspace.mmio_phys_base, hal->z430_regspace.sizebytes);
+
+ if (hal->z430_regspace.mmio_virt_base == NULL) {
+ return GSL_FAILURE_SYSTEMERROR;
+ }
+
+#ifdef GSL_HAL_DEBUG
+ printk(KERN_INFO "%s: hal->z430_regspace.mmio_phys_base = 0x%p\n", __func__, (void *)hal->z430_regspace.mmio_phys_base);
+ printk(KERN_INFO "%s: hal->z430_regspace.mmio_virt_base = 0x%p\n", __func__, (void *)hal->z430_regspace.mmio_virt_base);
+ printk(KERN_INFO "%s: hal->z430_regspace.sizebytes = 0x%08x\n", __func__, hal->z430_regspace.sizebytes);
+#endif
+ }
+
+ if (hal->has_z160) {
+ hal->z160_regspace.mmio_phys_base = gpu_2d_regbase;
+ hal->z160_regspace.sizebytes = gpu_2d_regsize;
+ hal->z160_regspace.mmio_virt_base = (unsigned char *)ioremap(hal->z160_regspace.mmio_phys_base, hal->z160_regspace.sizebytes);
+
+ if (hal->z160_regspace.mmio_virt_base == NULL) {
+ return GSL_FAILURE_SYSTEMERROR;
+ }
+
+#ifdef GSL_HAL_DEBUG
+ printk(KERN_INFO "%s: hal->z160_regspace.mmio_phys_base = 0x%p\n", __func__, (void *)hal->z160_regspace.mmio_phys_base);
+ printk(KERN_INFO "%s: hal->z160_regspace.mmio_virt_base = 0x%p\n", __func__, (void *)hal->z160_regspace.mmio_virt_base);
+ printk(KERN_INFO "%s: hal->z160_regspace.sizebytes = 0x%08x\n", __func__, hal->z160_regspace.sizebytes);
+#endif
+ }
+
+ if (gsl_driver.enable_mmu) {
+ totalsize = GSL_HAL_SHMEM_SIZE_EMEM2_MMU + GSL_HAL_SHMEM_SIZE_PHYS_MMU;
+ mem1size = GSL_HAL_SHMEM_SIZE_EMEM1_MMU;
+ if (gpu_reserved_mem && gpu_reserved_mem_size >= totalsize) {
+ pa = gpu_reserved_mem;
+ va = (unsigned int)ioremap(gpu_reserved_mem, totalsize);
+ } else {
+ va = (unsigned int)dma_alloc_coherent(0, totalsize, (dma_addr_t *)&pa, GFP_DMA | GFP_KERNEL);
+ }
+ } else {
+ if (gpu_reserved_mem && gpu_reserved_mem_size >= SZ_8M) {
+ totalsize = gpu_reserved_mem_size;
+ pa = gpu_reserved_mem;
+ va = (unsigned int)ioremap(gpu_reserved_mem, gpu_reserved_mem_size);
+ } else {
+ gpu_reserved_mem = 0;
+ totalsize = GSL_HAL_SHMEM_SIZE_EMEM1_NOMMU + GSL_HAL_SHMEM_SIZE_EMEM2_NOMMU + GSL_HAL_SHMEM_SIZE_PHYS_NOMMU;
+ va = (unsigned int)dma_alloc_coherent(0, totalsize, (dma_addr_t *)&pa, GFP_DMA | GFP_KERNEL);
+ }
+ mem1size = totalsize - (GSL_HAL_SHMEM_SIZE_EMEM2_NOMMU + GSL_HAL_SHMEM_SIZE_PHYS_NOMMU);
+ }
+
+ if (va) {
+ kos_memset((void *)va, 0, totalsize);
+
+ hal->memchunk.mmio_virt_base = (void *)va;
+ hal->memchunk.mmio_phys_base = pa;
+ hal->memchunk.sizebytes = totalsize;
+
+#ifdef GSL_HAL_DEBUG
+ printk(KERN_INFO "%s: hal->memchunk.mmio_phys_base = 0x%p\n", __func__, (void *)hal->memchunk.mmio_phys_base);
+ printk(KERN_INFO "%s: hal->memchunk.mmio_virt_base = 0x%p\n", __func__, (void *)hal->memchunk.mmio_virt_base);
+ printk(KERN_INFO "%s: hal->memchunk.sizebytes = 0x%08x\n", __func__, hal->memchunk.sizebytes);
+#endif
+
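+ /* carve the contiguous chunk into the MEM2 (EMEM2), MEM3 (PHYS) and MEM1 (EMEM1) apertures in turn */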
+ hal->memspace[GSL_HAL_MEM2].mmio_virt_base = (void *) va;
+ hal->memspace[GSL_HAL_MEM2].gpu_base = pa;
+ if (gsl_driver.enable_mmu) {
+ hal->memspace[GSL_HAL_MEM2].sizebytes = GSL_HAL_SHMEM_SIZE_EMEM2_MMU;
+ va += GSL_HAL_SHMEM_SIZE_EMEM2_MMU;
+ pa += GSL_HAL_SHMEM_SIZE_EMEM2_MMU;
+ } else {
+ hal->memspace[GSL_HAL_MEM2].sizebytes = GSL_HAL_SHMEM_SIZE_EMEM2_NOMMU;
+ va += GSL_HAL_SHMEM_SIZE_EMEM2_NOMMU;
+ pa += GSL_HAL_SHMEM_SIZE_EMEM2_NOMMU;
+ }
+
+#ifdef GSL_HAL_DEBUG
+ printk(KERN_INFO "%s: hal->memspace[GSL_HAL_MEM2].gpu_base = 0x%p\n", __func__, (void *)hal->memspace[GSL_HAL_MEM2].gpu_base);
+ printk(KERN_INFO "%s: hal->memspace[GSL_HAL_MEM2].mmio_virt_base = 0x%p\n", __func__, (void *)hal->memspace[GSL_HAL_MEM2].mmio_virt_base);
+ printk(KERN_INFO "%s: hal->memspace[GSL_HAL_MEM2].sizebytes = 0x%08x\n", __func__, hal->memspace[GSL_HAL_MEM2].sizebytes);
+#endif
+
+ hal->memspace[GSL_HAL_MEM3].mmio_virt_base = (void *) va;
+ hal->memspace[GSL_HAL_MEM3].gpu_base = pa;
+ if (gsl_driver.enable_mmu) {
+ hal->memspace[GSL_HAL_MEM3].sizebytes = GSL_HAL_SHMEM_SIZE_PHYS_MMU;
+ va += GSL_HAL_SHMEM_SIZE_PHYS_MMU;
+ pa += GSL_HAL_SHMEM_SIZE_PHYS_MMU;
+ } else {
+ hal->memspace[GSL_HAL_MEM3].sizebytes = GSL_HAL_SHMEM_SIZE_PHYS_NOMMU;
+ va += GSL_HAL_SHMEM_SIZE_PHYS_NOMMU;
+ pa += GSL_HAL_SHMEM_SIZE_PHYS_NOMMU;
+ }
+
+#ifdef GSL_HAL_DEBUG
+ printk(KERN_INFO "%s: hal->memspace[GSL_HAL_MEM3].gpu_base = 0x%p\n", __func__, (void *)hal->memspace[GSL_HAL_MEM3].gpu_base);
+ printk(KERN_INFO "%s: hal->memspace[GSL_HAL_MEM3].mmio_virt_base = 0x%p\n", __func__, (void *)hal->memspace[GSL_HAL_MEM3].mmio_virt_base);
+ printk(KERN_INFO "%s: hal->memspace[GSL_HAL_MEM3].sizebytes = 0x%08x\n", __func__, hal->memspace[GSL_HAL_MEM3].sizebytes);
+#endif
+
+ if (gsl_driver.enable_mmu) {
+ gsl_linux_map_init();
+ hal->memspace[GSL_HAL_MEM1].mmio_virt_base = (void *)GSL_LINUX_MAP_RANGE_START;
+ hal->memspace[GSL_HAL_MEM1].gpu_base = GSL_LINUX_MAP_RANGE_START;
+ hal->memspace[GSL_HAL_MEM1].sizebytes = mem1size;
+ } else {
+ hal->memspace[GSL_HAL_MEM1].mmio_virt_base = (void *) va;
+ hal->memspace[GSL_HAL_MEM1].gpu_base = pa;
+ hal->memspace[GSL_HAL_MEM1].sizebytes = mem1size;
+ }
+
+#ifdef GSL_HAL_DEBUG
+ printk(KERN_INFO "%s: hal->memspace[GSL_HAL_MEM1].gpu_base = 0x%p\n", __func__, (void *)hal->memspace[GSL_HAL_MEM1].gpu_base);
+ printk(KERN_INFO "%s: hal->memspace[GSL_HAL_MEM1].mmio_virt_base = 0x%p\n", __func__, (void *)hal->memspace[GSL_HAL_MEM1].mmio_virt_base);
+ printk(KERN_INFO "%s: hal->memspace[GSL_HAL_MEM1].sizebytes = 0x%08x\n", __func__, hal->memspace[GSL_HAL_MEM1].sizebytes);
+#endif
+ } else {
+ kgsl_hal_close();
+ return GSL_FAILURE_SYSTEMERROR;
+ }
+
+ return GSL_SUCCESS;
+}
+
+/* ---------------------------------------------------------------------------- */
+
+KGSLHAL_API int
+kgsl_hal_close(void)
+{
+ gsl_hal_t *hal;
+
+ if (gsl_driver.hal) {
+ /* overlay structure on hal memory */
+ hal = (gsl_hal_t *) gsl_driver.hal;
+
+ /* unmap registers */
+ if (hal->has_z430 && hal->z430_regspace.mmio_virt_base) {
+ iounmap(hal->z430_regspace.mmio_virt_base);
+ }
+
+ if (hal->has_z160 && hal->z160_regspace.mmio_virt_base) {
+ iounmap(hal->z160_regspace.mmio_virt_base);
+ }
+
+ /* free physical block */
+ if (hal->memchunk.mmio_virt_base && gpu_reserved_mem) {
+ iounmap(hal->memchunk.mmio_virt_base);
+ } else {
+ dma_free_coherent(0, hal->memchunk.sizebytes, hal->memchunk.mmio_virt_base, hal->memchunk.mmio_phys_base);
+ }
+
+ if (gsl_driver.enable_mmu) {
+ gsl_linux_map_destroy();
+ }
+
+ /* release hal struct */
+ kos_memset(hal, 0, sizeof(gsl_hal_t));
+ kos_free(gsl_driver.hal);
+ gsl_driver.hal = NULL;
+ }
+
+ return GSL_SUCCESS;
+}
+
+/* ---------------------------------------------------------------------------- */
+
+KGSLHAL_API int
+kgsl_hal_getshmemconfig(gsl_shmemconfig_t *config)
+{
+ int status = GSL_FAILURE_DEVICEERROR;
+ gsl_hal_t *hal = (gsl_hal_t *) gsl_driver.hal;
+
+ kos_memset(config, 0, sizeof(gsl_shmemconfig_t));
+
+ if (hal) {
+ config->numapertures = GSL_SHMEM_MAX_APERTURES;
+
+ if (gsl_driver.enable_mmu) {
+ config->apertures[0].id = GSL_APERTURE_MMU;
+ } else {
+ config->apertures[0].id = GSL_APERTURE_EMEM;
+ }
+ config->apertures[0].channel = GSL_CHANNEL_1;
+ config->apertures[0].hostbase = (unsigned int)hal->memspace[GSL_HAL_MEM1].mmio_virt_base;
+ config->apertures[0].gpubase = hal->memspace[GSL_HAL_MEM1].gpu_base;
+ config->apertures[0].sizebytes = hal->memspace[GSL_HAL_MEM1].sizebytes;
+
+ config->apertures[1].id = GSL_APERTURE_EMEM;
+ config->apertures[1].channel = GSL_CHANNEL_2;
+ config->apertures[1].hostbase = (unsigned int)hal->memspace[GSL_HAL_MEM2].mmio_virt_base;
+ config->apertures[1].gpubase = hal->memspace[GSL_HAL_MEM2].gpu_base;
+ config->apertures[1].sizebytes = hal->memspace[GSL_HAL_MEM2].sizebytes;
+
+ config->apertures[2].id = GSL_APERTURE_PHYS;
+ config->apertures[2].channel = GSL_CHANNEL_1;
+ config->apertures[2].hostbase = (unsigned int)hal->memspace[GSL_HAL_MEM3].mmio_virt_base;
+ config->apertures[2].gpubase = hal->memspace[GSL_HAL_MEM3].gpu_base;
+ config->apertures[2].sizebytes = hal->memspace[GSL_HAL_MEM3].sizebytes;
+
+ status = GSL_SUCCESS;
+ }
+
+ return status;
+}
+
+/* ---------------------------------------------------------------------------- */
+
+KGSLHAL_API int
+kgsl_hal_getdevconfig(gsl_deviceid_t device_id, gsl_devconfig_t *config)
+{
+ int status = GSL_FAILURE_DEVICEERROR;
+ gsl_hal_t *hal = (gsl_hal_t *) gsl_driver.hal;
+
+ kos_memset(config, 0, sizeof(gsl_devconfig_t));
+
+ if (hal) {
+ switch (device_id) {
+ case GSL_DEVICE_YAMATO:
+ {
+ if (hal->has_z430) {
+ mh_mmu_config_u mmu_config = {0};
+
+ config->gmemspace.gpu_base = 0;
+ config->gmemspace.mmio_virt_base = 0;
+ config->gmemspace.mmio_phys_base = 0;
+ if (gmem_size) {
+ config->gmemspace.sizebytes = gmem_size;
+ } else {
+ config->gmemspace.sizebytes = 0;
+ }
+
+ config->regspace.gpu_base = 0;
+ config->regspace.mmio_virt_base = (unsigned char *)hal->z430_regspace.mmio_virt_base;
+ config->regspace.mmio_phys_base = (unsigned int) hal->z430_regspace.mmio_phys_base;
+ config->regspace.sizebytes = GSL_HAL_SIZE_REG_YDX;
+
+ mmu_config.f.mmu_enable = 1;
+
+ if (gsl_driver.enable_mmu) {
+ mmu_config.f.split_mode_enable = 0;
+ mmu_config.f.rb_w_clnt_behavior = 1;
+ mmu_config.f.cp_w_clnt_behavior = 1;
+ mmu_config.f.cp_r0_clnt_behavior = 1;
+ mmu_config.f.cp_r1_clnt_behavior = 1;
+ mmu_config.f.cp_r2_clnt_behavior = 1;
+ mmu_config.f.cp_r3_clnt_behavior = 1;
+ mmu_config.f.cp_r4_clnt_behavior = 1;
+ mmu_config.f.vgt_r0_clnt_behavior = 1;
+ mmu_config.f.vgt_r1_clnt_behavior = 1;
+ mmu_config.f.tc_r_clnt_behavior = 1;
+ mmu_config.f.pa_w_clnt_behavior = 1;
+ }
+
+ config->mmu_config = mmu_config.val;
+
+ if (gsl_driver.enable_mmu) {
+ config->va_base = hal->memspace[GSL_HAL_MEM1].gpu_base;
+ config->va_range = hal->memspace[GSL_HAL_MEM1].sizebytes;
+ } else {
+ config->va_base = 0x00000000;
+ config->va_range = 0x00000000;
+ }
+
+ /* turn off memory protection unit by setting acceptable physical address range to include all pages */
+ config->mpu_base = 0x00000000; /* hal->memchunk.mmio_virt_base; */
+ config->mpu_range = 0xFFFFF000; /* hal->memchunk.sizebytes; */
+ status = GSL_SUCCESS;
+ }
+ break;
+ }
+
+ case GSL_DEVICE_G12:
+ {
+ mh_mmu_config_u mmu_config = {0};
+
+ config->regspace.gpu_base = 0;
+ config->regspace.mmio_virt_base = (unsigned char *)hal->z160_regspace.mmio_virt_base;
+ config->regspace.mmio_phys_base = (unsigned int) hal->z160_regspace.mmio_phys_base;
+ config->regspace.sizebytes = GSL_HAL_SIZE_REG_G12;
+
+ mmu_config.f.mmu_enable = 1;
+
+ if (gsl_driver.enable_mmu) {
+ config->mmu_config = 0x00555551;
+ config->va_base = hal->memspace[GSL_HAL_MEM1].gpu_base;
+ config->va_range = hal->memspace[GSL_HAL_MEM1].sizebytes;
+ } else {
+ config->mmu_config = mmu_config.val;
+ config->va_base = 0x00000000;
+ config->va_range = 0x00000000;
+ }
+
+ config->mpu_base = 0x00000000; /* (unsigned int) hal->memchunk.mmio_virt_base; */
+ config->mpu_range = 0xFFFFF000; /* hal->memchunk.sizebytes; */
+
+ status = GSL_SUCCESS;
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ return status;
+}
+
+/*----------------------------------------------------------------------------
+ * kgsl_hal_getchipid
+ *
+ * The proper platform method, build from RBBM_PERIPHIDx and RBBM_PATCH_RELEASE
+ *----------------------------------------------------------------------------
+ */
+KGSLHAL_API gsl_chipid_t
+kgsl_hal_getchipid(gsl_deviceid_t device_id)
+{
+ gsl_hal_t *hal = (gsl_hal_t *) gsl_driver.hal;
+ gsl_device_t *device = &gsl_driver.device[device_id-1];
+ gsl_chipid_t chipid = 0;
+ unsigned int coreid, majorid, minorid, patchid, revid;
+
+ if (hal->has_z430 && (device_id == GSL_DEVICE_YAMATO)) {
+ device->ftbl.device_regread(device, mmRBBM_PERIPHID1, &coreid);
+ coreid &= 0xF;
+
+ device->ftbl.device_regread(device, mmRBBM_PERIPHID2, &majorid);
+ majorid = (majorid >> 4) & 0xF;
+
+ device->ftbl.device_regread(device, mmRBBM_PATCH_RELEASE, &revid);
+
+ minorid = ((revid >> 0) & 0xFF); /* this is a 16-bit field, but it is extremely unlikely to ever get this high */
+
+ patchid = ((revid >> 16) & 0xFF);
+
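+ /* chipid layout: [31:24] core, [23:16] major, [15:8] minor, [7:0] patch */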
+ chipid = ((coreid << 24) | (majorid << 16) | (minorid << 8) | (patchid << 0));
+ }
+
+ return chipid;
+}
+
+/* --------------------------------------------------------------------------- */
+
+KGSLHAL_API int
+kgsl_hal_setpowerstate(gsl_deviceid_t device_id, int state, unsigned int value)
+{
+ gsl_device_t *device = &gsl_driver.device[device_id-1];
+ struct clk *gpu_clk = NULL;
+ struct clk *garb_clk = NULL;
+ struct clk *emi_garb_clk = NULL;
+
+ /* unreferenced formal parameter */
+ (void) value;
+
+ switch (device_id) {
+ case GSL_DEVICE_G12:
+ gpu_clk = clk_get(0, "gpu2d_clk");
+ break;
+ case GSL_DEVICE_YAMATO:
+ gpu_clk = clk_get(0, "gpu3d_clk");
+ garb_clk = clk_get(0, "garb_clk");
+ emi_garb_clk = clk_get(0, "emi_garb_clk");
+ break;
+ default:
+ return GSL_FAILURE_DEVICEERROR;
+ }
+
+ if (!gpu_clk) {
+ return GSL_FAILURE_DEVICEERROR;
+ }
+
+ switch (state) {
+ case GSL_PWRFLAGS_CLK_ON:
+ break;
+ case GSL_PWRFLAGS_POWER_ON:
+ clk_enable(gpu_clk);
+ if (garb_clk) {
+ clk_enable(garb_clk);
+ }
+ if (emi_garb_clk) {
+ clk_enable(emi_garb_clk);
+ }
+ kgsl_device_autogate_init(&gsl_driver.device[device_id-1]);
+ break;
+ case GSL_PWRFLAGS_CLK_OFF:
+ break;
+ case GSL_PWRFLAGS_POWER_OFF:
+ if (device->ftbl.device_idle(device, GSL_TIMEOUT_DEFAULT) != GSL_SUCCESS) {
+ return GSL_FAILURE_DEVICEERROR;
+ }
+ kgsl_device_autogate_exit(&gsl_driver.device[device_id-1]);
+ clk_disable(gpu_clk);
+ if (garb_clk) {
+ clk_disable(garb_clk);
+ }
+ if (emi_garb_clk) {
+ clk_disable(emi_garb_clk);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return GSL_SUCCESS;
+}
+
+KGSLHAL_API int kgsl_clock(gsl_deviceid_t dev, int enable)
+{
+ struct clk *gpu_clk = NULL;
+ struct clk *garb_clk = NULL;
+ struct clk *emi_garb_clk = NULL;
+
+ switch (dev) {
+ case GSL_DEVICE_G12:
+ gpu_clk = clk_get(0, "gpu2d_clk");
+ break;
+ case GSL_DEVICE_YAMATO:
+ gpu_clk = clk_get(0, "gpu3d_clk");
+ garb_clk = clk_get(0, "garb_clk");
+ emi_garb_clk = clk_get(0, "emi_garb_clk");
+ break;
+ default:
+ printk(KERN_ERR "GPU device %d is invalid!\n", dev);
+ return GSL_FAILURE_DEVICEERROR;
+ }
+
+ if (IS_ERR(gpu_clk)) {
+ printk(KERN_ERR "%s: GPU clock get failed!\n", __func__);
+ return GSL_FAILURE_DEVICEERROR;
+ }
+
+ if (enable) {
+ clk_enable(gpu_clk);
+ if (garb_clk) {
+ clk_enable(garb_clk);
+ }
+ if (emi_garb_clk) {
+ clk_enable(emi_garb_clk);
+ }
+ } else {
+ clk_disable(gpu_clk);
+ if (garb_clk) {
+ clk_disable(garb_clk);
+ }
+ if (emi_garb_clk) {
+ clk_disable(emi_garb_clk);
+ }
+ }
+
+ return GSL_SUCCESS;
+}
diff --git a/drivers/mxc/amd-gpu/platform/hal/linux/gsl_hwaccess.h b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_hwaccess.h
new file mode 100644
index 000000000000..305b2ee9066d
--- /dev/null
+++ b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_hwaccess.h
@@ -0,0 +1,142 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __GSL_HWACCESS_LINUX_H
+#define __GSL_HWACCESS_LINUX_H
+
+#ifdef _LINUX
+#include "gsl_linux_map.h"
+#endif
+
+#include <linux/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+OSINLINE void
+kgsl_hwaccess_memread(void *dst, unsigned int gpubase, unsigned int gpuoffset, unsigned int sizebytes, unsigned int touserspace)
+{
+ if (gsl_driver.enable_mmu && (gpubase >= GSL_LINUX_MAP_RANGE_START) && (gpubase < GSL_LINUX_MAP_RANGE_END)) {
+ gsl_linux_map_read(dst, gpubase+gpuoffset, sizebytes, touserspace);
+ } else {
+ mb();
+ dsb();
+ if (touserspace)
+ {
+ if (copy_to_user(dst, (void *)(gpubase + gpuoffset), sizebytes))
+ {
+ return;
+ }
+ }
+ else
+ {
+ kos_memcpy(dst, (void *) (gpubase + gpuoffset), sizebytes);
+ }
+ mb();
+ dsb();
+ }
+}
+
+//----------------------------------------------------------------------------
+
+OSINLINE void
+kgsl_hwaccess_memwrite(unsigned int gpubase, unsigned int gpuoffset, void *src, unsigned int sizebytes, unsigned int fromuserspace)
+{
+ if (gsl_driver.enable_mmu && (gpubase >= GSL_LINUX_MAP_RANGE_START) && (gpubase < GSL_LINUX_MAP_RANGE_END)) {
+ gsl_linux_map_write(src, gpubase+gpuoffset, sizebytes, fromuserspace);
+ } else {
+ mb();
+ dsb();
+ if (fromuserspace)
+ {
+ if (copy_from_user((void *)(gpubase + gpuoffset), src, sizebytes))
+ {
+ return;
+ }
+ }
+ else
+ {
+ kos_memcpy((void *)(gpubase + gpuoffset), src, sizebytes);
+ }
+ mb();
+ dsb();
+ }
+}
+
+//----------------------------------------------------------------------------
+
+OSINLINE void
+kgsl_hwaccess_memset(unsigned int gpubase, unsigned int gpuoffset, unsigned int value, unsigned int sizebytes)
+{
+ if (gsl_driver.enable_mmu && (gpubase >= GSL_LINUX_MAP_RANGE_START) && (gpubase < GSL_LINUX_MAP_RANGE_END)) {
+ gsl_linux_map_set(gpuoffset+gpubase, value, sizebytes);
+ } else {
+ mb();
+ dsb();
+ kos_memset((void *)(gpubase + gpuoffset), value, sizebytes);
+ mb();
+ dsb();
+ }
+}
+
+//----------------------------------------------------------------------------
+
+OSINLINE void
+kgsl_hwaccess_regread(gsl_deviceid_t device_id, unsigned int gpubase, unsigned int offsetwords, unsigned int *data)
+{
+ unsigned int *reg;
+
+ // unreferenced formal parameter
+ (void) device_id;
+
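+ /* offsetwords is a 32-bit word index; shifting left by 2 converts it to a byte offset */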
+ reg = (unsigned int *)(gpubase + (offsetwords << 2));
+
+ mb();
+ dsb();
+ *data = __raw_readl(reg);
+ mb();
+ dsb();
+}
+
+//----------------------------------------------------------------------------
+
+OSINLINE void
+kgsl_hwaccess_regwrite(gsl_deviceid_t device_id, unsigned int gpubase, unsigned int offsetwords, unsigned int data)
+{
+ unsigned int *reg;
+
+ // unreferenced formal parameter
+ (void) device_id;
+
+ reg = (unsigned int *)(gpubase + (offsetwords << 2));
+ mb();
+ dsb();
+ __raw_writel(data, reg);
+ mb();
+ dsb();
+}
+#endif // __GSL_HWACCESS_LINUX_H
diff --git a/drivers/mxc/amd-gpu/platform/hal/linux/gsl_kmod.c b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_kmod.c
new file mode 100644
index 000000000000..bef8684ad131
--- /dev/null
+++ b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_kmod.c
@@ -0,0 +1,964 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl_types.h"
+#include "gsl.h"
+#include "gsl_buildconfig.h"
+#include "gsl_halconfig.h"
+#include "gsl_ioctl.h"
+#include "gsl_kmod_cleanup.h"
+#include "gsl_linux_map.h"
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <asm/uaccess.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+
+#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
+
+int gpu_2d_irq, gpu_3d_irq;
+
+phys_addr_t gpu_2d_regbase;
+int gpu_2d_regsize;
+phys_addr_t gpu_3d_regbase;
+int gpu_3d_regsize;
+int gmem_size;
+phys_addr_t gpu_reserved_mem;
+int gpu_reserved_mem_size;
+int z160_version;
+
+static ssize_t gsl_kmod_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr);
+static ssize_t gsl_kmod_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr);
+static int gsl_kmod_ioctl(struct inode *inode, struct file *fd, unsigned int cmd, unsigned long arg);
+static int gsl_kmod_mmap(struct file *fd, struct vm_area_struct *vma);
+static int gsl_kmod_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+static int gsl_kmod_open(struct inode *inode, struct file *fd);
+static int gsl_kmod_release(struct inode *inode, struct file *fd);
+static irqreturn_t z160_irq_handler(int irq, void *dev_id);
+static irqreturn_t z430_irq_handler(int irq, void *dev_id);
+
+static int gsl_kmod_major;
+static struct class *gsl_kmod_class;
+DEFINE_MUTEX(gsl_mutex);
+
+static const struct file_operations gsl_kmod_fops =
+{
+ .owner = THIS_MODULE,
+ .read = gsl_kmod_read,
+ .write = gsl_kmod_write,
+ .ioctl = gsl_kmod_ioctl,
+ .mmap = gsl_kmod_mmap,
+ .open = gsl_kmod_open,
+ .release = gsl_kmod_release
+};
+
+static struct vm_operations_struct gsl_kmod_vmops =
+{
+ .fault = gsl_kmod_fault,
+};
+
+static ssize_t gsl_kmod_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+{
+ return 0;
+}
+
+static ssize_t gsl_kmod_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+{
+ return 0;
+}
+
+static int gsl_kmod_ioctl(struct inode *inode, struct file *fd, unsigned int cmd, unsigned long arg)
+{
+ int kgslStatus = GSL_FAILURE;
+
+ switch (cmd) {
+ case IOCTL_KGSL_DEVICE_START:
+ {
+ kgsl_device_start_t param;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_start_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_device_start(param.device_id, param.flags);
+ break;
+ }
+ case IOCTL_KGSL_DEVICE_STOP:
+ {
+ kgsl_device_stop_t param;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_stop_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_device_stop(param.device_id);
+ break;
+ }
+ case IOCTL_KGSL_DEVICE_IDLE:
+ {
+ kgsl_device_idle_t param;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_idle_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_device_idle(param.device_id, param.timeout);
+ break;
+ }
+ case IOCTL_KGSL_DEVICE_GETPROPERTY:
+ {
+ kgsl_device_getproperty_t param;
+ void *tmp;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_getproperty_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ tmp = kmalloc(param.sizebytes, GFP_KERNEL);
+ if (!tmp)
+ {
+ printk(KERN_ERR "%s:kmalloc error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_device_getproperty(param.device_id, param.type, tmp, param.sizebytes);
+ if (kgslStatus == GSL_SUCCESS)
+ {
+ if (copy_to_user(param.value, tmp, param.sizebytes))
+ {
+ printk(KERN_ERR "%s: copy_to_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ kfree(tmp);
+ break;
+ }
+ }
+ else
+ {
+ printk(KERN_ERR "%s: kgsl_device_getproperty error\n", __func__);
+ }
+ kfree(tmp);
+ break;
+ }
+ case IOCTL_KGSL_DEVICE_SETPROPERTY:
+ {
+ kgsl_device_setproperty_t param;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_setproperty_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_device_setproperty(param.device_id, param.type, param.value, param.sizebytes);
+ if (kgslStatus != GSL_SUCCESS)
+ {
+ printk(KERN_ERR "%s: kgsl_device_setproperty error\n", __func__);
+ }
+ break;
+ }
+ case IOCTL_KGSL_DEVICE_REGREAD:
+ {
+ kgsl_device_regread_t param;
+ unsigned int tmp;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_regread_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_device_regread(param.device_id, param.offsetwords, &tmp);
+ if (kgslStatus == GSL_SUCCESS)
+ {
+ if (copy_to_user(param.value, &tmp, sizeof(unsigned int)))
+ {
+ printk(KERN_ERR "%s: copy_to_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ }
+ break;
+ }
+ case IOCTL_KGSL_DEVICE_REGWRITE:
+ {
+ kgsl_device_regwrite_t param;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_regwrite_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_device_regwrite(param.device_id, param.offsetwords, param.value);
+ break;
+ }
+ case IOCTL_KGSL_DEVICE_WAITIRQ:
+ {
+ kgsl_device_waitirq_t param;
+ unsigned int count;
+
+ printk(KERN_ERR "IOCTL_KGSL_DEVICE_WAITIRQ obsoleted!\n");
+// kgslStatus = -ENOTTY; break;
+
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_waitirq_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_device_waitirq(param.device_id, param.intr_id, &count, param.timeout);
+ if (kgslStatus == GSL_SUCCESS)
+ {
+ if (copy_to_user(param.count, &count, sizeof(unsigned int)))
+ {
+ printk(KERN_ERR "%s: copy_to_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ }
+ break;
+ }
+ case IOCTL_KGSL_CMDSTREAM_ISSUEIBCMDS:
+ {
+ kgsl_cmdstream_issueibcmds_t param;
+ gsl_timestamp_t tmp;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_cmdstream_issueibcmds_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_cmdstream_issueibcmds(param.device_id, param.drawctxt_index, param.ibaddr, param.sizedwords, &tmp, param.flags);
+ if (kgslStatus == GSL_SUCCESS)
+ {
+ if (copy_to_user(param.timestamp, &tmp, sizeof(gsl_timestamp_t)))
+ {
+ printk(KERN_ERR "%s: copy_to_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ }
+ break;
+ }
+ case IOCTL_KGSL_CMDSTREAM_READTIMESTAMP:
+ {
+ kgsl_cmdstream_readtimestamp_t param;
+ gsl_timestamp_t tmp;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_cmdstream_readtimestamp_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ tmp = kgsl_cmdstream_readtimestamp(param.device_id, param.type);
+ if (copy_to_user(param.timestamp, &tmp, sizeof(gsl_timestamp_t)))
+ {
+ printk(KERN_ERR "%s: copy_to_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = GSL_SUCCESS;
+ break;
+ }
+ case IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP:
+ {
+ int err;
+ kgsl_cmdstream_freememontimestamp_t param;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_cmdstream_freememontimestamp_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ err = del_memblock_from_allocated_list(fd, param.memdesc);
+ if(err)
+ {
+ /* tried to remove a block of memory that is not allocated!
+ * NOTE that -EINVAL is one of the Linux kernel's error codes,
+ * so the driver's error codes could get mixed up with the kernel's. */
+ kgslStatus = -EINVAL;
+ }
+ else
+ {
+ kgslStatus = kgsl_cmdstream_freememontimestamp(param.device_id,
+ param.memdesc,
+ param.timestamp,
+ param.type);
+ }
+ break;
+ }
+ case IOCTL_KGSL_CMDSTREAM_WAITTIMESTAMP:
+ {
+ kgsl_cmdstream_waittimestamp_t param;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_cmdstream_waittimestamp_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_cmdstream_waittimestamp(param.device_id, param.timestamp, param.timeout);
+ break;
+ }
+ case IOCTL_KGSL_CMDWINDOW_WRITE:
+ {
+ kgsl_cmdwindow_write_t param;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_cmdwindow_write_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_cmdwindow_write(param.device_id, param.target, param.addr, param.data);
+ break;
+ }
+ case IOCTL_KGSL_CONTEXT_CREATE:
+ {
+ kgsl_context_create_t param;
+ unsigned int tmp;
+ int tmpStatus;
+
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_context_create_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_context_create(param.device_id, param.type, &tmp, param.flags);
+ if (kgslStatus == GSL_SUCCESS)
+ {
+ if (copy_to_user(param.drawctxt_id, &tmp, sizeof(unsigned int)))
+ {
+ tmpStatus = kgsl_context_destroy(param.device_id, tmp);
+ /* is asserting ok? Basically we should return the error from copy_to_user,
+ * but will user space interpret it correctly? Will user space always check
+ * against GSL_SUCCESS or GSL_FAILURE, given that they are not the only
+ * possible return values?
+ */
+ KOS_ASSERT(tmpStatus == GSL_SUCCESS);
+ printk(KERN_ERR "%s: copy_to_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ else
+ {
+ add_device_context_to_array(fd, param.device_id, tmp);
+ }
+ }
+ break;
+ }
+ case IOCTL_KGSL_CONTEXT_DESTROY:
+ {
+ kgsl_context_destroy_t param;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_context_destroy_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_context_destroy(param.device_id, param.drawctxt_id);
+ del_device_context_from_array(fd, param.device_id, param.drawctxt_id);
+ break;
+ }
+ case IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW:
+ {
+ kgsl_drawctxt_bind_gmem_shadow_t param;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_drawctxt_bind_gmem_shadow_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_drawctxt_bind_gmem_shadow(param.device_id, param.drawctxt_id, param.gmem_rect, param.shadow_x, param.shadow_y, param.shadow_buffer, param.buffer_id);
+ break;
+ }
+ case IOCTL_KGSL_SHAREDMEM_ALLOC:
+ {
+ kgsl_sharedmem_alloc_t param;
+ gsl_memdesc_t tmp;
+ int tmpStatus;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_alloc_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_sharedmem_alloc(param.device_id, param.flags, param.sizebytes, &tmp);
+ if (kgslStatus == GSL_SUCCESS)
+ {
+ if (copy_to_user(param.memdesc, &tmp, sizeof(gsl_memdesc_t)))
+ {
+ tmpStatus = kgsl_sharedmem_free(&tmp);
+ KOS_ASSERT(tmpStatus == GSL_SUCCESS);
+ printk(KERN_ERR "%s: copy_to_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ else
+ {
+ add_memblock_to_allocated_list(fd, &tmp);
+ }
+ }
+ break;
+ }
+ case IOCTL_KGSL_SHAREDMEM_FREE:
+ {
+ kgsl_sharedmem_free_t param;
+ gsl_memdesc_t tmp;
+ int err;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_free_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ if (copy_from_user(&tmp, (void __user *)param.memdesc, sizeof(gsl_memdesc_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ err = del_memblock_from_allocated_list(fd, &tmp);
+ if(err)
+ {
+ printk(KERN_ERR "%s: tried to free memdesc that was not allocated!\n", __func__);
+ kgslStatus = err;
+ break;
+ }
+ kgslStatus = kgsl_sharedmem_free(&tmp);
+ if (kgslStatus == GSL_SUCCESS)
+ {
+ if (copy_to_user(param.memdesc, &tmp, sizeof(gsl_memdesc_t)))
+ {
+ printk(KERN_ERR "%s: copy_to_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ }
+ break;
+ }
+ case IOCTL_KGSL_SHAREDMEM_READ:
+ {
+ kgsl_sharedmem_read_t param;
+ gsl_memdesc_t memdesc;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_read_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_sharedmem_read(&memdesc, param.dst, param.offsetbytes, param.sizebytes, true);
+ if (kgslStatus != GSL_SUCCESS)
+ {
+ printk(KERN_ERR "%s: kgsl_sharedmem_read failed\n", __func__);
+ }
+ break;
+ }
+ case IOCTL_KGSL_SHAREDMEM_WRITE:
+ {
+ kgsl_sharedmem_write_t param;
+ gsl_memdesc_t memdesc;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_write_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_sharedmem_write(&memdesc, param.offsetbytes, param.src, param.sizebytes, true);
+ if (kgslStatus != GSL_SUCCESS)
+ {
+ printk(KERN_ERR "%s: kgsl_sharedmem_write failed\n", __func__);
+ }
+
+ break;
+ }
+ case IOCTL_KGSL_SHAREDMEM_SET:
+ {
+ kgsl_sharedmem_set_t param;
+ gsl_memdesc_t memdesc;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_set_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_sharedmem_set(&memdesc, param.offsetbytes, param.value, param.sizebytes);
+ break;
+ }
+ case IOCTL_KGSL_SHAREDMEM_LARGESTFREEBLOCK:
+ {
+ kgsl_sharedmem_largestfreeblock_t param;
+ unsigned int largestfreeblock;
+
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_largestfreeblock_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ largestfreeblock = kgsl_sharedmem_largestfreeblock(param.device_id, param.flags);
+ if (copy_to_user(param.largestfreeblock, &largestfreeblock, sizeof(unsigned int)))
+ {
+ printk(KERN_ERR "%s: copy_to_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = GSL_SUCCESS;
+ break;
+ }
+ case IOCTL_KGSL_SHAREDMEM_CACHEOPERATION:
+ {
+ kgsl_sharedmem_cacheoperation_t param;
+ gsl_memdesc_t memdesc;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_cacheoperation_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_sharedmem_cacheoperation(&memdesc, param.offsetbytes, param.sizebytes, param.operation);
+ break;
+ }
+ case IOCTL_KGSL_SHAREDMEM_FROMHOSTPOINTER:
+ {
+ kgsl_sharedmem_fromhostpointer_t param;
+ gsl_memdesc_t memdesc;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_sharedmem_fromhostpointer_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_sharedmem_fromhostpointer(param.device_id, &memdesc, param.hostptr);
+ break;
+ }
+ case IOCTL_KGSL_ADD_TIMESTAMP:
+ {
+ kgsl_add_timestamp_t param;
+ gsl_timestamp_t tmp;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_add_timestamp_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ tmp = kgsl_add_timestamp(param.device_id, &tmp);
+ if (copy_to_user(param.timestamp, &tmp, sizeof(gsl_timestamp_t)))
+ {
+ printk(KERN_ERR "%s: copy_to_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = GSL_SUCCESS;
+ break;
+ }
+
+ case IOCTL_KGSL_DEVICE_CLOCK:
+ {
+ kgsl_device_clock_t param;
+ if (copy_from_user(&param, (void __user *)arg, sizeof(kgsl_device_clock_t)))
+ {
+ printk(KERN_ERR "%s: copy_from_user error\n", __func__);
+ kgslStatus = GSL_FAILURE;
+ break;
+ }
+ kgslStatus = kgsl_device_clock(param.device, param.enable);
+ break;
+ }
+ default:
+ kgslStatus = -ENOTTY;
+ break;
+ }
+
+ return kgslStatus;
+}
+
+static int gsl_kmod_mmap(struct file *fd, struct vm_area_struct *vma)
+{
+ int status = 0;
+ unsigned long start = vma->vm_start;
+ unsigned long pfn = vma->vm_pgoff;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ unsigned long prot = pgprot_writecombine(vma->vm_page_prot);
+ unsigned long addr = vma->vm_pgoff << PAGE_SHIFT;
+ void *va = NULL;
+
+ if (gsl_driver.enable_mmu && (addr < GSL_LINUX_MAP_RANGE_END) && (addr >= GSL_LINUX_MAP_RANGE_START)) {
+ va = gsl_linux_map_find(addr);
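+ /* the mapping is vmalloc-backed, so its pages are not physically contiguous and must be remapped one at a time */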
+ while (size > 0) {
+ if (remap_pfn_range(vma, start, vmalloc_to_pfn(va), PAGE_SIZE, prot)) {
+ return -EAGAIN;
+ }
+ start += PAGE_SIZE;
+ va += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ } else {
+ if (remap_pfn_range(vma, start, pfn, size, prot)) {
+ status = -EAGAIN;
+ }
+ }
+
+ vma->vm_ops = &gsl_kmod_vmops;
+
+ return status;
+}
+
+static int gsl_kmod_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ return VM_FAULT_SIGBUS;
+}
+
+static int gsl_kmod_open(struct inode *inode, struct file *fd)
+{
+ gsl_flags_t flags = 0;
+ struct gsl_kmod_per_fd_data *datp;
+ int err = 0;
+
+ if(mutex_lock_interruptible(&gsl_mutex))
+ {
+ return -EINTR;
+ }
+
+ if (kgsl_driver_entry(flags) != GSL_SUCCESS)
+ {
+ printk(KERN_INFO "%s: kgsl_driver_entry error\n", __func__);
+ err = -EIO; // TODO: not sure why it failed
+ }
+ else
+ {
+ /* allocate per file descriptor data structure */
+ datp = (struct gsl_kmod_per_fd_data *)kzalloc(
+ sizeof(struct gsl_kmod_per_fd_data),
+ GFP_KERNEL);
+ if(datp)
+ {
+ init_created_contexts_array(datp->created_contexts_array[0]);
+ INIT_LIST_HEAD(&datp->allocated_blocks_head);
+
+ fd->private_data = (void *)datp;
+ }
+ else
+ {
+ err = -ENOMEM;
+ }
+ }
+
+ mutex_unlock(&gsl_mutex);
+
+ return err;
+}
+
+static int gsl_kmod_release(struct inode *inode, struct file *fd)
+{
+ struct gsl_kmod_per_fd_data *datp;
+ int err = 0;
+
+ if(mutex_lock_interruptible(&gsl_mutex))
+ {
+ return -EINTR;
+ }
+
+ /* make sure contexts are destroyed */
+ del_all_devices_contexts(fd);
+
+ if (kgsl_driver_exit() != GSL_SUCCESS)
+ {
+ printk(KERN_INFO "%s: kgsl_driver_exit error\n", __func__);
+ err = -EIO; // TODO: find better error code
+ }
+ else
+ {
+ /* release per file descriptor data structure */
+ datp = (struct gsl_kmod_per_fd_data *)fd->private_data;
+ del_all_memblocks_from_allocated_list(fd);
+ kfree(datp);
+ fd->private_data = 0;
+ }
+
+ mutex_unlock(&gsl_mutex);
+
+ return err;
+}
+
+static struct class *gsl_kmod_class;
+
+static irqreturn_t z160_irq_handler(int irq, void *dev_id)
+{
+ kgsl_intr_isr();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t z430_irq_handler(int irq, void *dev_id)
+{
+ kgsl_intr_isr();
+ return IRQ_HANDLED;
+}
+
+static int gpu_probe(struct platform_device *pdev)
+{
+ int i;
+ struct resource *res;
+ struct device *dev;
+
+ if (pdev->dev.platform_data)
+ z160_version = *((int *)(pdev->dev.platform_data));
+ else
+ z160_version = 0;
+
+ for(i = 0; i < 2; i++){
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (!res) {
+ if (i == 0) {
+ printk(KERN_ERR "gpu: unable to get gpu irq\n");
+ return -ENODEV;
+ } else {
+ break;
+ }
+ }
+ if(strcmp(res->name, "gpu_2d_irq") == 0){
+ gpu_2d_irq = res->start;
+ }else if(strcmp(res->name, "gpu_3d_irq") == 0){
+ gpu_3d_irq = res->start;
+ }
+ }
+
+ for(i = 0; i < 4; i++){
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res) {
+ gpu_2d_regbase = 0;
+ gpu_2d_regsize = 0;
+ gpu_3d_regbase = 0;
+ gpu_3d_regsize = 0;
+ gmem_size = 0;
+ gpu_reserved_mem = 0;
+ gpu_reserved_mem_size = 0;
+ break;
+ }else{
+ if(strcmp(res->name, "gpu_2d_registers") == 0){
+ gpu_2d_regbase = res->start;
+ gpu_2d_regsize = res->end - res->start + 1;
+ }else if(strcmp(res->name, "gpu_3d_registers") == 0){
+ gpu_3d_regbase = res->start;
+ gpu_3d_regsize = res->end - res->start + 1;
+ }else if(strcmp(res->name, "gpu_graphics_mem") == 0){
+ gmem_size = res->end - res->start + 1;
+ }else if(strcmp(res->name, "gpu_reserved_mem") == 0){
+ gpu_reserved_mem = res->start;
+ gpu_reserved_mem_size = res->end - res->start + 1;
+ }
+ }
+ }
+
+ if (gpu_3d_irq > 0)
+ {
+ if (request_irq(gpu_3d_irq, z430_irq_handler, 0, "ydx", NULL) < 0) {
+ printk(KERN_ERR "%s: request_irq error\n", __func__);
+ gpu_3d_irq = 0;
+ goto request_irq_error;
+ }
+ }
+
+ if (gpu_2d_irq > 0)
+ {
+ if (request_irq(gpu_2d_irq, z160_irq_handler, 0, "g12", NULL) < 0) {
+ printk(KERN_ERR "2D Acceleration Enabled, OpenVG Disabled!\n");
+ gpu_2d_irq = 0;
+ }
+ }
+
+ if (kgsl_driver_init() != GSL_SUCCESS) {
+ printk(KERN_ERR "%s: kgsl_driver_init error\n", __func__);
+ goto kgsl_driver_init_error;
+ }
+
+ gsl_kmod_major = register_chrdev(0, "gsl_kmod", &gsl_kmod_fops);
+ gsl_kmod_vmops.fault = gsl_kmod_fault;
+
+ if (gsl_kmod_major <= 0)
+ {
+ pr_err("%s: register_chrdev error\n", __func__);
+ goto register_chrdev_error;
+ }
+
+ gsl_kmod_class = class_create(THIS_MODULE, "gsl_kmod");
+
+ if (IS_ERR(gsl_kmod_class))
+ {
+ pr_err("%s: class_create error\n", __func__);
+ goto class_create_error;
+ }
+
+ #if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28))
+ dev = device_create(gsl_kmod_class, NULL, MKDEV(gsl_kmod_major, 0), "gsl_kmod");
+ #else
+ dev = device_create(gsl_kmod_class, NULL, MKDEV(gsl_kmod_major, 0), NULL,"gsl_kmod");
+ #endif
+
+ if (!IS_ERR(dev))
+ {
+ // gsl_kmod_data.device = dev;
+ return 0;
+ }
+
+ pr_err("%s: device_create error\n", __func__);
+
+class_create_error:
+ class_destroy(gsl_kmod_class);
+
+register_chrdev_error:
+ unregister_chrdev(gsl_kmod_major, "gsl_kmod");
+
+kgsl_driver_init_error:
+ kgsl_driver_close();
+ if (gpu_2d_irq > 0) {
+ free_irq(gpu_2d_irq, NULL);
+ }
+ if (gpu_3d_irq > 0) {
+ free_irq(gpu_3d_irq, NULL);
+ }
+request_irq_error:
+ return 0; // TODO: return proper error code
+}
+
+static int gpu_remove(struct platform_device *pdev)
+{
+ device_destroy(gsl_kmod_class, MKDEV(gsl_kmod_major, 0));
+ class_destroy(gsl_kmod_class);
+ unregister_chrdev(gsl_kmod_major, "gsl_kmod");
+
+ if (gpu_3d_irq)
+ {
+ free_irq(gpu_3d_irq, NULL);
+ }
+
+ if (gpu_2d_irq)
+ {
+ free_irq(gpu_2d_irq, NULL);
+ }
+
+ kgsl_driver_close();
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int gpu_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int i;
+ gsl_powerprop_t power;
+
+ power.flags = GSL_PWRFLAGS_POWER_OFF;
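+ /* power off every KGSL device; device ids are 1-based, hence (i+1) */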
+ for (i = 0; i < GSL_DEVICE_MAX; i++)
+ {
+ kgsl_device_setproperty(
+ (gsl_deviceid_t) (i+1),
+ GSL_PROP_DEVICE_POWER,
+ &power,
+ sizeof(gsl_powerprop_t));
+ }
+
+ return 0;
+}
+
+static int gpu_resume(struct platform_device *pdev)
+{
+ int i;
+ gsl_powerprop_t power;
+
+ power.flags = GSL_PWRFLAGS_POWER_ON;
+ for (i = 0; i < GSL_DEVICE_MAX; i++)
+ {
+ kgsl_device_setproperty(
+ (gsl_deviceid_t) (i+1),
+ GSL_PROP_DEVICE_POWER,
+ &power,
+ sizeof(gsl_powerprop_t));
+ }
+
+ return 0;
+}
+#else
+#define gpu_suspend NULL
+#define gpu_resume NULL
+#endif /* !CONFIG_PM */
+
+/*! Driver definition
+ */
+static struct platform_driver gpu_driver = {
+ .driver = {
+ .name = "mxc_gpu",
+ },
+ .probe = gpu_probe,
+ .remove = gpu_remove,
+ .suspend = gpu_suspend,
+ .resume = gpu_resume,
+};
+
+static int __init gsl_kmod_init(void)
+{
+ return platform_driver_register(&gpu_driver);
+}
+
+static void __exit gsl_kmod_exit(void)
+{
+ platform_driver_unregister(&gpu_driver);
+}
+
+module_init(gsl_kmod_init);
+module_exit(gsl_kmod_exit);
+MODULE_AUTHOR("Advanced Micro Devices");
+MODULE_DESCRIPTION("AMD graphics core driver for i.MX");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mxc/amd-gpu/platform/hal/linux/gsl_kmod_cleanup.c b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_kmod_cleanup.c
new file mode 100644
index 000000000000..3685a5756baf
--- /dev/null
+++ b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_kmod_cleanup.c
@@ -0,0 +1,269 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_kmod_cleanup.h"
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+
+/*
+ * Local helper functions to check and convert device/context id's (1 based)
+ * to index (0 based).
+ */
+static u32 device_id_to_device_index(gsl_deviceid_t device_id)
+{
+ KOS_ASSERT((GSL_DEVICE_ANY < device_id) &&
+ (device_id <= GSL_DEVICE_MAX));
+ return (u32)(device_id - 1);
+}
+
+/*
+ * Local helper function to check and get pointer to per file descriptor data
+ */
+static struct gsl_kmod_per_fd_data *get_fd_private_data(struct file *fd)
+{
+ struct gsl_kmod_per_fd_data *datp;
+
+ KOS_ASSERT(fd);
+ datp = (struct gsl_kmod_per_fd_data *)fd->private_data;
+ KOS_ASSERT(datp);
+ return datp;
+}
+
+static s8 *find_first_entry_with(s8 *subarray, s8 context_id)
+{
+ s8 *entry = NULL;
+ int i;
+
+//printk(KERN_DEBUG "At %s, ctx_id = %d\n", __func__, context_id);
+
+ KOS_ASSERT(context_id >= EMPTY_ENTRY);
+ KOS_ASSERT(context_id <= GSL_CONTEXT_MAX); // TODO: check the bound.
+
+ for(i = 0; i < GSL_CONTEXT_MAX; i++) // TODO: check the bound.
+ {
+ if(subarray[i] == (s8)context_id)
+ {
+ entry = &subarray[i];
+ break;
+ }
+ }
+
+ return entry;
+}
+
+
+/*
+ * Add a memdesc into a list of allocated memory blocks for this file
+ * descriptor. The list is built in such a way that it implements a FIFO
+ * (i.e. a queue). Traces of tiger, tiger_ri and VG11 CTs should be analysed
+ * to make an informed choice.
+ *
+ * NOTE! gsl_memdesc_ts are COPIED so user space should NOT change them.
+ */
+int add_memblock_to_allocated_list(struct file *fd,
+ gsl_memdesc_t *allocated_block)
+{
+ int err = 0;
+ struct gsl_kmod_per_fd_data *datp;
+ struct gsl_kmod_alloc_list *lisp;
+ struct list_head *head;
+
+ KOS_ASSERT(allocated_block);
+
+ datp = get_fd_private_data(fd);
+
+ head = &datp->allocated_blocks_head;
+ KOS_ASSERT(head);
+
+ /* allocate and put new entry in the list of allocated memory descriptors */
+ lisp = (struct gsl_kmod_alloc_list *)kzalloc(sizeof(struct gsl_kmod_alloc_list), GFP_KERNEL);
+ if(lisp)
+ {
+ INIT_LIST_HEAD(&lisp->node);
+
+ /* builds FIFO (list_add() would build LIFO) */
+ list_add_tail(&lisp->node, head);
+ memcpy(&lisp->allocated_block, allocated_block, sizeof(gsl_memdesc_t));
+ lisp->allocation_number = datp->maximum_number_of_blocks;
+// printk(KERN_DEBUG "List entry #%u allocated\n", lisp->allocation_number);
+
+ datp->maximum_number_of_blocks++;
+ datp->number_of_allocated_blocks++;
+
+ err = 0;
+ }
+ else
+ {
+ printk(KERN_ERR "%s: Could not allocate new list element\n", __func__);
+ err = -ENOMEM;
+ }
+
+ return err;
+}
+
+/* Delete a previously allocated memdesc from a list of allocated memory blocks */
+int del_memblock_from_allocated_list(struct file *fd,
+ gsl_memdesc_t *freed_block)
+{
+ struct gsl_kmod_per_fd_data *datp;
+ struct gsl_kmod_alloc_list *cursor, *next;
+ struct list_head *head;
+// int is_different;
+
+ KOS_ASSERT(freed_block);
+
+ datp = get_fd_private_data(fd);
+
+ head = &datp->allocated_blocks_head;
+ KOS_ASSERT(head);
+
+ KOS_ASSERT(datp->number_of_allocated_blocks > 0);
+
+ if(!list_empty(head))
+ {
+ list_for_each_entry_safe(cursor, next, head, node)
+ {
+ if(cursor->allocated_block.gpuaddr == freed_block->gpuaddr)
+ {
+// is_different = memcmp(&cursor->allocated_block, freed_block, sizeof(gsl_memdesc_t));
+// KOS_ASSERT(!is_different);
+
+ list_del(&cursor->node);
+// printk(KERN_DEBUG "List entry #%u freed\n", cursor->allocation_number);
+ kfree(cursor);
+ datp->number_of_allocated_blocks--;
+ return 0;
+ }
+ }
+ }
+ return -EINVAL; // tried to free an entry that does not exist, or the list was empty.
+}
+
+/* Delete all previously allocated memdescs from a list */
+int del_all_memblocks_from_allocated_list(struct file *fd)
+{
+ struct gsl_kmod_per_fd_data *datp;
+ struct gsl_kmod_alloc_list *cursor, *next;
+ struct list_head *head;
+
+ datp = get_fd_private_data(fd);
+
+ head = &datp->allocated_blocks_head;
+ KOS_ASSERT(head);
+
+ if(!list_empty(head))
+ {
+ printk(KERN_INFO "Not all allocated memory blocks were freed. Doing it now.\n");
+ list_for_each_entry_safe(cursor, next, head, node)
+ {
+ printk(KERN_INFO "Freeing list entry #%u, gpuaddr=%x\n", (u32)cursor->allocation_number, cursor->allocated_block.gpuaddr);
+ kgsl_sharedmem_free(&cursor->allocated_block);
+ list_del(&cursor->node);
+ kfree(cursor);
+ }
+ }
+
+ KOS_ASSERT(list_empty(head));
+ datp->number_of_allocated_blocks = 0;
+
+ return 0;
+}
+
+void init_created_contexts_array(s8 *array)
+{
+ memset((void*)array, EMPTY_ENTRY, GSL_DEVICE_MAX * GSL_CONTEXT_MAX);
+}
+
+
+void add_device_context_to_array(struct file *fd,
+ gsl_deviceid_t device_id,
+ unsigned int context_id)
+{
+ struct gsl_kmod_per_fd_data *datp;
+ s8 *entry;
+ s8 *subarray;
+ u32 device_index = device_id_to_device_index(device_id);
+
+ datp = get_fd_private_data(fd);
+
+ subarray = datp->created_contexts_array[device_index];
+ entry = find_first_entry_with(subarray, EMPTY_ENTRY);
+
+ KOS_ASSERT(entry);
+ KOS_ASSERT((datp->created_contexts_array[device_index] <= entry) &&
+ (entry < datp->created_contexts_array[device_index] + GSL_CONTEXT_MAX));
+ KOS_ASSERT(context_id < 127);
+ *entry = (s8)context_id;
+}
+
+void del_device_context_from_array(struct file *fd,
+ gsl_deviceid_t device_id,
+ unsigned int context_id)
+{
+ struct gsl_kmod_per_fd_data *datp;
+ u32 device_index = device_id_to_device_index(device_id);
+ s8 *entry;
+ s8 *subarray;
+
+ datp = get_fd_private_data(fd);
+
+ KOS_ASSERT(context_id < 127);
+ subarray = &(datp->created_contexts_array[device_index][0]);
+ entry = find_first_entry_with(subarray, context_id);
+ KOS_ASSERT(entry);
+ KOS_ASSERT((datp->created_contexts_array[device_index] <= entry) &&
+ (entry < datp->created_contexts_array[device_index] + GSL_CONTEXT_MAX));
+ *entry = EMPTY_ENTRY;
+}
+
+void del_all_devices_contexts(struct file *fd)
+{
+ struct gsl_kmod_per_fd_data *datp;
+ gsl_deviceid_t id;
+ u32 device_index;
+ u32 ctx_array_index;
+ s8 ctx;
+ int err;
+
+ datp = get_fd_private_data(fd);
+
+ /* device_id is 1 based */
+ for(id = GSL_DEVICE_ANY + 1; id <= GSL_DEVICE_MAX; id++)
+ {
+ device_index = device_id_to_device_index(id);
+ for(ctx_array_index = 0; ctx_array_index < GSL_CONTEXT_MAX; ctx_array_index++)
+ {
+ ctx = datp->created_contexts_array[device_index][ctx_array_index];
+ if(ctx != EMPTY_ENTRY)
+ {
+ err = kgsl_context_destroy(id, ctx);
+ if(err != GSL_SUCCESS)
+ {
+ printk(KERN_ERR "%s: could not destroy context %d on device id = %u\n", __func__, ctx, id);
+ }
+ else
+ {
+ printk(KERN_DEBUG "%s: Destroyed context %d on device id = %u\n", __func__, ctx, id);
+ }
+ }
+ }
+ }
+}
+
diff --git a/drivers/mxc/amd-gpu/platform/hal/linux/gsl_kmod_cleanup.h b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_kmod_cleanup.h
new file mode 100644
index 000000000000..475ee3be2e50
--- /dev/null
+++ b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_kmod_cleanup.h
@@ -0,0 +1,90 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __GSL_KMOD_CLEANUP_H
+#define __GSL_KMOD_CLEANUP_H
+#include "gsl_types.h"
+
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+
+#if (GSL_CONTEXT_MAX > 127)
+ #error created_contexts_array supports only context numbers of 127 or less.
+#endif
+
+static const s8 EMPTY_ENTRY = -1;
+
+/* A list entry used to track one allocated memory block; one list per fd. */
+/* These entries should probably come from a slab cache to minimise fragmentation. */
+struct gsl_kmod_alloc_list
+{
+ struct list_head node;
+ gsl_memdesc_t allocated_block;
+ u32 allocation_number;
+};
+
+/* A structure holding the above list of blocks. Contains the per-fd data. */
+struct gsl_kmod_per_fd_data
+{
+ struct list_head allocated_blocks_head; // list head
+ u32 maximum_number_of_blocks;
+ u32 number_of_allocated_blocks;
+ s8 created_contexts_array[GSL_DEVICE_MAX][GSL_CONTEXT_MAX];
+};
+
+
+/*
+ * prototypes
+ */
+
+/* allocated memory block tracking */
+int add_memblock_to_allocated_list(struct file *fd,
+ gsl_memdesc_t *allocated_block);
+
+int del_memblock_from_allocated_list(struct file *fd,
+ gsl_memdesc_t *freed_block);
+
+int del_all_memblocks_from_allocated_list(struct file *fd);
+
+/* created contexts tracking */
+void init_created_contexts_array(s8 *array);
+
+void add_device_context_to_array(struct file *fd,
+ gsl_deviceid_t device_id,
+ unsigned int context_id);
+
+void del_device_context_from_array(struct file *fd,
+ gsl_deviceid_t device_id,
+ unsigned int context_id);
+
+void del_all_devices_contexts(struct file *fd);
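+
+/*
+ * Illustrative call sequence (a sketch only; the actual call sites in the
+ * kmod file-operations layer may differ):
+ *
+ *   add_memblock_to_allocated_list(fd, &memdesc);    after a successful alloc
+ *   del_memblock_from_allocated_list(fd, &memdesc);  after a successful free
+ *   del_all_memblocks_from_allocated_list(fd);       when the fd is released
+ *   del_all_devices_contexts(fd);                    when the fd is released
+ */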
+
+#endif // __GSL_KMOD_CLEANUP_H
+
diff --git a/drivers/mxc/amd-gpu/platform/hal/linux/gsl_linux_map.c b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_linux_map.c
new file mode 100644
index 000000000000..7fee7b814411
--- /dev/null
+++ b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_linux_map.c
@@ -0,0 +1,221 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+
+#include "gsl_linux_map.h"
+
+struct gsl_linux_map
+{
+ struct list_head list;
+ unsigned int gpu_addr;
+ void *kernel_virtual_addr;
+ unsigned int size;
+};
+
+static LIST_HEAD(gsl_linux_map_list);
+static DEFINE_MUTEX(gsl_linux_map_mutex);
+
+int gsl_linux_map_init(void)
+{
+ mutex_lock(&gsl_linux_map_mutex);
+ INIT_LIST_HEAD(&gsl_linux_map_list);
+ mutex_unlock(&gsl_linux_map_mutex);
+
+ return 0;
+}
+
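+/* Return the existing mapping for gpu_addr, or allocate a new noncached
+ * vmalloc area of the requested size and add it to the tracking list. */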
+void *gsl_linux_map_alloc(unsigned int gpu_addr, unsigned int size)
+{
+ struct gsl_linux_map * map;
+ struct list_head *p;
+ void *va;
+
+ mutex_lock(&gsl_linux_map_mutex);
+
+ list_for_each(p, &gsl_linux_map_list){
+ map = list_entry(p, struct gsl_linux_map, list);
+ if(map->gpu_addr == gpu_addr){
+ mutex_unlock(&gsl_linux_map_mutex);
+ return map->kernel_virtual_addr;
+ }
+ }
+
+ va = __vmalloc(size, GFP_KERNEL, pgprot_noncached(pgprot_kernel));
+ if(va == NULL){
+ mutex_unlock(&gsl_linux_map_mutex);
+ return NULL;
+ }
+
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (map == NULL) {
+ /* allocating the tracking entry failed; release the buffer */
+ vfree(va);
+ mutex_unlock(&gsl_linux_map_mutex);
+ return NULL;
+ }
+ map->gpu_addr = gpu_addr;
+ map->kernel_virtual_addr = va;
+ map->size = size;
+
+ INIT_LIST_HEAD(&map->list);
+ list_add_tail(&map->list, &gsl_linux_map_list);
+
+ mutex_unlock(&gsl_linux_map_mutex);
+ return va;
+}
+
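+/* Release the mapping registered for gpu_addr, if one exists. */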
+void gsl_linux_map_free(unsigned int gpu_addr)
+{
+ int found = 0;
+ struct gsl_linux_map * map;
+ struct list_head *p;
+
+ mutex_lock(&gsl_linux_map_mutex);
+
+ list_for_each(p, &gsl_linux_map_list){
+ map = list_entry(p, struct gsl_linux_map, list);
+ if(map->gpu_addr == gpu_addr){
+ found = 1;
+ break;
+ }
+ }
+
+ if(found){
+ vfree(map->kernel_virtual_addr);
+ list_del(&map->list);
+ kfree(map);
+ }
+
+ mutex_unlock(&gsl_linux_map_mutex);
+}
+
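+/* Look up the kernel virtual address registered for gpu_addr, or NULL. */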
+void *gsl_linux_map_find(unsigned int gpu_addr)
+{
+ struct gsl_linux_map * map;
+ struct list_head *p;
+
+ mutex_lock(&gsl_linux_map_mutex);
+
+ list_for_each(p, &gsl_linux_map_list){
+ map = list_entry(p, struct gsl_linux_map, list);
+ if(map->gpu_addr == gpu_addr){
+ mutex_unlock(&gsl_linux_map_mutex);
+ return map->kernel_virtual_addr;
+ }
+ }
+
+ mutex_unlock(&gsl_linux_map_mutex);
+ return NULL;
+}
+
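+/* Copy sizebytes out of the mapping that contains gpuoffset into dst,
+ * via copy_to_user() when touserspace is set, plain memcpy() otherwise. */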
+void *gsl_linux_map_read(void *dst, unsigned int gpuoffset, unsigned int sizebytes, unsigned int touserspace)
+{
+ struct gsl_linux_map * map;
+ struct list_head *p;
+
+ mutex_lock(&gsl_linux_map_mutex);
+
+ list_for_each(p, &gsl_linux_map_list){
+ map = list_entry(p, struct gsl_linux_map, list);
+ if(map->gpu_addr <= gpuoffset &&
+ (map->gpu_addr + map->size) > gpuoffset){
+ void *src = map->kernel_virtual_addr + (gpuoffset - map->gpu_addr);
+ mutex_unlock(&gsl_linux_map_mutex);
+ if (touserspace)
+ {
+ return (void *)copy_to_user(dst, src, sizebytes);
+ }
+ else
+ {
+ return memcpy(dst, src, sizebytes);
+ }
+ }
+ }
+
+ mutex_unlock(&gsl_linux_map_mutex);
+ return NULL;
+}
+
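+/* Copy sizebytes from src into the mapping that contains gpuoffset,
+ * via copy_from_user() when fromuserspace is set, plain memcpy() otherwise. */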
+void *gsl_linux_map_write(void *src, unsigned int gpuoffset, unsigned int sizebytes, unsigned int fromuserspace)
+{
+ struct gsl_linux_map * map;
+ struct list_head *p;
+
+ mutex_lock(&gsl_linux_map_mutex);
+
+ list_for_each(p, &gsl_linux_map_list){
+ map = list_entry(p, struct gsl_linux_map, list);
+ if(map->gpu_addr <= gpuoffset &&
+ (map->gpu_addr + map->size) > gpuoffset){
+ void *dst = map->kernel_virtual_addr + (gpuoffset - map->gpu_addr);
+ mutex_unlock(&gsl_linux_map_mutex);
+ if (fromuserspace)
+ {
+ return (void *)copy_from_user(dst, src, sizebytes);
+ }
+ else
+ {
+ return memcpy(dst, src, sizebytes);
+ }
+ }
+ }
+
+ mutex_unlock(&gsl_linux_map_mutex);
+ return NULL;
+}
+
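+/* memset() sizebytes of the mapping that contains gpuoffset to value. */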
+void *gsl_linux_map_set(unsigned int gpuoffset, unsigned int value, unsigned int sizebytes)
+{
+ struct gsl_linux_map * map;
+ struct list_head *p;
+
+ mutex_lock(&gsl_linux_map_mutex);
+
+ list_for_each(p, &gsl_linux_map_list){
+ map = list_entry(p, struct gsl_linux_map, list);
+ if(map->gpu_addr <= gpuoffset &&
+ (map->gpu_addr + map->size) > gpuoffset){
+ void *ptr = map->kernel_virtual_addr + (gpuoffset - map->gpu_addr);
+ mutex_unlock(&gsl_linux_map_mutex);
+ return memset(ptr, value, sizebytes);
+ }
+ }
+
+ mutex_unlock(&gsl_linux_map_mutex);
+ return NULL;
+}
+
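+/* Release every tracked mapping and reinitialise the list. */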
+int gsl_linux_map_destroy(void)
+{
+ struct gsl_linux_map * map;
+ struct list_head *p, *tmp;
+
+ mutex_lock(&gsl_linux_map_mutex);
+
+ list_for_each_safe(p, tmp, &gsl_linux_map_list){
+ map = list_entry(p, struct gsl_linux_map, list);
+ vfree(map->kernel_virtual_addr);
+ list_del(&map->list);
+ kfree(map);
+ }
+
+ INIT_LIST_HEAD(&gsl_linux_map_list);
+
+ mutex_unlock(&gsl_linux_map_mutex);
+ return 0;
+}
diff --git a/drivers/mxc/amd-gpu/platform/hal/linux/gsl_linux_map.h b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_linux_map.h
new file mode 100644
index 000000000000..ebbe94a75e10
--- /dev/null
+++ b/drivers/mxc/amd-gpu/platform/hal/linux/gsl_linux_map.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __GSL_LINUX_MAP_H__
+#define __GSL_LINUX_MAP_H__
+
+#include "gsl_halconfig.h"
+
+#define GSL_LINUX_MAP_RANGE_START (1024*1024)
+#define GSL_LINUX_MAP_RANGE_END (GSL_LINUX_MAP_RANGE_START+GSL_HAL_SHMEM_SIZE_EMEM1_MMU)
+
+int gsl_linux_map_init(void);
+void *gsl_linux_map_alloc(unsigned int gpu_addr, unsigned int size);
+void gsl_linux_map_free(unsigned int gpu_addr);
+void *gsl_linux_map_find(unsigned int gpu_addr);
+void *gsl_linux_map_read(void *dst, unsigned int gpuoffset, unsigned int sizebytes, unsigned int touserspace);
+void *gsl_linux_map_write(void *src, unsigned int gpuoffset, unsigned int sizebytes, unsigned int fromuserspace);
+void *gsl_linux_map_set(unsigned int gpuoffset, unsigned int value, unsigned int sizebytes);
+int gsl_linux_map_destroy(void);
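+
+/*
+ * Typical call sequence (an illustrative sketch only, not taken from the
+ * callers in this patch):
+ *
+ *   gsl_linux_map_init();
+ *   void *va = gsl_linux_map_alloc(gpu_addr, size);
+ *   gsl_linux_map_write(src, gpu_addr, size, 0);
+ *   gsl_linux_map_read(dst, gpu_addr, size, 0);
+ *   gsl_linux_map_free(gpu_addr);
+ *   gsl_linux_map_destroy();
+ */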
+
+#endif
diff --git a/drivers/mxc/amd-gpu/platform/hal/linux/misc.c b/drivers/mxc/amd-gpu/platform/hal/linux/misc.c
new file mode 100644
index 000000000000..a356f334b187
--- /dev/null
+++ b/drivers/mxc/amd-gpu/platform/hal/linux/misc.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <gsl.h>
+
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+typedef struct _gsl_autogate_t {
+ struct timer_list timer;
+ spinlock_t lock;
+ int active;
+ int timeout;
+ gsl_device_t *dev;
+} gsl_autogate_t;
+
+
+#define KGSL_DEVICE_IDLE_TIMEOUT 5000 /* unit ms */
+
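+/* Mark the device active: ungate its clock if it was idle and (re)arm the idle timer. */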
+int kgsl_device_active(gsl_device_t *dev)
+{
+ unsigned long flags;
+ gsl_autogate_t *autogate = dev->autogate;
+ if (!autogate) {
+ printk(KERN_ERR "%s: autogate has exited!\n", __func__);
+ return 0;
+ }
+// printk(KERN_ERR "%s:%d id %d active %d\n", __func__, __LINE__, dev->id, autogate->active);
+
+ spin_lock_irqsave(&autogate->lock, flags);
+ if (!autogate->active)
+ kgsl_clock(autogate->dev->id, 1);
+ autogate->active = 1;
+ mod_timer(&autogate->timer, jiffies + msecs_to_jiffies(autogate->timeout));
+ spin_unlock_irqrestore(&autogate->lock, flags);
+ return 0;
+}
+
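+/* Idle-timer handler (also called directly by kgsl_device_clock): mark the device inactive and gate its clock off. */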
+static void kgsl_device_inactive(unsigned long data)
+{
+ gsl_autogate_t *autogate = (gsl_autogate_t *)data;
+ unsigned long flags;
+
+// printk(KERN_ERR "%s:%d id %d active %d\n", __func__, __LINE__, autogate->dev->id, autogate->active);
+ del_timer(&autogate->timer);
+ spin_lock_irqsave(&autogate->lock, flags);
+ WARN(!autogate->active, "GPU Device %d is already inactive\n", autogate->dev->id);
+ autogate->active = 0;
+ /* idle check may sleep, so don't use it */
+// if (autogate->dev->ftbl.device_idle)
+// autogate->dev->ftbl.device_idle(autogate->dev, GSL_TIMEOUT_DEFAULT);
+ kgsl_clock(autogate->dev->id, 0);
+ spin_unlock_irqrestore(&autogate->lock, flags);
+}
+
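+/* Explicitly switch the clock of an initialized device on or off. */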
+int kgsl_device_clock(gsl_deviceid_t id, int enable)
+{
+ int ret = GSL_SUCCESS;
+ gsl_device_t *device;
+
+ device = &gsl_driver.device[id-1]; // device_id is 1 based
+ if (device->flags & GSL_FLAGS_INITIALIZED) {
+ if (enable)
+ kgsl_device_active(device);
+ else
+ kgsl_device_inactive((unsigned long)device);
+ } else {
+ printk(KERN_ERR "%s: Dev %d clock is already off!\n", __func__, id);
+ ret = GSL_FAILURE;
+ }
+
+ return ret;
+}
+
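+/* Allocate and arm the per-device autogate; the device starts active with the idle timer running. */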
+int kgsl_device_autogate_init(gsl_device_t *dev)
+{
+ gsl_autogate_t *autogate;
+
+// printk(KERN_ERR "%s:%d id %d\n", __func__, __LINE__, dev->id);
+ autogate = kmalloc(sizeof(gsl_autogate_t), GFP_KERNEL);
+ if (!autogate) {
+ printk(KERN_ERR "%s: out of memory!\n", __func__);
+ return -ENOMEM;
+ }
+ autogate->dev = dev;
+ autogate->active = 1;
+ spin_lock_init(&autogate->lock);
+ autogate->timeout = KGSL_DEVICE_IDLE_TIMEOUT;
+ init_timer(&autogate->timer);
+ autogate->timer.expires = jiffies + msecs_to_jiffies(autogate->timeout);
+ autogate->timer.function = kgsl_device_inactive;
+ autogate->timer.data = (unsigned long)autogate;
+ add_timer(&autogate->timer);
+ dev->autogate = autogate;
+ return 0;
+}
+
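+/* Tear down the autogate: cancel the pending idle timer, or re-enable the clock if the device was already gated off. */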
+void kgsl_device_autogate_exit(gsl_device_t *dev)
+{
+ gsl_autogate_t *autogate = dev->autogate;
+
+// printk(KERN_ERR "%s:%d id %d active %d\n", __func__, __LINE__, dev->id, autogate->active);
+ if (autogate->active)
+ del_timer(&autogate->timer);
+ else
+ kgsl_clock(autogate->dev->id, 1);
+
+ kfree(autogate);
+ dev->autogate = NULL;
+}