Diffstat (limited to 'drivers/mxc/amd-gpu/common')
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_cmdstream.c        |  239
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_cmdwindow.c        |  136
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_context.c          |   74
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_debug_pm4.c        | 1015
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_device.c           |  663
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_drawctxt.c         | 1796
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_driver.c           |  330
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_g12.c              |  987
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_intrmgr.c          |  305
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_log.c              |  591
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_memmgr.c           |  949
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_mmu.c              | 1036
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_ringbuffer.c       | 1154
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_sharedmem.c        |  937
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_tbdump.c           |  228
-rw-r--r--  drivers/mxc/amd-gpu/common/gsl_yamato.c           |  886
-rw-r--r--  drivers/mxc/amd-gpu/common/pfp_microcode_nrt.inl  |  327
-rw-r--r--  drivers/mxc/amd-gpu/common/pm4_microcode.inl      |  815
18 files changed, 12468 insertions(+), 0 deletions(-)
diff --git a/drivers/mxc/amd-gpu/common/gsl_cmdstream.c b/drivers/mxc/amd-gpu/common/gsl_cmdstream.c
new file mode 100644
index 000000000000..338192c7ba32
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_cmdstream.c
@@ -0,0 +1,239 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_hal.h"
+#include "gsl_cmdstream.h"
+
+//////////////////////////////////////////////////////////////////////////////
+// functions
+//////////////////////////////////////////////////////////////////////////////
+
+int
+kgsl_cmdstream_init(gsl_device_t *device)
+{
+ return GSL_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_cmdstream_close(gsl_device_t *device)
+{
+ return GSL_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+
+gsl_timestamp_t
+kgsl_cmdstream_readtimestamp0(gsl_deviceid_t device_id, gsl_timestamp_type_t type)
+{
+ gsl_timestamp_t timestamp = -1;
+ gsl_device_t* device = &gsl_driver.device[device_id-1];
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+ "--> gsl_timestamp_t kgsl_cmdstream_readtimestamp(gsl_deviceid_t device_id=%d gsl_timestamp_type_t type=%d)\n", device_id, type );
+#if (defined(GSL_BLD_G12) && defined(IRQTHREAD_POLL))
+ kos_event_signal(device->irqthread_event);
+#endif
+ if (type == GSL_TIMESTAMP_CONSUMED)
+ {
+ // start-of-pipeline timestamp
+ GSL_CMDSTREAM_GET_SOP_TIMESTAMP(device, (unsigned int*)&timestamp);
+ }
+ else if (type == GSL_TIMESTAMP_RETIRED)
+ {
+ // end-of-pipeline timestamp
+ GSL_CMDSTREAM_GET_EOP_TIMESTAMP(device, (unsigned int*)&timestamp);
+ }
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_cmdstream_readtimestamp. Return value %d\n", timestamp );
+ return (timestamp);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API gsl_timestamp_t
+kgsl_cmdstream_readtimestamp(gsl_deviceid_t device_id, gsl_timestamp_type_t type)
+{
+ gsl_timestamp_t timestamp = -1;
+ GSL_API_MUTEX_LOCK();
+ timestamp = kgsl_cmdstream_readtimestamp0(device_id, type);
+ GSL_API_MUTEX_UNLOCK();
+ return timestamp;
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_cmdstream_issueibcmds(gsl_deviceid_t device_id, int drawctxt_index, gpuaddr_t ibaddr, int sizedwords, gsl_timestamp_t *timestamp, unsigned int flags)
+{
+ gsl_device_t* device = &gsl_driver.device[device_id-1];
+ int status = GSL_FAILURE;
+ GSL_API_MUTEX_LOCK();
+
+ kgsl_device_active(device);
+
+ if (device->ftbl.cmdstream_issueibcmds)
+ {
+ status = device->ftbl.cmdstream_issueibcmds(device, drawctxt_index, ibaddr, sizedwords, timestamp, flags);
+ }
+ GSL_API_MUTEX_UNLOCK();
+ return status;
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_add_timestamp(gsl_deviceid_t device_id, gsl_timestamp_t *timestamp)
+{
+ gsl_device_t* device = &gsl_driver.device[device_id-1];
+ int status = GSL_FAILURE;
+ GSL_API_MUTEX_LOCK();
+ if (device->ftbl.device_addtimestamp)
+ {
+ status = device->ftbl.device_addtimestamp(device, timestamp);
+ }
+ GSL_API_MUTEX_UNLOCK();
+ return status;
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API
+int kgsl_cmdstream_waittimestamp(gsl_deviceid_t device_id, gsl_timestamp_t timestamp, unsigned int timeout)
+{
+ gsl_device_t* device = &gsl_driver.device[device_id-1];
+ int status = GSL_FAILURE;
+ if (device->ftbl.device_waittimestamp)
+ {
+ status = device->ftbl.device_waittimestamp(device, timestamp, timeout);
+ }
+ return status;
+}
+
+//----------------------------------------------------------------------------
+
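+// Free any memory queued to be released on a timestamp: detach the leading run
+// of nodes whose timestamps the GPU has already retired, then free those nodes.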
+void
+kgsl_cmdstream_memqueue_drain(gsl_device_t *device)
+{
+ gsl_memnode_t *memnode, *nextnode, *freehead;
+ gsl_timestamp_t timestamp, ts_processed;
+ gsl_memqueue_t *memqueue = &device->memqueue;
+ // check head
+ if (memqueue->head == NULL)
+ {
+ return;
+ }
+ // get current EOP timestamp
+ ts_processed = kgsl_cmdstream_readtimestamp0(device->id, GSL_TIMESTAMP_RETIRED);
+ timestamp = memqueue->head->timestamp;
+ // check head timestamp
+ if (!(((ts_processed - timestamp) >= 0) || ((ts_processed - timestamp) < -20000)))
+ {
+ return;
+ }
+ memnode = memqueue->head;
+ freehead = memqueue->head;
+ // get node list to free
+ for(;;)
+ {
+ nextnode = memnode->next;
+ if (nextnode == NULL)
+ {
+ // entire queue drained
+ memqueue->head = NULL;
+ memqueue->tail = NULL;
+ break;
+ }
+ timestamp = nextnode->timestamp;
+ if (!(((ts_processed - timestamp) >= 0) || ((ts_processed - timestamp) < -20000)))
+ {
+ // drained up to a point
+ memqueue->head = nextnode;
+ memnode->next = NULL;
+ break;
+ }
+ memnode = nextnode;
+ }
+ // free nodes
+ while (freehead)
+ {
+ memnode = freehead;
+ freehead = memnode->next;
+ kgsl_sharedmem_free0(&memnode->memdesc, memnode->pid);
+ kos_free(memnode);
+ }
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_cmdstream_freememontimestamp(gsl_deviceid_t device_id, gsl_memdesc_t *memdesc, gsl_timestamp_t timestamp, gsl_timestamp_type_t type)
+{
+ gsl_memnode_t *memnode;
+ gsl_device_t *device = &gsl_driver.device[device_id-1];
+ gsl_memqueue_t *memqueue;
+ (void)type; // unreferenced; for now just use the EOP timestamp
+
+ GSL_API_MUTEX_LOCK();
+
+ memqueue = &device->memqueue;
+
+ memnode = kos_malloc(sizeof(gsl_memnode_t));
+
+ if (!memnode)
+ {
+ // an alternative would be to idle the device and free immediately; since the upper-level driver probably won't check this return value, that may be the better approach
+ GSL_API_MUTEX_UNLOCK();
+ return (GSL_FAILURE);
+ }
+
+ memnode->timestamp = timestamp;
+ memnode->pid = GSL_CALLER_PROCESSID_GET();
+ memnode->next = NULL;
+ kos_memcpy(&memnode->memdesc, memdesc, sizeof(gsl_memdesc_t));
+
+ // add to end of queue
+ if (memqueue->tail != NULL)
+ {
+ memqueue->tail->next = memnode;
+ memqueue->tail = memnode;
+ }
+ else
+ {
+ KOS_ASSERT(memqueue->head == NULL);
+ memqueue->head = memnode;
+ memqueue->tail = memnode;
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+ return (GSL_SUCCESS);
+}
+
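+// Returns true when ts_new has reached ts_old: a non-negative difference means
+// ts_new is at or ahead of ts_old, and a very large negative difference is
+// treated as counter wraparound (ts_new has wrapped past ts_old).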
+static int kgsl_cmdstream_timestamp_cmp(gsl_timestamp_t ts_new, gsl_timestamp_t ts_old)
+{
+ gsl_timestamp_t ts_diff = ts_new - ts_old;
+ return (ts_diff >= 0) || (ts_diff < -20000);
+}
+
+int kgsl_cmdstream_check_timestamp(gsl_deviceid_t device_id, gsl_timestamp_t timestamp)
+{
+ gsl_timestamp_t ts_processed;
+ ts_processed = kgsl_cmdstream_readtimestamp0(device_id, GSL_TIMESTAMP_RETIRED);
+ return kgsl_cmdstream_timestamp_cmp(ts_processed, timestamp);
+}
diff --git a/drivers/mxc/amd-gpu/common/gsl_cmdwindow.c b/drivers/mxc/amd-gpu/common/gsl_cmdwindow.c
new file mode 100644
index 000000000000..4d70da5b25c8
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_cmdwindow.c
@@ -0,0 +1,136 @@
+/* Copyright (c) 2002,2007-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_hal.h"
+
+#ifdef GSL_BLD_G12
+
+//////////////////////////////////////////////////////////////////////////////
+// defines
+//////////////////////////////////////////////////////////////////////////////
+#define GSL_CMDWINDOW_TARGET_MASK 0x000000FF
+#define GSL_CMDWINDOW_ADDR_MASK 0x00FFFF00
+#define GSL_CMDWINDOW_TARGET_SHIFT 0
+#define GSL_CMDWINDOW_ADDR_SHIFT 8
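+// A command window write packs the window target into bits [7:0] and the
+// register address within that window into bits [23:8]; this packed dword is
+// written to the command stream, immediately followed by the data dword.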
+
+
+//////////////////////////////////////////////////////////////////////////////
+// functions
+//////////////////////////////////////////////////////////////////////////////
+
+int
+kgsl_cmdwindow_init(gsl_device_t *device)
+{
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_cmdwindow_close(gsl_device_t *device)
+{
+ return (GSL_SUCCESS);
+}
+
+#endif // GSL_BLD_G12
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_cmdwindow_write0(gsl_deviceid_t device_id, gsl_cmdwindow_t target, unsigned int addr, unsigned int data)
+{
+#ifdef GSL_BLD_G12
+ gsl_device_t *device;
+ unsigned int cmdwinaddr;
+ unsigned int cmdstream;
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_cmdwindow_write( gsl_device_id_t device_id=%d, gsl_cmdwindow_t target=%d, unsigned int addr=0x%08x, unsigned int data=0x%08x)\n", device_id, target, addr, data );
+
+ device = &gsl_driver.device[device_id-1]; // device_id is 1 based
+
+ if (target < GSL_CMDWINDOW_MIN || target > GSL_CMDWINDOW_MAX)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_ERROR, "ERROR: Invalid target.\n" );
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_cmdwindow_write. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ if ((!(device->flags & GSL_FLAGS_INITIALIZED) && target == GSL_CMDWINDOW_MMU) ||
+ (!(device->flags & GSL_FLAGS_STARTED) && target != GSL_CMDWINDOW_MMU))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_ERROR, "ERROR: Invalid device state to write to selected target.\n" );
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_cmdwindow_write. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ // set command stream
+ if (target == GSL_CMDWINDOW_MMU)
+ {
+#ifdef GSL_NO_MMU
+ return (GSL_SUCCESS);
+#endif
+ cmdstream = ADDR_VGC_MMUCOMMANDSTREAM;
+ }
+ else
+ {
+ cmdstream = ADDR_VGC_COMMANDSTREAM;
+ }
+
+
+ // set command window address
+ cmdwinaddr = ((target << GSL_CMDWINDOW_TARGET_SHIFT) & GSL_CMDWINDOW_TARGET_MASK);
+ cmdwinaddr |= ((addr << GSL_CMDWINDOW_ADDR_SHIFT) & GSL_CMDWINDOW_ADDR_MASK);
+
+#ifndef GSL_NO_MMU
+ // set mmu pagetable
+ kgsl_mmu_setpagetable(device, GSL_CALLER_PROCESSID_GET());
+#endif
+
+ // write command window address
+ device->ftbl.device_regwrite(device, (cmdstream)>>2, cmdwinaddr);
+
+ // write data
+ device->ftbl.device_regwrite(device, (cmdstream)>>2, data);
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_cmdwindow_write. Return value %B\n", GSL_SUCCESS );
+
+ return (GSL_SUCCESS);
+#else
+ // unreferenced formal parameter
+ (void) device_id;
+ (void) target;
+ (void) addr;
+ (void) data;
+
+ return (GSL_FAILURE);
+#endif // GSL_BLD_G12
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_cmdwindow_write(gsl_deviceid_t device_id, gsl_cmdwindow_t target, unsigned int addr, unsigned int data)
+{
+ int status = GSL_SUCCESS;
+ GSL_API_MUTEX_LOCK();
+ status = kgsl_cmdwindow_write0(device_id, target, addr, data);
+ GSL_API_MUTEX_UNLOCK();
+ return status;
+}
diff --git a/drivers/mxc/amd-gpu/common/gsl_context.c b/drivers/mxc/amd-gpu/common/gsl_context.c
new file mode 100644
index 000000000000..c999247b3afd
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_context.c
@@ -0,0 +1,74 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_hal.h"
+#include "gsl_context.h"
+
+//////////////////////////////////////////////////////////////////////////////
+// functions
+//////////////////////////////////////////////////////////////////////////////
+
+KGSL_API int
+kgsl_context_create(gsl_deviceid_t device_id, gsl_context_type_t type, unsigned int *drawctxt_id, gsl_flags_t flags)
+{
+ gsl_device_t* device = &gsl_driver.device[device_id-1];
+ int status;
+
+ GSL_API_MUTEX_LOCK();
+
+ if (device->ftbl.context_create)
+ {
+ status = device->ftbl.context_create(device, type, drawctxt_id, flags);
+ }
+ else
+ {
+ status = GSL_FAILURE;
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+ return status;
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_context_destroy(gsl_deviceid_t device_id, unsigned int drawctxt_id)
+{
+ gsl_device_t* device = &gsl_driver.device[device_id-1];
+ int status;
+
+ GSL_API_MUTEX_LOCK();
+
+ if (device->ftbl.context_destroy)
+ {
+ status = device->ftbl.context_destroy(device, drawctxt_id);
+ }
+ else
+ {
+ status = GSL_FAILURE;
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+ return status;
+}
+
+//----------------------------------------------------------------------------
+
diff --git a/drivers/mxc/amd-gpu/common/gsl_debug_pm4.c b/drivers/mxc/amd-gpu/common/gsl_debug_pm4.c
new file mode 100644
index 000000000000..847df8dbe386
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_debug_pm4.c
@@ -0,0 +1,1015 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_hal.h"
+
+#if defined(_WIN32) && defined (GSL_BLD_YAMATO)
+
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+
+//#define PM4_DEBUG_USE_MEMBUF
+
+#ifdef PM4_DEBUG_USE_MEMBUF
+
+#define MEMBUF_SIZE 100000
+#define BUFFER_END_MARGIN 1000
+char memBuf[MEMBUF_SIZE];
+static int writePtr = 0;
+static unsigned int lineNumber = 0;
+//#define fprintf(A,...); writePtr += sprintf( memBuf+writePtr, __VA_ARGS__ ); sprintf( memBuf+writePtr, "###" ); if( writePtr > MEMBUF_SIZE-BUFFER_END_MARGIN ) { memset(memBuf+writePtr, '#', MEMBUF_SIZE-writePtr); writePtr = 0; }
+#define FILE char
+#define fopen(X,Y) 0
+#define fclose(X)
+
+int printString( FILE *_File, const char * _Format, ...)
+{
+ int ret;
+ va_list ap;
+ (void)_File;
+
+ va_start(ap, _Format);
+ if( writePtr > 0 && memBuf[writePtr-1] == '\n' )
+ {
+ // Add line number if last written character was newline
+ writePtr += sprintf( memBuf+writePtr, "%d: ", lineNumber++ );
+ }
+ ret = vsprintf(memBuf+writePtr, _Format, ap);
+ writePtr += ret;
+ sprintf( memBuf+writePtr, "###" );
+ if( writePtr > MEMBUF_SIZE-BUFFER_END_MARGIN )
+ {
+ memset(memBuf+writePtr, '#', MEMBUF_SIZE-writePtr);
+ writePtr = 0;
+ }
+
+ va_end(ap);
+
+ return ret;
+}
+
+#else
+
+int printString( FILE *_File, const char * _Format, ...)
+{
+ int ret;
+ va_list ap;
+ va_start(ap, _Format);
+ ret = vfprintf(_File, _Format, ap);
+ va_end(ap);
+ fflush(_File);
+ return ret;
+}
+
+#endif
+
+#ifndef _WIN32_WCE
+#define PM4_DUMPFILE "pm4dump.txt"
+#else
+#define PM4_DUMPFILE "\\Release\\pm4dump.txt"
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+// defines
+//////////////////////////////////////////////////////////////////////////////
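+// EXPAND_OPCODE rebuilds the full type-3 packet header constant from a bare
+// IT_OPCODE field so it can be compared against the PM4_PACKET3_* definitions.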
+#define EXPAND_OPCODE(opcode) ((opcode << 8) | PM4_PKT_MASK)
+
+#define GetString_uint GetString_int
+#define GetString_fixed12_4(val, szValue) GetString_fixed(val, 12, 4, szValue)
+#define GetString_signedint15(val, szValue) GetString_signedint(val, 15, szValue)
+
+// Need a prototype for this function
+void WritePM4Packet_Type3(FILE* pFile, unsigned int dwHeader, unsigned int** ppBuffer);
+
+static int indirectionLevel = 0;
+
+
+//////////////////////////////////////////////////////////////////////////////
+// functions
+//////////////////////////////////////////////////////////////////////////////
+
+void WriteDWORD(FILE* pFile, unsigned int dwValue)
+{
+ printString(pFile, " 0x%08x", dwValue);
+}
+
+void WriteDWORD2(FILE* pFile, unsigned int dwValue)
+{
+ printString(pFile, " 0x%08x\n", dwValue);
+}
+
+//----------------------------------------------------------------------------
+
+// Generate the GetString_## functions for enumerated types
+#define START_ENUMTYPE(__type) \
+void GetString_##__type(unsigned int val, char* szValue) \
+{ \
+ switch(val) \
+ {
+
+#define GENERATE_ENUM(__enumname, __val) \
+ case __val: \
+ kos_strcpy(szValue, #__enumname); \
+ break;
+
+#define END_ENUMTYPE(__type) \
+ default: \
+ sprintf(szValue, "Unknown: %d", val); \
+ break; \
+ } \
+}
+
+#include _YAMATO_GENENUM_H
+
+//----------------------------------------------------------------------------
+
+void
+GetString_hex(unsigned int val, char* szValue)
+{
+ sprintf(szValue, "0x%x", val);
+}
+
+//----------------------------------------------------------------------------
+
+void
+GetString_float(unsigned int val, char* szValue)
+{
+ float fval = *((float*) &val);
+ sprintf(szValue, "%.4f", fval);
+}
+
+//----------------------------------------------------------------------------
+
+void
+GetString_bool(unsigned int val, char* szValue)
+{
+ if (val)
+ {
+ kos_strcpy(szValue, "TRUE");
+ }
+ else
+ {
+ kos_strcpy(szValue, "FALSE");
+ }
+}
+
+//----------------------------------------------------------------------------
+
+void GetString_int(unsigned int val, char* szValue)
+{
+ sprintf(szValue, "%d", val);
+}
+
+//----------------------------------------------------------------------------
+
+void
+GetString_intMinusOne(unsigned int val, char* szValue)
+{
+ sprintf(szValue, "%d+1", val);
+}
+
+//----------------------------------------------------------------------------
+
+void
+GetString_signedint(unsigned int val, unsigned int dwNumBits, char* szValue)
+{
+ int nValue = val;
+
+ if (val & (1<<(dwNumBits-1)))
+ {
+ nValue |= 0xffffffff << dwNumBits;
+ }
+
+ sprintf(szValue, "%d", nValue);
+}
+
+//----------------------------------------------------------------------------
+
+void
+GetString_fixed(unsigned int val, unsigned int dwNumInt, unsigned int dwNumFrac, char* szValue)
+{
+
+ (void) dwNumInt; // unreferenced formal parameter
+
+ if (val>>dwNumFrac == 0)
+ {
+ // Integer part is 0 - just print out the fractional part
+ sprintf(szValue, "%d/%d",
+ val&((1<<dwNumFrac)-1),
+ 1<<dwNumFrac);
+ }
+ else
+ {
+ // Print out as a mixed fraction
+ sprintf(szValue, "%d %d/%d",
+ val>>dwNumFrac,
+ val&((1<<dwNumFrac)-1),
+ 1<<dwNumFrac);
+ }
+}
+
+//----------------------------------------------------------------------------
+
+void
+GetString_Register(unsigned int dwBaseIndex, unsigned int dwValue, char* pszString)
+{
+ char szValue[64];
+ char szField[128];
+
+ // Empty the string
+ pszString[0] = '\0';
+
+ switch(dwBaseIndex)
+ {
+#define START_REGISTER(__reg) \
+ case mm##__reg: \
+ { \
+ reg##__reg reg; \
+ reg.u32All = dwValue; \
+ strcat(pszString, #__reg ", (");
+
+#define GENERATE_FIELD(__name, __type) \
+ GetString_##__type(reg.bitfields.__name, szValue); \
+ sprintf(szField, #__name " = %s, ", szValue); \
+ strcat(pszString, szField);
+
+#define END_REGISTER(__reg) \
+ pszString[strlen(pszString)-2]='\0'; \
+ strcat(pszString, ")"); \
+ } \
+ break;
+
+#include _YAMATO_GENREG_H
+
+ default:
+ break;
+ }
+}
+
+//----------------------------------------------------------------------------
+
+void
+GetString_Type3Opcode(unsigned int opcode, char* pszValue)
+{
+switch(EXPAND_OPCODE(opcode))
+ {
+#define TYPE3SWITCH(__opcode) \
+ case PM4_PACKET3_##__opcode: \
+ kos_strcpy(pszValue, #__opcode); \
+ break;
+
+ TYPE3SWITCH(NOP)
+ TYPE3SWITCH(IB_PREFETCH_END)
+ TYPE3SWITCH(SUBBLK_PREFETCH)
+
+ TYPE3SWITCH(INSTR_PREFETCH)
+ TYPE3SWITCH(REG_RMW)
+ TYPE3SWITCH(DRAW_INDX)
+ TYPE3SWITCH(VIZ_QUERY)
+ TYPE3SWITCH(SET_STATE)
+ TYPE3SWITCH(WAIT_FOR_IDLE)
+ TYPE3SWITCH(IM_LOAD)
+ TYPE3SWITCH(IM_LOAD_IMMEDIATE)
+ TYPE3SWITCH(SET_CONSTANT)
+ TYPE3SWITCH(LOAD_CONSTANT_CONTEXT)
+ TYPE3SWITCH(LOAD_ALU_CONSTANT)
+
+ TYPE3SWITCH(DRAW_INDX_BIN)
+ TYPE3SWITCH(3D_DRAW_INDX_2_BIN)
+ TYPE3SWITCH(3D_DRAW_INDX_2)
+ TYPE3SWITCH(INDIRECT_BUFFER_PFD)
+ TYPE3SWITCH(INVALIDATE_STATE)
+ TYPE3SWITCH(WAIT_REG_MEM)
+ TYPE3SWITCH(MEM_WRITE)
+ TYPE3SWITCH(REG_TO_MEM)
+ TYPE3SWITCH(INDIRECT_BUFFER)
+
+ TYPE3SWITCH(CP_INTERRUPT)
+ TYPE3SWITCH(COND_EXEC)
+ TYPE3SWITCH(COND_WRITE)
+ TYPE3SWITCH(EVENT_WRITE)
+ TYPE3SWITCH(INSTR_MATCH)
+ TYPE3SWITCH(ME_INIT)
+ TYPE3SWITCH(CONST_PREFETCH)
+ TYPE3SWITCH(MEM_WRITE_CNTR)
+
+ TYPE3SWITCH(SET_BIN_MASK)
+ TYPE3SWITCH(SET_BIN_SELECT)
+ TYPE3SWITCH(WAIT_REG_EQ)
+ TYPE3SWITCH(WAIT_REG_GTE)
+ TYPE3SWITCH(INCR_UPDT_STATE)
+ TYPE3SWITCH(INCR_UPDT_CONST)
+ TYPE3SWITCH(INCR_UPDT_INSTR)
+ TYPE3SWITCH(EVENT_WRITE_SHD)
+ TYPE3SWITCH(EVENT_WRITE_CFL)
+ TYPE3SWITCH(EVENT_WRITE_ZPD)
+ TYPE3SWITCH(WAIT_UNTIL_READ)
+ TYPE3SWITCH(WAIT_IB_PFD_COMPLETE)
+ TYPE3SWITCH(CONTEXT_UPDATE)
+
+ default:
+ sprintf(pszValue, "Unknown: %d", opcode);
+ break;
+ }
+}
+
+//----------------------------------------------------------------------------
+
+void
+WritePM4Packet_Type0(FILE* pFile, unsigned int dwHeader, unsigned int** ppBuffer)
+{
+ pm4_type0 header = *((pm4_type0*) &dwHeader);
+ unsigned int* pBuffer = *ppBuffer;
+ unsigned int dwIndex;
+
+ WriteDWORD(pFile, dwHeader);
+ printString(pFile, " // Type-0 packet (BASE_INDEX = 0x%x, ONE_REG_WR = %d, COUNT = %d+1)\n",
+ header.base_index, header.one_reg_wr, header.count);
+
+ // Now go through and write the dwNumDWORDs
+ for (dwIndex = 0; dwIndex < header.count+1; dwIndex++)
+ {
+ char szRegister[1024];
+ unsigned int dwRegIndex;
+ unsigned int dwRegValue = *(pBuffer++);
+
+ if (header.one_reg_wr)
+ {
+ dwRegIndex = header.base_index;
+ }
+ else
+ {
+ dwRegIndex = header.base_index + dwIndex;
+ }
+
+ WriteDWORD(pFile, dwRegValue);
+ // Write register string based on fields
+ GetString_Register(dwRegIndex, dwRegValue, szRegister);
+ printString(pFile, " // %s\n", szRegister);
+
+ // Write actual unsigned int
+
+ }
+
+ *ppBuffer = pBuffer;
+}
+
+//----------------------------------------------------------------------------
+
+void
+WritePM4Packet_Type2(FILE* pFile, unsigned int dwHeader, unsigned int** ppBuffer)
+{
+ unsigned int* pBuffer = *ppBuffer;
+
+ WriteDWORD(pFile, dwHeader);
+ printString(pFile, " // Type-2 packet\n");
+
+ *ppBuffer = pBuffer;
+}
+
+//----------------------------------------------------------------------------
+
+void
+AnalyzePacketType(FILE *pFile, unsigned int dwHeader, unsigned int**ppBuffer)
+{
+ switch(dwHeader & PM4_PKT_MASK)
+ {
+ case PM4_TYPE0_PKT:
+ WritePM4Packet_Type0(pFile, dwHeader, ppBuffer);
+ break;
+
+ case PM4_TYPE1_PKT:
+ break;
+
+ case PM4_TYPE2_PKT:
+ WritePM4Packet_Type2(pFile, dwHeader, ppBuffer);
+ break;
+
+ case PM4_TYPE3_PKT:
+ WritePM4Packet_Type3(pFile, dwHeader, ppBuffer);
+ break;
+ }
+}
+
+void
+WritePM4Packet_Type3(FILE* pFile, unsigned int dwHeader, unsigned int** ppBuffer)
+{
+ pm4_type3 header = *((pm4_type3*) &dwHeader);
+ unsigned int* pBuffer = *ppBuffer;
+ unsigned int dwIndex;
+ char szOpcode[64];
+
+ if((EXPAND_OPCODE(header.it_opcode) == PM4_PACKET3_INDIRECT_BUFFER) ||
+ (EXPAND_OPCODE(header.it_opcode) == PM4_PACKET3_INDIRECT_BUFFER_PFD))
+ {
+ unsigned int *pIndirectBuffer = (unsigned int *) *(pBuffer++); // ordinal 2 of IB packet is an address
+ unsigned int *pIndirectBufferEnd = pIndirectBuffer + *(pBuffer++); // ordinal 3 of IB packet is size
+ unsigned int gpuaddr = kgsl_sharedmem_convertaddr((unsigned int) pIndirectBuffer, 1);
+
+ indirectionLevel++;
+
+ WriteDWORD2(pFile, dwHeader);
+ WriteDWORD2(pFile, gpuaddr);
+ WriteDWORD2(pFile, (unsigned int) (pIndirectBufferEnd-pIndirectBuffer));
+
+ if (indirectionLevel == 1)
+ {
+ printString(pFile, "Start_IB1, base=0x%x, size=%d\n", gpuaddr, (unsigned int)(pIndirectBufferEnd - pIndirectBuffer));
+ }
+ else
+ {
+ printString(pFile, "Start_IB2, base=0x%x, size=%d\n", gpuaddr, (unsigned int)(pIndirectBufferEnd - pIndirectBuffer));
+ }
+
+ while(pIndirectBuffer < pIndirectBufferEnd)
+ {
+ unsigned int _dwHeader = *(pIndirectBuffer++);
+
+ AnalyzePacketType(pFile, _dwHeader, &pIndirectBuffer);
+ }
+
+ if (indirectionLevel == 1)
+ {
+ printString(pFile, "End_IB1\n");
+ }
+ else
+ {
+ printString(pFile, "End_IB2\n");
+ }
+
+ indirectionLevel--;
+ }
+ else
+ {
+ unsigned int registerAddr = 0xffffffff;
+ char szRegister[1024];
+
+ GetString_Type3Opcode(header.it_opcode, szOpcode);
+
+ WriteDWORD(pFile, dwHeader);
+ printString(pFile, " // Type-3 packet (PREDICATE = %d, IT_OPCODE = %s, COUNT = %d+1)\n",
+ header.predicate, szOpcode, header.count);
+
+ // Go through each command
+ for (dwIndex = 0; dwIndex < header.count+1; dwIndex++)
+ {
+ // Check for a register write
+ if((EXPAND_OPCODE(header.it_opcode) == PM4_PACKET3_SET_CONSTANT) && (((*pBuffer) >> 16) == 0x4))
+ registerAddr = (*pBuffer) & 0xffff;
+
+ // Write unsigned int
+ WriteDWORD(pFile, *pBuffer);
+
+ // Starting at Ordinal 2 is actual register values
+ if((dwIndex > 0) && (registerAddr != 0xffffffff))
+ {
+ // Write register string based on address
+ GetString_Register(registerAddr + 0x2000, *pBuffer, szRegister);
+ printString(pFile, " // %s\n", szRegister);
+ registerAddr++;
+ }
+ else
+ {
+ // Write out newline if we aren't augmenting with register fields
+ printString(pFile, "\n");
+ }
+
+ pBuffer++;
+ }
+ }
+ *ppBuffer = pBuffer;
+}
+
+//----------------------------------------------------------------------------
+
+void
+Yamato_DumpInitParams(unsigned int dwEDRAMBase, unsigned int dwEDRAMSize)
+{
+ FILE* pFile = fopen(PM4_DUMPFILE, "a");
+
+ printString(pFile, "InitParams, edrambase=0x%x, edramsize=%d\n",
+ dwEDRAMBase, dwEDRAMSize);
+
+ fclose(pFile);
+}
+
+//----------------------------------------------------------------------------
+
+void
+Yamato_DumpSwapBuffers(unsigned int dwAddress, unsigned int dwWidth,
+ unsigned int dwHeight, unsigned int dwPitch, unsigned int dwAlignedHeight, unsigned int dwBitsPerPixel)
+{
+ // Open file
+ FILE* pFile = fopen(PM4_DUMPFILE, "a");
+
+ printString(pFile, "SwapBuffers, address=0x%08x, width=%d, height=%d, pitch=%d, alignedheight=%d, bpp=%d\n",
+ dwAddress, dwWidth, dwHeight, dwPitch, dwAlignedHeight, dwBitsPerPixel);
+
+ fclose(pFile);
+}
+
+//----------------------------------------------------------------------------
+
+void
+Yamato_DumpRegSpace(gsl_device_t *device)
+{
+ int regsPerLine = 0x20;
+ unsigned int dwOffset;
+ unsigned int value;
+
+ FILE* pFile = fopen(PM4_DUMPFILE, "a");
+
+ printString(pFile, "Start_RegisterSpace\n");
+
+ for (dwOffset = 0; dwOffset < device->regspace.sizebytes; dwOffset += 4)
+ {
+ if (dwOffset % regsPerLine == 0)
+ {
+ printString(pFile, " 0x%08x ", dwOffset);
+ }
+
+ GSL_HAL_REG_READ(device->id, (unsigned int) device->regspace.mmio_virt_base, (dwOffset >> 2), &value);
+
+ printString(pFile, " 0x%08x", value);
+
+ if (((dwOffset + 4) % regsPerLine == 0) && ((dwOffset + 4) < device->regspace.sizebytes))
+ {
+ printString(pFile, "\n");
+ }
+ }
+
+ printString(pFile, "\nEnd_RegisterSpace\n");
+
+ fclose(pFile);
+}
+
+//----------------------------------------------------------------------------
+
+void
+Yamato_DumpAllocateMemory(unsigned int dwSize, unsigned int dwFlags, unsigned int dwAddress,
+ unsigned int dwActualSize)
+{
+ // Open file
+ FILE* pFile = fopen(PM4_DUMPFILE, "a");
+
+ printString(pFile, "AllocateMemory, size=%d, flags=0x%x, address=0x%x, actualSize=%d\n",
+ dwSize, dwFlags, dwAddress, dwActualSize);
+
+ fclose(pFile);
+}
+
+//----------------------------------------------------------------------------
+
+void
+Yamato_DumpFreeMemory(unsigned int dwAddress)
+{
+ // Open file
+ FILE* pFile = fopen(PM4_DUMPFILE, "a");
+
+ printString(pFile, "FreeMemory, address=0x%x\n", dwAddress);
+
+ fclose(pFile);
+}
+
+//----------------------------------------------------------------------------
+
+void
+Yamato_DumpWriteMemory(unsigned int dwAddress, unsigned int dwSize, void* pData)
+{
+ // Open file
+ FILE* pFile = fopen(PM4_DUMPFILE, "a");
+ unsigned int dwNumDWORDs;
+ unsigned int dwIndex;
+ unsigned int *pDataPtr;
+
+ printString(pFile, "StartWriteMemory, address=0x%x, size=%d\n", dwAddress, dwSize);
+
+ // Now write the data, in dwNumDWORDs
+ dwNumDWORDs = dwSize >> 2;
+
+ // If there are spillover bytes into the next dword, increment the amount dumped out here.
+ // The reader needs to take care of not overwriting the nonvalid bytes
+ if((dwSize % 4) != 0)
+ dwNumDWORDs++;
+
+ for (dwIndex = 0, pDataPtr = (unsigned int *)pData; dwIndex < dwNumDWORDs; dwIndex++, pDataPtr++)
+ {
+ WriteDWORD2(pFile, *pDataPtr);
+ }
+
+ printString(pFile, "EndWriteMemory\n");
+
+ fclose(pFile);
+}
+
+void
+Yamato_DumpSetMemory(unsigned int dwAddress, unsigned int dwSize, unsigned int pData)
+{
+ // Open file
+ FILE* pFile = fopen(PM4_DUMPFILE, "a");
+// unsigned int* pDataPtr;
+
+ printString(pFile, "SetMemory, address=0x%x, size=%d, value=0x%x\n",
+ dwAddress, dwSize, pData);
+
+ fclose(pFile);
+}
+
+//----------------------------------------------------------------------------
+void
+Yamato_ConvertIBAddr(unsigned int dwHeader, unsigned int *pBuffer, int gpuToHost)
+{
+ unsigned int hostaddr;
+ unsigned int *ibend;
+ unsigned int *addr;
+ unsigned int *ib = pBuffer;
+ pm4_type3 header = *((pm4_type3*) &dwHeader);
+
+ // convert ib1 base address
+ if((EXPAND_OPCODE(header.it_opcode) == PM4_PACKET3_INDIRECT_BUFFER) ||
+ (EXPAND_OPCODE(header.it_opcode) == PM4_PACKET3_INDIRECT_BUFFER_PFD))
+ {
+ if (gpuToHost)
+ {
+ // from gpu to host
+ *ib = kgsl_sharedmem_convertaddr(*ib, 0);
+
+ hostaddr = *ib;
+ }
+ else
+ {
+ // from host to gpu
+ hostaddr = *ib;
+ *ib = kgsl_sharedmem_convertaddr(*ib, 1);
+ }
+
+ // walk through ib1 and convert any ib2 base address
+
+ ib = (unsigned int *) hostaddr;
+ ibend = (unsigned int *) (ib + *(++pBuffer));
+
+ while (ib < ibend)
+ {
+ dwHeader = *(ib);
+ header = *((pm4_type3*) (&dwHeader));
+
+ switch(dwHeader & PM4_PKT_MASK)
+ {
+ case PM4_TYPE0_PKT:
+ ib += header.count + 2;
+ break;
+
+ case PM4_TYPE1_PKT:
+ break;
+
+ case PM4_TYPE2_PKT:
+ ib++;
+ break;
+
+ case PM4_TYPE3_PKT:
+ if((EXPAND_OPCODE(header.it_opcode) == PM4_PACKET3_INDIRECT_BUFFER) ||
+ (EXPAND_OPCODE(header.it_opcode) == PM4_PACKET3_INDIRECT_BUFFER_PFD))
+ {
+ addr = ib + 1;
+ if (gpuToHost)
+ {
+ // from gpu to host
+ *addr = kgsl_sharedmem_convertaddr(*addr, 0);
+ }
+ else
+ {
+ // from host to gpu
+ *addr = kgsl_sharedmem_convertaddr(*addr, 1);
+ }
+ }
+ ib += header.count + 2;
+ break;
+ }
+ }
+ }
+}
+
+//----------------------------------------------------------------------------
+
+void
+Yamato_DumpPM4(unsigned int* pBuffer, unsigned int sizeDWords)
+{
+ unsigned int *pBufferEnd = pBuffer + sizeDWords;
+ unsigned int *tmp;
+
+ // Open file
+ FILE* pFile = fopen(PM4_DUMPFILE, "a");
+
+ printString(pFile, "Start_PM4Buffer\n");//, count=%d\n", sizeDWords);
+
+ // So look at the first unsigned int - should be a header
+ while(pBuffer < pBufferEnd)
+ {
+ unsigned int dwHeader = *(pBuffer++);
+
+ //printString(pFile, " Start_Packet\n");
+ switch(dwHeader & PM4_PKT_MASK)
+ {
+ case PM4_TYPE0_PKT:
+ WritePM4Packet_Type0(pFile, dwHeader, &pBuffer);
+ break;
+
+ case PM4_TYPE1_PKT:
+ break;
+
+ case PM4_TYPE2_PKT:
+ WritePM4Packet_Type2(pFile, dwHeader, &pBuffer);
+ break;
+
+ case PM4_TYPE3_PKT:
+ indirectionLevel = 0;
+ tmp = pBuffer;
+ Yamato_ConvertIBAddr(dwHeader, tmp, 1);
+ WritePM4Packet_Type3(pFile, dwHeader, &pBuffer);
+ Yamato_ConvertIBAddr(dwHeader, tmp, 0);
+ break;
+ }
+ //printString(pFile, " End_Packet\n");
+ }
+
+ printString(pFile, "End_PM4Buffer\n");
+ fclose(pFile);
+}
+
+//----------------------------------------------------------------------------
+
+void
+Yamato_DumpRegisterWrite(unsigned int dwAddress, unsigned int value)
+{
+ FILE *pFile;
+
+ // Build a Type-0 packet that maps to this register write
+ unsigned int pBuffer[100], *pBuf = &pBuffer[1];
+
+ // Don't dump CP_RB_WPTR (switch statement may be necessary here for future additions)
+ if(dwAddress == mmCP_RB_WPTR)
+ return;
+
+ pFile = fopen(PM4_DUMPFILE, "a");
+
+ pBuffer[0] = dwAddress;
+ pBuffer[1] = value;
+
+ printString(pFile, "StartRegisterWrite\n");
+ WritePM4Packet_Type0(pFile, pBuffer[0], &pBuf);
+ printString(pFile, "EndRegisterWrite\n");
+
+ fclose(pFile);
+}
+
+//----------------------------------------------------------------------------
+
+void
+Yamato_DumpFbStart(gsl_device_t *device)
+{
+ FILE *pFile;
+
+ static int firstCall = 0;
+
+ // We only want to call this once
+ if(firstCall)
+ return;
+
+ pFile = fopen(PM4_DUMPFILE, "a");
+
+ printString(pFile, "FbStart, value=0x%x\n", device->mmu.mpu_base);
+ printString(pFile, "FbSize, value=0x%x\n", device->mmu.mpu_range);
+
+ fclose(pFile);
+
+ firstCall = 1;
+}
+
+//----------------------------------------------------------------------------
+
+void
+Yamato_DumpWindow(unsigned int addr, unsigned int width, unsigned int height)
+{
+ FILE *pFile;
+
+ pFile = fopen(PM4_DUMPFILE, "a");
+
+ printString(pFile, "DumpWindow, addr=0x%x, width=0x%x, height=0x%x\n", addr, width, height);
+
+ fclose(pFile);
+}
+
+//----------------------------------------------------------------------------
+#ifdef _DEBUG
+
+#define ADDRESS_STACK_SIZE 256
+#define GET_PM4_TYPE3_OPCODE(x) ((*(x) >> 8) & 0xFF)
+#define IF_REGISTER_IN_RANGE(reg, base, count) \
+ offset = (reg) - (base); \
+ if(offset >= 0 && offset <= (count) - 2)
+#define GET_CP_CONSTANT_DATA(x) (*((x) + offset + 2))
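+// In a SET_CONSTANT packet the payload begins with one offset/type dword, so
+// the value written to register (base + offset) sits offset + 2 dwords past the
+// packet header; GET_CP_CONSTANT_DATA fetches exactly that dword.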
+
+static const char format2bpp[] =
+{
+ 2, // COLORX_4_4_4_4
+ 2, // COLORX_1_5_5_5
+ 2, // COLORX_5_6_5
+ 1, // COLORX_8
+ 2, // COLORX_8_8
+ 4, // COLORX_8_8_8_8
+ 4, // COLORX_S8_8_8_8
+ 2, // COLORX_16_FLOAT
+ 4, // COLORX_16_16_FLOAT
+ 8, // COLORX_16_16_16_16_FLOAT
+ 4, // COLORX_32_FLOAT
+ 8, // COLORX_32_32_FLOAT
+ 16, // COLORX_32_32_32_32_FLOAT ,
+ 1, // COLORX_2_3_3
+ 3, // COLORX_8_8_8
+};
+
+static unsigned int kgsl_dumpx_addr_count = 0; //unique command buffer addresses encountered
+static int kgsl_dumpx_handle_type3(unsigned int* hostaddr, int count)
+{
+ // For swap detection we need to find the below declared static values, and detect DI during EDRAM copy
+ static unsigned int width = 0, height = 0, format = 0, baseaddr = 0, iscopy = 0;
+
+ static unsigned int addr_stack[ADDRESS_STACK_SIZE];
+ static unsigned int size_stack[ADDRESS_STACK_SIZE];
+ int swap = 0; // have we encountered a swap during recursion (return value)
+
+ switch(GET_PM4_TYPE3_OPCODE(hostaddr))
+ {
+ case PM4_INDIRECT_BUFFER_PFD:
+ case PM4_INDIRECT_BUFFER:
+ {
+ // traverse indirect buffers
+ unsigned int i;
+ unsigned int ibaddr = *(hostaddr+1);
+ unsigned int ibsize = *(hostaddr+2);
+
+ // is this address already in encountered?
+ for(i = 0; i < kgsl_dumpx_addr_count && addr_stack[i] != ibaddr; i++);
+
+ if(kgsl_dumpx_addr_count == i)
+ {
+ // no, it's new - remember the address so we don't dump this buffer twice
+ addr_stack[kgsl_dumpx_addr_count] = ibaddr;
+ // just for sanity checking
+ size_stack[kgsl_dumpx_addr_count++] = ibsize;
+ KOS_ASSERT(kgsl_dumpx_addr_count < ADDRESS_STACK_SIZE);
+
+ // recursively follow the indirect link and update swap if indirect buffer had resolve
+ swap |= kgsl_dumpx_parse_ibs(ibaddr, ibsize);
+ }
+ else
+ {
+ KOS_ASSERT(size_stack[i] == ibsize);
+ }
+ }
+ break;
+
+ case PM4_SET_CONSTANT:
+ if((*(hostaddr+1) >> 16) == 0x4)
+ {
+ // parse register writes, and figure out framebuffer configuration
+
+ unsigned int regaddr = (*(hostaddr + 1) & 0xFFFF) + 0x2000; //dword address in register space
+ int offset; // used by the macros
+
+ IF_REGISTER_IN_RANGE(mmPA_SC_WINDOW_SCISSOR_BR, regaddr, count)
+ {
+ // found write to PA_SC_WINDOW_SCISSOR_BR, we use this to detect current
+ // width and height of the framebuffer (TODO: find more reliable way of achieving this)
+ unsigned int data = GET_CP_CONSTANT_DATA(hostaddr);
+ width = data & 0xFFFF;
+ height = data >> 16;
+ }
+
+ IF_REGISTER_IN_RANGE(mmRB_MODECONTROL, regaddr, count)
+ {
+ // found write to RB_MODECONTROL, we use this to find out if next DI is resolve
+ unsigned int data = GET_CP_CONSTANT_DATA(hostaddr);
+ iscopy = (data & RB_MODECONTROL__EDRAM_MODE_MASK) == (EDRAM_COPY << RB_MODECONTROL__EDRAM_MODE__SHIFT);
+ }
+
+ IF_REGISTER_IN_RANGE(mmRB_COPY_DEST_BASE, regaddr, count)
+ {
+ // found write to RB_COPY_DEST_BASE, we use this to find out the framebuffer base address
+ unsigned int data = GET_CP_CONSTANT_DATA(hostaddr);
+ baseaddr = (data & RB_COPY_DEST_BASE__COPY_DEST_BASE_MASK);
+ }
+
+ IF_REGISTER_IN_RANGE(mmRB_COPY_DEST_INFO, regaddr, count)
+ {
+ // found write to RB_COPY_DEST_INFO, we use this to find out the framebuffer format
+ unsigned int data = GET_CP_CONSTANT_DATA(hostaddr);
+ format = (data & RB_COPY_DEST_INFO__COPY_DEST_FORMAT_MASK) >> RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT;
+ }
+ }
+ break;
+
+ case PM4_DRAW_INDX:
+ case PM4_DRAW_INDX_2:
+ {
+ // DI found
+ // check if it is resolve
+ if(iscopy && !swap)
+ {
+ // printf("resolve: %ix%i @ 0x%08x, format = 0x%08x\n", width, height, baseaddr, format);
+ KOS_ASSERT(format < 15);
+
+ // yes it was and we need to update color buffer config because this is the first bin
+ // dumpx framebuffer base address, and dimensions
+ KGSL_DEBUG_DUMPX( BB_DUMP_CBUF_AWH, (unsigned int)baseaddr, width, height, " ");
+
+ // find aligned width
+ width = (width + 31) & ~31;
+
+ //dump bytes-per-pixel and aligned width
+ KGSL_DEBUG_DUMPX( BB_DUMP_CBUF_FS, format2bpp[format], width, 0, " ");
+ swap = 1;
+ }
+
+ }
+ break;
+
+ default:
+ break;
+ }
+ return swap;
+}
+
+// Traverse IBs and dump them to test vector. Detect swap by inspecting register
+// writes, keeping note of the current state, and dump framebuffer config to test vector
+int kgsl_dumpx_parse_ibs(gpuaddr_t gpuaddr, int sizedwords)
+{
+ static unsigned int level = 0; //recursion level
+
+ int swap = 0; // have we encountered a swap during recursion (return value)
+ unsigned int *hostaddr;
+ int dwords_left = sizedwords; //dwords left in the current command buffer
+
+ level++;
+
+ KOS_ASSERT(sizeof(unsigned int *) == sizeof(unsigned int));
+ KOS_ASSERT(level <= 2);
+ hostaddr = (unsigned int *)kgsl_sharedmem_convertaddr(gpuaddr, 0);
+
+ // dump the IB to test vector
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX, KGSL_DEBUG_DUMPX(BB_DUMP_MEMWRITE, gpuaddr, (unsigned int)hostaddr, sizedwords*4, "kgsl_dumpx_write_ibs"));
+
+ while(dwords_left)
+ {
+ int count = 0; //dword count including packet header
+
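+ // bits [31:30] of the header dword select the PM4 packet type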
+ switch(*hostaddr >> 30)
+ {
+ case 0x0: // type-0
+ count = (*hostaddr >> 16)+2;
+ break;
+ case 0x1: // type-1
+ count = 2;
+ break;
+ case 0x3: // type-3
+ count = ((*hostaddr >> 16) & 0x3fff) + 2;
+ swap |= kgsl_dumpx_handle_type3(hostaddr, count);
+ break; // type-3
+ default:
+ KOS_ASSERT(!"unknown packet type");
+ }
+
+ // jump to next packet
+ dwords_left -= count;
+ hostaddr += count;
+ KOS_ASSERT(dwords_left >= 0 && "PM4 parsing error");
+ }
+
+ level--;
+
+ // if this is the starting level of recursion, we are done. clean-up
+ if(level == 0) kgsl_dumpx_addr_count = 0;
+
+ return swap;
+}
+#endif
+
+#endif // _WIN32 && GSL_BLD_YAMATO
+
diff --git a/drivers/mxc/amd-gpu/common/gsl_device.c b/drivers/mxc/amd-gpu/common/gsl_device.c
new file mode 100644
index 000000000000..bcb557e69d6d
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_device.c
@@ -0,0 +1,663 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_hal.h"
+#ifdef _LINUX
+#include <linux/sched.h>
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+// inline functions
+//////////////////////////////////////////////////////////////////////////////
+OSINLINE void
+kgsl_device_getfunctable(gsl_deviceid_t device_id, gsl_functable_t *ftbl)
+{
+ switch (device_id)
+ {
+#ifdef GSL_BLD_YAMATO
+ case GSL_DEVICE_YAMATO:
+ kgsl_yamato_getfunctable(ftbl);
+ break;
+#endif // GSL_BLD_YAMATO
+#ifdef GSL_BLD_G12
+ case GSL_DEVICE_G12:
+ kgsl_g12_getfunctable(ftbl);
+ break;
+#endif // GSL_BLD_G12
+ default:
+ break;
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// functions
+//////////////////////////////////////////////////////////////////////////////
+
+int
+kgsl_device_init(gsl_device_t *device, gsl_deviceid_t device_id)
+{
+ int status = GSL_SUCCESS;
+ gsl_devconfig_t config;
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_device_init(gsl_device_t *device=0x%08x, gsl_deviceid_t device_id=%D )\n", device, device_id );
+
+ if (device->flags & GSL_FLAGS_INITIALIZED)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_init. Return value %B\n", GSL_SUCCESS );
+ return (GSL_SUCCESS);
+ }
+
+ kos_memset(device, 0, sizeof(gsl_device_t));
+
+ // if device configuration is present
+ if (kgsl_hal_getdevconfig(device_id, &config) == GSL_SUCCESS)
+ {
+ kgsl_device_getfunctable(device_id, &device->ftbl);
+
+ kos_memcpy(&device->regspace, &config.regspace, sizeof(gsl_memregion_t));
+#ifdef GSL_BLD_YAMATO
+ kos_memcpy(&device->gmemspace, &config.gmemspace, sizeof(gsl_memregion_t));
+#endif // GSL_BLD_YAMATO
+
+ device->refcnt = 0;
+ device->id = device_id;
+
+#ifndef GSL_NO_MMU
+ device->mmu.config = config.mmu_config;
+ device->mmu.mpu_base = config.mpu_base;
+ device->mmu.mpu_range = config.mpu_range;
+ device->mmu.va_base = config.va_base;
+ device->mmu.va_range = config.va_range;
+#endif
+
+ if (device->ftbl.device_init)
+ {
+ status = device->ftbl.device_init(device);
+ }
+ else
+ {
+ status = GSL_FAILURE_NOTINITIALIZED;
+ }
+
+ // allocate memory store
+ status = kgsl_sharedmem_alloc0(device->id, GSL_MEMFLAGS_ALIGNPAGE | GSL_MEMFLAGS_CONPHYS, sizeof(gsl_devmemstore_t), &device->memstore);
+
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX,
+ {
+ // dumpx needs this to be in EMEM0 aperture
+ kgsl_sharedmem_free0(&device->memstore, GSL_CALLER_PROCESSID_GET());
+ status = kgsl_sharedmem_alloc0(device->id, GSL_MEMFLAGS_ALIGNPAGE, sizeof(gsl_devmemstore_t), &device->memstore);
+ });
+
+ if (status != GSL_SUCCESS)
+ {
+ kgsl_device_stop(device->id);
+ return (status);
+ }
+ kgsl_sharedmem_set0(&device->memstore, 0, 0, device->memstore.size);
+
+ // init memqueue
+ device->memqueue.head = NULL;
+ device->memqueue.tail = NULL;
+
+ // init cmdstream
+ status = kgsl_cmdstream_init(device);
+ if (status != GSL_SUCCESS)
+ {
+ kgsl_device_stop(device->id);
+ return (status);
+ }
+
+#ifndef _LINUX
+ // Create timestamp event
+ device->timestamp_event = kos_event_create(0);
+ if( !device->timestamp_event )
+ {
+ kgsl_device_stop(device->id);
+ return (status);
+ }
+#else
+ // Create timestamp wait queue
+ init_waitqueue_head(&device->timestamp_waitq);
+#endif
+
+ //
+ // Read the chip ID after the device has been initialized.
+ //
+ device->chip_id = kgsl_hal_getchipid(device->id);
+ }
+
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_init. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_device_close(gsl_device_t *device)
+{
+ int status = GSL_FAILURE_NOTINITIALIZED;
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_device_close(gsl_device_t *device=0x%08x )\n", device );
+
+ /* make sure the device is stopped before closing;
+ kgsl_device_close is only called for the last running caller process
+ */
+ while (device->refcnt > 0) {
+ GSL_API_MUTEX_UNLOCK();
+ kgsl_device_stop(device->id);
+ GSL_API_MUTEX_LOCK();
+ }
+
+ // close cmdstream
+ status = kgsl_cmdstream_close(device);
+ if( status != GSL_SUCCESS ) return status;
+
+ if (device->flags & GSL_FLAGS_INITIALIZED)
+ {
+ if (device->ftbl.device_close)
+ {
+ status = device->ftbl.device_close(device);
+ }
+ }
+
+ // DumpX allocates memstore from MMU aperture
+ if ((device->refcnt == 0) && device->memstore.hostptr
+ && !(gsl_driver.flags_debug & GSL_DBGFLAGS_DUMPX))
+ {
+ kgsl_sharedmem_free0(&device->memstore, GSL_CALLER_PROCESSID_GET());
+ }
+
+#ifndef _LINUX
+ // destroy timestamp event
+ if(device->timestamp_event)
+ {
+ kos_event_signal(device->timestamp_event); // wake up waiting threads before destroying the structure
+ kos_event_destroy( device->timestamp_event );
+ device->timestamp_event = 0;
+ }
+#else
+ wake_up_interruptible_all(&(device->timestamp_waitq));
+#endif
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_close. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_device_destroy(gsl_device_t *device)
+{
+ int status = GSL_FAILURE_NOTINITIALIZED;
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_device_destroy(gsl_device_t *device=0x%08x )\n", device );
+
+ if (device->flags & GSL_FLAGS_INITIALIZED)
+ {
+ if (device->ftbl.device_destroy)
+ {
+ status = device->ftbl.device_destroy(device);
+ }
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_destroy. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_device_attachcallback(gsl_device_t *device, unsigned int pid)
+{
+ int status = GSL_SUCCESS;
+ int pindex;
+
+#ifndef GSL_NO_MMU
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "--> int kgsl_device_attachcallback(gsl_device_t *device=0x%08x, unsigned int pid=0x%08x)\n", device, pid );
+
+ if (device->flags & GSL_FLAGS_INITIALIZED)
+ {
+ if (kgsl_driver_getcallerprocessindex(pid, &pindex) == GSL_SUCCESS)
+ {
+ device->callerprocess[pindex] = pid;
+
+ status = kgsl_mmu_attachcallback(&device->mmu, pid);
+ }
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_attachcallback. Return value: %B\n", status );
+
+#else
+ (void)pid;
+ (void)device;
+#endif
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_device_detachcallback(gsl_device_t *device, unsigned int pid)
+{
+ int status = GSL_SUCCESS;
+ int pindex;
+
+#ifndef GSL_NO_MMU
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "--> int kgsl_device_detachcallback(gsl_device_t *device=0x%08x, unsigned int pid=0x%08x)\n", device, pid );
+
+ if (device->flags & GSL_FLAGS_INITIALIZED)
+ {
+ if (kgsl_driver_getcallerprocessindex(pid, &pindex) == GSL_SUCCESS)
+ {
+ status |= kgsl_mmu_detachcallback(&device->mmu, pid);
+
+ device->callerprocess[pindex] = 0;
+ }
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_detachcallback. Return value: %B\n", status );
+
+#else
+ (void)pid;
+ (void)device;
+#endif
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_device_getproperty(gsl_deviceid_t device_id, gsl_property_type_t type, void *value, unsigned int sizebytes)
+{
+ int status = GSL_SUCCESS;
+ gsl_device_t *device = &gsl_driver.device[device_id-1]; // device_id is 1 based
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_device_getproperty(gsl_deviceid_t device_id=%D, gsl_property_type_t type=%d, void *value=0x08x, unsigned int sizebytes=%d)\n", device_id, type, value, sizebytes );
+
+ KOS_ASSERT(value);
+
+#ifndef _DEBUG
+ (void) sizebytes; // unreferenced formal parameter
+#endif
+
+ switch (type)
+ {
+ case GSL_PROP_SHMEM:
+ {
+ gsl_shmemprop_t *shem = (gsl_shmemprop_t *) value;
+
+ KOS_ASSERT(sizebytes == sizeof(gsl_shmemprop_t));
+
+ shem->numapertures = gsl_driver.shmem.numapertures;
+ shem->aperture_mask = GSL_APERTURE_MASK;
+ shem->aperture_shift = GSL_APERTURE_SHIFT;
+
+ break;
+ }
+
+ case GSL_PROP_SHMEM_APERTURES:
+ {
+ int i;
+ gsl_apertureprop_t *aperture = (gsl_apertureprop_t *) value;
+
+ KOS_ASSERT(sizebytes == (sizeof(gsl_apertureprop_t) * gsl_driver.shmem.numapertures));
+
+ for (i = 0; i < gsl_driver.shmem.numapertures; i++)
+ {
+ if (gsl_driver.shmem.apertures[i].memarena)
+ {
+ aperture->gpuaddr = GSL_APERTURE_GETGPUADDR(gsl_driver.shmem, i);
+ aperture->hostaddr = GSL_APERTURE_GETHOSTADDR(gsl_driver.shmem, i);
+ }
+ else
+ {
+ aperture->gpuaddr = 0x0;
+ aperture->hostaddr = 0x0;
+ }
+ aperture++;
+ }
+
+ break;
+ }
+
+ case GSL_PROP_DEVICE_SHADOW:
+ {
+ gsl_shadowprop_t *shadowprop = (gsl_shadowprop_t *) value;
+
+ KOS_ASSERT(sizebytes == sizeof(gsl_shadowprop_t));
+
+ kos_memset(shadowprop, 0, sizeof(gsl_shadowprop_t));
+
+#ifdef GSL_DEVICE_SHADOW_MEMSTORE_TO_USER
+ if (device->memstore.hostptr)
+ {
+ shadowprop->hostaddr = (unsigned int) device->memstore.hostptr;
+ shadowprop->size = device->memstore.size;
+ shadowprop->flags = GSL_FLAGS_INITIALIZED;
+ }
+#endif // GSL_DEVICE_SHADOW_MEMSTORE_TO_USER
+
+ break;
+ }
+
+ default:
+ {
+ if (device->ftbl.device_getproperty)
+ {
+ status = device->ftbl.device_getproperty(device, type, value, sizebytes);
+ }
+
+ break;
+ }
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_getproperty. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_device_setproperty(gsl_deviceid_t device_id, gsl_property_type_t type, void *value, unsigned int sizebytes)
+{
+ int status = GSL_SUCCESS;
+ gsl_device_t *device;
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_device_setproperty(gsl_deviceid_t device_id=%D, gsl_property_type_t type=%d, void *value=0x08x, unsigned int sizebytes=%d)\n", device_id, type, value, sizebytes );
+
+ KOS_ASSERT(value);
+
+ GSL_API_MUTEX_LOCK();
+
+ device = &gsl_driver.device[device_id-1]; // device_id is 1 based
+
+ if (device->flags & GSL_FLAGS_INITIALIZED)
+ {
+ if (device->ftbl.device_setproperty)
+ {
+ status = device->ftbl.device_setproperty(device, type, value, sizebytes);
+ }
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_setproperty. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_device_start(gsl_deviceid_t device_id, gsl_flags_t flags)
+{
+ int status = GSL_FAILURE_NOTINITIALIZED;
+ gsl_device_t *device;
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_device_start(gsl_deviceid_t device_id=%D, gsl_flags_t flags=%d)\n", device_id, flags );
+
+ GSL_API_MUTEX_LOCK();
+
+ device = &gsl_driver.device[device_id-1]; // device_id is 1 based
+
+ kgsl_device_active(device);
+
+ if (!(device->flags & GSL_FLAGS_INITIALIZED))
+ {
+ GSL_API_MUTEX_UNLOCK();
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_ERROR, "ERROR: Trying to start uninitialized device.\n" );
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_start. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ device->refcnt++;
+
+ if (device->flags & GSL_FLAGS_STARTED)
+ {
+ GSL_API_MUTEX_UNLOCK();
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_start. Return value %B\n", GSL_SUCCESS );
+ return (GSL_SUCCESS);
+ }
+
+ // start device in safe mode
+ if (flags & GSL_FLAGS_SAFEMODE)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_INFO, "Running the device in safe mode.\n" );
+ device->flags |= GSL_FLAGS_SAFEMODE;
+ }
+
+ if (device->ftbl.device_start)
+ {
+ status = device->ftbl.device_start(device, flags);
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_start. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_device_stop(gsl_deviceid_t device_id)
+{
+ int status = GSL_FAILURE_NOTINITIALIZED;
+ gsl_device_t *device;
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_device_stop(gsl_deviceid_t device_id=%D)\n", device_id );
+
+ GSL_API_MUTEX_LOCK();
+
+ device = &gsl_driver.device[device_id-1]; // device_id is 1 based
+
+ if (device->flags & GSL_FLAGS_STARTED)
+ {
+ KOS_ASSERT(device->refcnt);
+
+ device->refcnt--;
+
+ if (device->refcnt == 0)
+ {
+ if (device->ftbl.device_stop)
+ {
+ status = device->ftbl.device_stop(device);
+ }
+ }
+ else
+ {
+ status = GSL_SUCCESS;
+ }
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_stop. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_device_idle(gsl_deviceid_t device_id, unsigned int timeout)
+{
+ int status = GSL_FAILURE_NOTINITIALIZED;
+ gsl_device_t *device;
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_device_idle(gsl_deviceid_t device_id=%D, unsigned int timeout=%d)\n", device_id, timeout );
+
+ GSL_API_MUTEX_LOCK();
+
+ device = &gsl_driver.device[device_id-1]; // device_id is 1 based
+
+ kgsl_device_active(device);
+
+ if (device->ftbl.device_idle)
+ {
+ status = device->ftbl.device_idle(device, timeout);
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_idle. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_device_regread(gsl_deviceid_t device_id, unsigned int offsetwords, unsigned int *value)
+{
+ int status = GSL_FAILURE_NOTINITIALIZED;
+ gsl_device_t *device;
+
+
+#ifdef GSL_LOG
+ if( offsetwords != mmRBBM_STATUS && offsetwords != mmCP_RB_RPTR ) // Would otherwise flood the log
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_device_regread(gsl_deviceid_t device_id=%D, unsigned int offsetwords=%R, unsigned int *value=0x%08x)\n", device_id, offsetwords, value );
+#endif
+
+ GSL_API_MUTEX_LOCK();
+
+ device = &gsl_driver.device[device_id-1]; // device_id is 1 based
+
+ KOS_ASSERT(value);
+ KOS_ASSERT(offsetwords < device->regspace.sizebytes);
+
+ if (device->ftbl.device_regread)
+ {
+ status = device->ftbl.device_regread(device, offsetwords, value);
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+#ifdef GSL_LOG
+ if( offsetwords != mmRBBM_STATUS && offsetwords != mmCP_RB_RPTR )
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_regread. Return value %B\n", status );
+#endif
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_device_regwrite(gsl_deviceid_t device_id, unsigned int offsetwords, unsigned int value)
+{
+ int status = GSL_FAILURE_NOTINITIALIZED;
+ gsl_device_t *device;
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_device_regwrite(gsl_deviceid_t device_id=%D, unsigned int offsetwords=%R, unsigned int value=0x%08x)\n", device_id, offsetwords, value );
+
+ GSL_API_MUTEX_LOCK();
+
+ device = &gsl_driver.device[device_id-1]; // device_id is 1 based
+
+ KOS_ASSERT(offsetwords < device->regspace.sizebytes);
+
+ if (device->ftbl.device_regwrite)
+ {
+ status = device->ftbl.device_regwrite(device, offsetwords, value);
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_regwrite. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_device_waitirq(gsl_deviceid_t device_id, gsl_intrid_t intr_id, unsigned int *count, unsigned int timeout)
+{
+ int status = GSL_FAILURE_NOTINITIALIZED;
+ gsl_device_t *device;
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE,
+                    "--> int kgsl_device_waitirq(gsl_deviceid_t device_id=%D, gsl_intrid_t intr_id=%d, unsigned int *count=0x%08x, unsigned int timeout=0x%08x)\n", device_id, intr_id, count, timeout);
+
+ GSL_API_MUTEX_LOCK();
+
+ device = &gsl_driver.device[device_id-1]; // device_id is 1 based
+
+ if (device->ftbl.device_waitirq)
+ {
+ status = device->ftbl.device_waitirq(device, intr_id, count, timeout);
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_waitirq. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_device_runpending(gsl_device_t *device)
+{
+ int status = GSL_FAILURE_NOTINITIALIZED;
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_device_runpending(gsl_device_t *device=0x%08x )\n", device );
+
+ if (device->flags & GSL_FLAGS_INITIALIZED)
+ {
+ if (device->ftbl.device_runpending)
+ {
+ status = device->ftbl.device_runpending(device);
+ }
+ }
+
+ // free any pending freeontimestamps
+ kgsl_cmdstream_memqueue_drain(device);
+
+ kgsl_log_write( KGSL_LOG_GROUP_DEVICE | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_device_runpending. Return value %B\n", status );
+
+ return (status);
+}
+
diff --git a/drivers/mxc/amd-gpu/common/gsl_drawctxt.c b/drivers/mxc/amd-gpu/common/gsl_drawctxt.c
new file mode 100644
index 000000000000..afa44e618794
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_drawctxt.c
@@ -0,0 +1,1796 @@
+/* Copyright (c) 2002,2007-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_hal.h"
+#ifdef _LINUX
+#include <asm/div64.h>
+#endif
+
+#ifdef GSL_BLD_YAMATO
+
+//#define DISABLE_SHADOW_WRITES
+
+/*
+//////////////////////////////////////////////////////////////////////////////
+//
+// Memory Map for Register, Constant & Instruction Shadow, and Command Buffers (34.5KB)
+//
+// +---------------------+------------+-------------+---+---------------------+
+// | ALU Constant Shadow | Reg Shadow | C&V Buffers |Tex| Shader Instr Shadow |
+// +---------------------+------------+-------------+---+---------------------+
+// ________________________________/ \___________________
+// / \
+// +--------------+-----------+------+-----------+------------------------+
+// | Restore Regs | Save Regs | Quad | Gmem Save | Gmem Restore | unused |
+// +--------------+-----------+------+-----------+------------------------+
+//
+// 8K - ALU Constant Shadow (8K aligned)
+// 4K - H/W Register Shadow (8K aligned)
+// 9K - Command and Vertex Buffers
+// - Indirect command buffer : Const/Reg restore
+// - includes Loop & Bool const shadows
+// - Indirect command buffer : Const/Reg save
+// - Quad vertices & texture coordinates
+// - Indirect command buffer : Gmem save
+// - Indirect command buffer : Gmem restore
+// - Unused (padding to 8KB boundary)
+// <1K - Texture Constant Shadow (768 bytes) (8K aligned)
+// 18K - Shader Instruction Shadow
+// - 6K vertex (32 byte aligned)
+// - 6K pixel (32 byte aligned)
+// - 6K shared (32 byte aligned)
+//
+// Note: Reading constants into a shadow, one at a time using REG_TO_MEM, takes
+// 3 DWORDS per DWORD transferred, plus 1 DWORD for the shadow, for a total of
+// 16 bytes per constant. If the texture constants were transferred this way,
+// the Command & Vertex Buffers section would extend past the 16K boundary.
+// By moving the texture constant shadow area to start at 16KB boundary, we
+// only require approximately 40 bytes more memory, but are able to use the
+// LOAD_CONSTANT_CONTEXT shadowing feature for the textures, speeding up
+// context switching.
+//
+// [Using LOAD_CONSTANT_CONTEXT shadowing feature for the Loop and/or Bool
+// constants would require an additional 8KB each, for alignment.]
+//
+//////////////////////////////////////////////////////////////////////////////
+*/
+
+//////////////////////////////////////////////////////////////////////////////
+// Constants
+//////////////////////////////////////////////////////////////////////////////
+
+
+
+
+#define ALU_CONSTANTS 2048 // DWORDS
+#define NUM_REGISTERS 1024 // DWORDS
+#ifdef DISABLE_SHADOW_WRITES
+ #define CMD_BUFFER_LEN 9216 // DWORDS
+#else
+ #define CMD_BUFFER_LEN 3072 // DWORDS
+#endif
+#define TEX_CONSTANTS (32*6) // DWORDS
+#define BOOL_CONSTANTS 8 // DWORDS
+#define LOOP_CONSTANTS 56 // DWORDS
+#define SHADER_INSTRUCT_LOG2 9U // 2^n == SHADER_INSTRUCTIONS
+
+#if defined(PM4_IM_STORE)
+#define SHADER_INSTRUCT (1<<SHADER_INSTRUCT_LOG2) // 96-bit instructions
+#else
+#define SHADER_INSTRUCT 0
+#endif
+
+// LOAD_CONSTANT_CONTEXT shadow size
+#define LCC_SHADOW_SIZE 0x2000 // 8KB
+
+#define ALU_SHADOW_SIZE LCC_SHADOW_SIZE // 8KB
+#define REG_SHADOW_SIZE 0x1000 // 4KB
+#ifdef DISABLE_SHADOW_WRITES
+ #define CMD_BUFFER_SIZE 0x9000 // 36KB
+#else
+ #define CMD_BUFFER_SIZE 0x3000 // 12KB
+#endif
+#define TEX_SHADOW_SIZE (TEX_CONSTANTS*4) // 768 bytes
+#define SHADER_SHADOW_SIZE (SHADER_INSTRUCT*12)// 6KB
+
+#define REG_OFFSET LCC_SHADOW_SIZE
+#define CMD_OFFSET (REG_OFFSET + REG_SHADOW_SIZE)
+#define TEX_OFFSET (CMD_OFFSET + CMD_BUFFER_SIZE)
+#define SHADER_OFFSET ((TEX_OFFSET + TEX_SHADOW_SIZE + 32) & ~31)
+
+#define CONTEXT_SIZE (SHADER_OFFSET + 3 * SHADER_SHADOW_SIZE)
+
+
+//////////////////////////////////////////////////////////////////////////////
+// temporary work structure
+//////////////////////////////////////////////////////////////////////////////
+
+typedef struct
+{
+ unsigned int *start; // Command & Vertex buffer start
+ unsigned int *cmd; // Next available dword in C&V buffer
+
+ // address of buffers, needed when creating IB1 command buffers.
+ gpuaddr_t bool_shadow; // Address where bool constants are shadowed
+ gpuaddr_t loop_shadow; // Address where loop constants are shadowed
+
+#if defined(PM4_IM_STORE)
+ gpuaddr_t shader_shared; // Address of shared shader instruction shadow
+ gpuaddr_t shader_vertex; // Address of vertex shader instruction shadow
+ gpuaddr_t shader_pixel; // Address of pixel shader instruction shadow
+#endif
+
+ gpuaddr_t reg_values[2]; // Addresses in command buffer where separately handled registers are saved
+ gpuaddr_t chicken_restore;// Address where the TP0_CHICKEN register value is written
+ gpuaddr_t gmem_base; // Base gpu address of GMEM
+}
+ctx_t;
+
+//////////////////////////////////////////////////////////////////////////////
+// Helper function to calculate IEEE754 single precision float values without FPU
+//////////////////////////////////////////////////////////////////////////////
+unsigned int uint2float( unsigned int uintval )
+{
+ unsigned int exp = 0;
+ unsigned int frac = 0;
+ unsigned int u = uintval;
+
+ // Handle zero separately
+ if( uintval == 0 ) return 0;
+
+ // Find log2 of u
+ if(u>=0x10000) { exp+=16; u>>=16; }
+ if(u>=0x100 ) { exp+=8; u>>=8; }
+ if(u>=0x10 ) { exp+=4; u>>=4; }
+ if(u>=0x4 ) { exp+=2; u>>=2; }
+ if(u>=0x2 ) { exp+=1; u>>=1; }
+
+ // Calculate fraction
+ frac = ( uintval & ( ~( 1 << exp ) ) ) << ( 23 - exp );
+
+ // Exp is biased by 127 and shifted 23 bits
+ exp = ( exp + 127 ) << 23;
+
+ return ( exp | frac );
+}
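+
+// Worked example (illustrative only, not used by the driver): uint2float(5)
+// finds exp=2 and frac = (5 & ~(1<<2)) << 21 = 0x00200000; the biased exponent
+// is (2+127) << 23 = 0x40800000, so the function returns 0x40A00000, which is
+// the IEEE754 single precision encoding of 5.0f.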
+
+//////////////////////////////////////////////////////////////////////////////
+// Helper function to divide two unsigned ints and return the result as a floating point value
+//////////////////////////////////////////////////////////////////////////////
+unsigned int uintdivide(unsigned int a, unsigned int b)
+{
+#ifdef _LINUX
+ uint64_t a_fixed = a << 16;
+ uint64_t b_fixed = b << 16;
+#else
+ unsigned int a_fixed = a << 16;
+ unsigned int b_fixed = b << 16;
+#endif
+ // Assume the result is 0.fraction
+ unsigned int fraction;
+ unsigned int exp = 126;
+
+ if( b == 0 ) return 0;
+
+#ifdef _LINUX
+ a_fixed = a_fixed << 32;
+ do_div(a_fixed, b_fixed);
+ fraction = (unsigned int)a_fixed;
+#else
+ fraction = ((unsigned int)((((__int64)a_fixed) << 32) / (__int64)b_fixed));
+#endif
+
+ if( fraction == 0 ) return 0;
+
+ // Normalize
+ while( !(fraction & (1<<31)) )
+ {
+ fraction <<= 1;
+ exp--;
+ }
+ // Remove hidden bit
+ fraction <<= 1;
+
+ // Round
+ if( ( fraction & 0x1ff ) > 256 )
+ {
+ int rounded = 0;
+ int i = 9;
+
+ // Do the bit addition
+ while( !rounded )
+ {
+ if( fraction & (1<<i) )
+ {
+ // 1b + 1b = 0b, carry = 1
+ fraction &= ~(1<<i);
+ i++;
+ }
+ else
+ {
+ fraction |= (1<<i);
+ rounded = 1;
+ }
+ }
+ }
+
+ // Use 23 most significant bits for the fraction
+ fraction >>= 9;
+
+ return ( ( exp << 23 ) | fraction );
+}
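+
+// Worked example (illustrative only): uintdivide(1, 2) computes
+// (a_fixed << 32) / b_fixed = (1 << 32) / 2 = 0x80000000 as the raw fraction;
+// normalization leaves exp = 126, and removing the hidden bit and rounding
+// leave a zero mantissa, so the result is (126 << 23) = 0x3F000000, i.e. 0.5f.
+// Per the "0.fraction" note above, the helper assumes the quotient is below 1.0.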
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+// context save (gmem -> sys)
+//////////////////////////////////////////////////////////////////////////////
+
+
+//////////////////////////////////////////////////////////////////////////////
+// pre-compiled vertex shader program
+//
+// attribute vec4 P;
+// void main(void)
+// {
+// gl_Position = P;
+// }
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#define GMEM2SYS_VTX_PGM_LEN 0x12
+
+static const unsigned int gmem2sys_vtx_pgm[GMEM2SYS_VTX_PGM_LEN] = {
+ 0x00011003, 0x00001000, 0xc2000000,
+ 0x00001004, 0x00001000, 0xc4000000,
+ 0x00001005, 0x00002000, 0x00000000,
+ 0x1cb81000, 0x00398a88, 0x00000003,
+ 0x140f803e, 0x00000000, 0xe2010100,
+ 0x14000000, 0x00000000, 0xe2000000
+};
+
+
+//////////////////////////////////////////////////////////////////////////////
+// pre-compiled fragment shader program
+//
+// precision highp float;
+// uniform vec4 clear_color;
+// void main(void)
+// {
+// gl_FragColor = clear_color;
+// }
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#define GMEM2SYS_FRAG_PGM_LEN 0x0c
+
+static const unsigned int gmem2sys_frag_pgm[GMEM2SYS_FRAG_PGM_LEN] = {
+ 0x00000000, 0x1002c400, 0x10000000,
+ 0x00001003, 0x00002000, 0x00000000,
+ 0x140f8000, 0x00000000, 0x22000000,
+ 0x14000000, 0x00000000, 0xe2000000
+};
+
+
+//////////////////////////////////////////////////////////////////////////////
+// context restore (sys -> gmem)
+//////////////////////////////////////////////////////////////////////////////
+
+
+//////////////////////////////////////////////////////////////////////////////
+// pre-compiled vertex shader program
+//
+// attribute vec4 position;
+// attribute vec4 texcoord;
+// varying vec4 texcoord0;
+// void main()
+// {
+// gl_Position = position;
+// texcoord0 = texcoord;
+// }
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#define SYS2GMEM_VTX_PGM_LEN 0x18
+
+static const unsigned int sys2gmem_vtx_pgm[SYS2GMEM_VTX_PGM_LEN] = {
+ 0x00052003, 0x00001000, 0xc2000000, 0x00001005,
+ 0x00001000, 0xc4000000, 0x00001006, 0x10071000,
+ 0x20000000, 0x18981000, 0x0039ba88, 0x00000003,
+ 0x12982000, 0x40257b08, 0x00000002, 0x140f803e,
+ 0x00000000, 0xe2010100, 0x140f8000, 0x00000000,
+ 0xe2020200, 0x14000000, 0x00000000, 0xe2000000
+};
+
+
+//////////////////////////////////////////////////////////////////////////////
+// pre-compiled fragment shader program
+//
+// precision mediump float;
+// uniform sampler2D tex0;
+// varying vec4 texcoord0;
+// void main()
+// {
+// gl_FragColor = texture2D(tex0, texcoord0.xy);
+// }
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#define SYS2GMEM_FRAG_PGM_LEN 0x0f
+
+static const unsigned int sys2gmem_frag_pgm[SYS2GMEM_FRAG_PGM_LEN] = {
+ 0x00011002, 0x00001000, 0xc4000000, 0x00001003,
+ 0x10041000, 0x20000000, 0x10000001, 0x1ffff688,
+ 0x00000002, 0x140f8000, 0x00000000, 0xe2000000,
+ 0x14000000, 0x00000000, 0xe2000000
+};
+
+
+//////////////////////////////////////////////////////////////////////////////
+// shader texture constants (sysmem -> gmem)
+//////////////////////////////////////////////////////////////////////////////
+
+#define SYS2GMEM_TEX_CONST_LEN 6
+
+static unsigned int sys2gmem_tex_const[SYS2GMEM_TEX_CONST_LEN] =
+{
+ // Texture, FormatXYZW=Unsigned, ClampXYZ=Wrap/Repeat,RFMode=ZeroClamp-1,Dim=1:2d
+ 0x00000002, // Pitch = TBD
+
+ // Format=6:8888_WZYX, EndianSwap=0:None, ReqSize=0:256bit, DimHi=0, NearestClamp=1:OGL Mode
+ 0x00000806, // Address[31:12] = TBD
+
+ // Width, Height, EndianSwap=0:None
+ 0, // Width & Height = TBD
+
+ // NumFormat=0:RF, DstSelXYZW=XYZW, ExpAdj=0, MagFilt=MinFilt=0:Point, Mip=2:BaseMap
+ 0 << 1 | 1 << 4 | 2 << 7 | 3 << 10 | 2 << 23,
+
+ // VolMag=VolMin=0:Point, MinMipLvl=0, MaxMipLvl=1, LodBiasH=V=0, Dim3d=0
+ 0,
+
+    // BorderColor=0:ABGRBlack, ForceBC=0:disable, TriJuice=0, Aniso=0, Dim=1:2d, MipPacking=0
+ 1 << 9 // Mip Address[31:12] = TBD
+};
+
+
+//////////////////////////////////////////////////////////////////////////////
+// quad for copying GMEM to context shadow
+//////////////////////////////////////////////////////////////////////////////
+
+#define QUAD_LEN 12
+
+static unsigned int gmem_copy_quad[QUAD_LEN] = {
+ 0x00000000, 0x00000000, 0x3f800000,
+ 0x00000000, 0x00000000, 0x3f800000,
+ 0x00000000, 0x00000000, 0x3f800000,
+ 0x00000000, 0x00000000, 0x3f800000
+};
+
+#define TEXCOORD_LEN 8
+
+static unsigned int gmem_copy_texcoord[TEXCOORD_LEN] = {
+ 0x00000000, 0x3f800000,
+ 0x3f800000, 0x3f800000,
+ 0x00000000, 0x00000000,
+ 0x3f800000, 0x00000000
+};
+
+
+//////////////////////////////////////////////////////////////////////////////
+// shader linkage info
+//////////////////////////////////////////////////////////////////////////////
+
+#define SHADER_CONST_ADDR (11 * 6 + 3)
+
+
+//////////////////////////////////////////////////////////////////////////////
+// gmem command buffer length
+//////////////////////////////////////////////////////////////////////////////
+
+#define PM4_REG(reg) ((0x4 << 16) | (GSL_HAL_SUBBLOCK_OFFSET(reg)))
+
+//////////////////////////////////////////////////////////////////////////////
+// functions
+//////////////////////////////////////////////////////////////////////////////
+
+static void
+config_gmemsize(gmem_shadow_t *shadow, int gmem_size)
+{
+ int w=64, h=64; // 16KB surface, minimum
+
+ // convert from bytes to 32-bit words
+ gmem_size = (gmem_size + 3)/4;
+
+ // find the right surface size, close to a square.
+ while (w * h < gmem_size)
+ if (w < h)
+ w *= 2;
+ else
+ h *= 2;
+
+ shadow->width = w;
+ shadow->pitch = w;
+ shadow->height = h;
+
+ shadow->size = shadow->pitch * shadow->height * 4;
+}
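+
+// For example (illustrative numbers only): a 256KB GMEM gives gmem_size = 65536
+// words after the bytes-to-words conversion; the loop grows the 64x64 starting
+// surface to 256x256, so shadow->pitch = shadow->width = 256, shadow->height = 256
+// and shadow->size = 256*256*4 = 256KB.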
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+static unsigned int
+gpuaddr(unsigned int *cmd, gsl_memdesc_t *memdesc)
+{
+ return memdesc->gpuaddr + ((char *)cmd - (char *)memdesc->hostptr);
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+static void
+create_ib1(gsl_drawctxt_t *drawctxt, unsigned int *cmd, unsigned int *start, unsigned int *end)
+{
+ cmd[0] = PM4_HDR_INDIRECT_BUFFER_PFD;
+ cmd[1] = gpuaddr(start, &drawctxt->gpustate);
+ cmd[2] = end - start;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+static unsigned int *
+program_shader(unsigned int *cmds, int vtxfrag, const unsigned int *shader_pgm, int dwords)
+{
+    // load the patched shader program (vertex or fragment)
+ *cmds++ = pm4_type3_packet(PM4_IM_LOAD_IMMEDIATE, 2 + dwords);
+ *cmds++ = vtxfrag; // 0=vertex shader, 1=fragment shader
+ *cmds++ = ( (0 << 16) | dwords ); // instruction start & size (in 32-bit words)
+
+ kos_memcpy(cmds, shader_pgm, dwords<<2);
+ cmds += dwords;
+
+ return cmds;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+static unsigned int *
+reg_to_mem(unsigned int *cmds, gpuaddr_t dst, gpuaddr_t src, int dwords)
+{
+ while (dwords-- > 0)
+ {
+ *cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmds++ = src++;
+ *cmds++ = dst;
+ dst += 4;
+ }
+
+ return cmds;
+}
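+
+// Note: each register copied this way costs 3 dwords of command buffer space
+// (the PM4_REG_TO_MEM header, the source register, and the destination address),
+// plus the 1 dword of shadow written per register, matching the "16 bytes per
+// constant" figure in the memory map comment above.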
+
+
+
+#ifdef DISABLE_SHADOW_WRITES
+
+static void build_reg_to_mem_range(unsigned int start, unsigned int end, unsigned int **cmd, gsl_drawctxt_t *drawctxt)
+{
+ unsigned int i = start;
+
+ for(i=start; i<=end; i++)
+ {
+ *(*cmd)++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *(*cmd)++ = i | (1<<30);
+ *(*cmd)++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) + (i-0x2000)*4;
+ }
+}
+
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+// chicken restore
+//////////////////////////////////////////////////////////////////////////////
+static unsigned int*
+build_chicken_restore_cmds(gsl_drawctxt_t *drawctxt, ctx_t *ctx)
+{
+ unsigned int *start = ctx->cmd;
+ unsigned int *cmds = start;
+
+ *cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0;
+
+ *cmds++ = pm4_type0_packet(mmTP0_CHICKEN, 1);
+ ctx->chicken_restore = gpuaddr(cmds, &drawctxt->gpustate);
+ *cmds++ = 0x00000000;
+
+
+ // create indirect buffer command for above command sequence
+ create_ib1(drawctxt, drawctxt->chicken_restore, start, cmds);
+
+ return cmds;
+}
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+// context save
+//////////////////////////////////////////////////////////////////////////////
+
+
+//////////////////////////////////////////////////////////////////////////////
+// save h/w regs, alu constants, texture constants, etc. ...
+// requires: bool_shadow_gpuaddr, loop_shadow_gpuaddr
+//////////////////////////////////////////////////////////////////////////////
+
+static void
+build_regsave_cmds(gsl_drawctxt_t *drawctxt, ctx_t *ctx)
+{
+ unsigned int *start = ctx->cmd;
+ unsigned int *cmd = start;
+
+#ifdef DISABLE_SHADOW_WRITES
+ // Write HW registers into shadow
+ build_reg_to_mem_range(mmRB_SURFACE_INFO, mmRB_DEPTH_INFO, &cmd, drawctxt);
+ build_reg_to_mem_range(mmCOHER_DEST_BASE_0, mmPA_SC_SCREEN_SCISSOR_BR, &cmd, drawctxt);
+ build_reg_to_mem_range(mmPA_SC_WINDOW_OFFSET, mmPA_SC_WINDOW_SCISSOR_BR, &cmd, drawctxt);
+ build_reg_to_mem_range(mmVGT_MAX_VTX_INDX, mmRB_FOG_COLOR, &cmd, drawctxt);
+ build_reg_to_mem_range(mmRB_STENCILREFMASK_BF, mmPA_CL_VPORT_ZOFFSET, &cmd, drawctxt);
+ build_reg_to_mem_range(mmSQ_PROGRAM_CNTL, mmSQ_WRAPPING_1, &cmd, drawctxt);
+ build_reg_to_mem_range(mmRB_DEPTHCONTROL, mmRB_MODECONTROL, &cmd, drawctxt);
+ build_reg_to_mem_range(mmPA_SU_POINT_SIZE, mmPA_SC_LINE_STIPPLE, &cmd, drawctxt);
+ build_reg_to_mem_range(mmPA_SC_VIZ_QUERY, mmPA_SC_VIZ_QUERY, &cmd, drawctxt);
+ build_reg_to_mem_range(mmPA_SC_LINE_CNTL, mmSQ_PS_CONST, &cmd, drawctxt);
+ build_reg_to_mem_range(mmPA_SC_AA_MASK, mmPA_SC_AA_MASK, &cmd, drawctxt);
+ build_reg_to_mem_range(mmVGT_VERTEX_REUSE_BLOCK_CNTL, mmRB_DEPTH_CLEAR, &cmd, drawctxt);
+ build_reg_to_mem_range(mmRB_SAMPLE_COUNT_CTL, mmRB_COLOR_DEST_MASK, &cmd, drawctxt);
+ build_reg_to_mem_range(mmPA_SU_POLY_OFFSET_FRONT_SCALE, mmPA_SU_POLY_OFFSET_BACK_OFFSET, &cmd, drawctxt);
+
+ // Copy ALU constants
+ cmd = reg_to_mem(cmd, (drawctxt->gpustate.gpuaddr) & 0xFFFFE000, mmSQ_CONSTANT_0, ALU_CONSTANTS);
+
+ // Copy Tex constants
+ cmd = reg_to_mem(cmd, (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000, mmSQ_FETCH_0, TEX_CONSTANTS);
+#else
+ // H/w registers are already shadowed; just need to disable shadowing to prevent corruption.
+ *cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+ *cmd++ = 4 << 16; // regs, start=0
+ *cmd++ = 0x0; // count = 0
+
+ // ALU constants are already shadowed; just need to disable shadowing to prevent corruption.
+ *cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
+ *cmd++ = 0 << 16; // ALU, start=0
+ *cmd++ = 0x0; // count = 0
+
+ // Tex constants are already shadowed; just need to disable shadowing to prevent corruption.
+ *cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
+ *cmd++ = 1 << 16; // Tex, start=0
+ *cmd++ = 0x0; // count = 0
+#endif
+
+
+
+
+ // Need to handle some of the registers separately
+ *cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmd++ = mmSQ_GPR_MANAGEMENT;
+ *cmd++ = ctx->reg_values[0];
+ *cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmd++ = mmTP0_CHICKEN;
+ *cmd++ = ctx->reg_values[1];
+
+ // Copy Boolean constants
+ cmd = reg_to_mem(cmd, ctx->bool_shadow, mmSQ_CF_BOOLEANS, BOOL_CONSTANTS);
+
+ // Copy Loop constants
+ cmd = reg_to_mem(cmd, ctx->loop_shadow, mmSQ_CF_LOOP, LOOP_CONSTANTS);
+
+ // create indirect buffer command for above command sequence
+ create_ib1(drawctxt, drawctxt->reg_save, start, cmd);
+
+ ctx->cmd = cmd;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// copy colour, depth, & stencil buffers from graphics memory to system memory
+//////////////////////////////////////////////////////////////////////////////
+
+static unsigned int*
+build_gmem2sys_cmds(gsl_drawctxt_t *drawctxt, ctx_t* ctx, gmem_shadow_t *shadow)
+{
+ unsigned int *cmds = shadow->gmem_save_commands;
+ unsigned int *start = cmds;
+
+ // Store TP0_CHICKEN register
+ *cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmds++ = mmTP0_CHICKEN;
+ if( ctx )
+ *cmds++ = ctx->chicken_restore;
+ else
+ cmds++;
+
+ *cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0;
+
+ // Set TP0_CHICKEN to zero
+ *cmds++ = pm4_type0_packet(mmTP0_CHICKEN, 1);
+ *cmds++ = 0x00000000;
+
+ // --------------
+ // program shader
+ // --------------
+
+ // load shader vtx constants ... 5 dwords
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
+ *cmds++ = (0x1 << 16) | SHADER_CONST_ADDR;
+ *cmds++ = 0;
+ *cmds++ = shadow->quad_vertices.gpuaddr | 0x3; // valid(?) vtx constant flag & addr
+ *cmds++ = 0x00000030; // limit = 12 dwords
+
+ // Invalidate L2 cache to make sure vertices are updated
+ *cmds++ = pm4_type0_packet(mmTC_CNTL_STATUS, 1);
+ *cmds++ = 0x1;
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
+ *cmds++ = PM4_REG(mmVGT_MAX_VTX_INDX);
+ *cmds++ = 0x00ffffff; //mmVGT_MAX_VTX_INDX
+ *cmds++ = 0x0; //mmVGT_MIN_VTX_INDX
+ *cmds++ = 0x00000000; //mmVGT_INDX_OFFSET
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmPA_SC_AA_MASK);
+ *cmds++ = 0x0000ffff; //mmPA_SC_AA_MASK
+
+
+ // load the patched vertex shader stream
+ cmds = program_shader(cmds, 0, gmem2sys_vtx_pgm, GMEM2SYS_VTX_PGM_LEN);
+
+ // Load the patched fragment shader stream
+ cmds = program_shader(cmds, 1, gmem2sys_frag_pgm, GMEM2SYS_FRAG_PGM_LEN);
+
+ // SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = PM4_REG(mmSQ_PROGRAM_CNTL);
+ *cmds++ = 0x10010001;
+ *cmds++ = 0x00000008;
+
+
+ // --------------
+ // resolve
+ // --------------
+
+ // PA_CL_VTE_CNTL
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmPA_CL_VTE_CNTL);
+ *cmds++ = 0x00000b00; // disable X/Y/Z transforms, X/Y/Z are premultiplied by W
+
+ // change colour buffer to RGBA8888, MSAA = 1, and matching pitch
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = PM4_REG(mmRB_SURFACE_INFO);
+ *cmds++ = drawctxt->context_gmem_shadow.pitch; // GMEM pitch is equal to context GMEM shadow pitch
+
+ // RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0, Base=gmem_base
+ if( ctx )
+ {
+ KOS_ASSERT((ctx->gmem_base & 0xFFF) == 0); // gmem base assumed 4K aligned.
+ *cmds++ = (COLORX_8_8_8_8 << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | ctx->gmem_base;
+ }
+ else
+ cmds++;
+
+ // disable Z
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmRB_DEPTHCONTROL);
+ *cmds++ = 0;
+
+ // set mmPA_SU_SC_MODE_CNTL
+ // Front_ptype = draw triangles
+ // Back_ptype = draw triangles
+ // Provoking vertex = last
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmPA_SU_SC_MODE_CNTL);
+ *cmds++ = 0x00080240;
+
+ // set the scissor to the extents of the draw surface
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = PM4_REG(mmPA_SC_SCREEN_SCISSOR_TL);
+ *cmds++ = (0 << 16) | 0;
+ *cmds++ = (drawctxt->context_gmem_shadow.height << 16) | drawctxt->context_gmem_shadow.width;
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = PM4_REG(mmPA_SC_WINDOW_SCISSOR_TL);
+ *cmds++ = (unsigned int) ((1U << 31) | (0 << 16) | 0);
+ *cmds++ = (drawctxt->context_gmem_shadow.height << 16) | drawctxt->context_gmem_shadow.width;
+
+ // load the viewport so that z scale = clear depth and z offset = 0.0f
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = PM4_REG(mmPA_CL_VPORT_ZSCALE);
+ *cmds++ = 0xbf800000; // -1.0f
+ *cmds++ = 0x0;
+
+ // load the COPY state
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 6);
+ *cmds++ = PM4_REG(mmRB_COPY_CONTROL);
+ *cmds++ = 0; // RB_COPY_CONTROL
+
+ *cmds++ = (shadow->gmemshadow.gpuaddr+shadow->offset*4) & 0xfffff000; // RB_COPY_DEST_BASE
+
+ *cmds++ = shadow->pitch >> 5; // RB_COPY_DEST_PITCH
+ *cmds++ = 0x0003c058; // Endian=none, Linear, Format=RGBA8888,Swap=0,!Dither,MaskWrite:R=G=B=A=1
+
+ {
+ // Calculate the new offset based on the adjusted base
+ unsigned int addr = (shadow->gmemshadow.gpuaddr+shadow->offset*4);
+ unsigned int offset = (addr-(addr&0xfffff000))/4;
+
+        KOS_ASSERT( (offset & 0xfffff000) == 0 ); // Make sure we stay in offsetx field.
+
+ *cmds++ = offset;
+ }
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmRB_MODECONTROL);
+ *cmds++ = 0x6; // EDRAM copy
+
+ // queue the draw packet
+ *cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 2);
+ *cmds++ = 0; // viz query info.
+ *cmds++ = 0x00030088; // PrimType=RectList, NumIndices=3, SrcSel=AutoIndex
+
+ // create indirect buffer command for above command sequence
+ create_ib1(drawctxt, shadow->gmem_save, start, cmds);
+
+ return cmds;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// context restore
+//////////////////////////////////////////////////////////////////////////////
+
+
+//////////////////////////////////////////////////////////////////////////////
+// copy colour, depth, & stencil buffers from system memory to graphics memory
+//////////////////////////////////////////////////////////////////////////////
+
+static unsigned int*
+build_sys2gmem_cmds(gsl_drawctxt_t *drawctxt, ctx_t* ctx, gmem_shadow_t *shadow)
+{
+ unsigned int *cmds = shadow->gmem_restore_commands;
+ unsigned int *start = cmds;
+
+ // Store TP0_CHICKEN register
+ *cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmds++ = mmTP0_CHICKEN;
+ if( ctx )
+ *cmds++ = ctx->chicken_restore;
+ else
+ cmds++;
+
+ *cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0;
+
+ // Set TP0_CHICKEN to zero
+ *cmds++ = pm4_type0_packet(mmTP0_CHICKEN, 1);
+ *cmds++ = 0x00000000;
+
+ // ----------------
+ // shader constants
+ // ----------------
+
+ // vertex buffer constants
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 7);
+
+ *cmds++ = (0x1 << 16) | (9 * 6);
+ *cmds++ = shadow->quad_vertices.gpuaddr | 0x3; // valid(?) vtx constant flag & addr
+ *cmds++ = 0x00000030; // limit = 12 dwords
+ *cmds++ = shadow->quad_texcoords.gpuaddr | 0x3; // valid(?) vtx constant flag & addr
+ *cmds++ = 0x00000020; // limit = 8 dwords
+ *cmds++ = 0;
+ *cmds++ = 0;
+
+ // Invalidate L2 cache to make sure vertices and texture coordinates are updated
+ *cmds++ = pm4_type0_packet(mmTC_CNTL_STATUS, 1);
+ *cmds++ = 0x1;
+
+ // load the patched vertex shader stream
+ cmds = program_shader(cmds, 0, sys2gmem_vtx_pgm, SYS2GMEM_VTX_PGM_LEN);
+
+ // Load the patched fragment shader stream
+ cmds = program_shader(cmds, 1, sys2gmem_frag_pgm, SYS2GMEM_FRAG_PGM_LEN);
+
+ // SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = PM4_REG(mmSQ_PROGRAM_CNTL);
+ *cmds++ = 0x10030002;
+ *cmds++ = 0x00000008;
+
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmPA_SC_AA_MASK);
+ *cmds++ = 0x0000ffff; //mmPA_SC_AA_MASK
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmPA_SC_VIZ_QUERY);
+ *cmds++ = 0x0; //mmPA_SC_VIZ_QUERY
+
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmRB_COLORCONTROL);
+ *cmds++ = 0x00000c20; // RB_COLORCONTROL
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
+ *cmds++ = PM4_REG(mmVGT_MAX_VTX_INDX);
+ *cmds++ = 0x00ffffff; //mmVGT_MAX_VTX_INDX
+ *cmds++ = 0x0; //mmVGT_MIN_VTX_INDX
+ *cmds++ = 0x00000000; //mmVGT_INDX_OFFSET
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = PM4_REG(mmVGT_VERTEX_REUSE_BLOCK_CNTL);
+ *cmds++ = 0x00000002; //mmVGT_VERTEX_REUSE_BLOCK_CNTL
+ *cmds++ = 0x00000002; //mmVGT_OUT_DEALLOC_CNTL
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmSQ_INTERPOLATOR_CNTL);
+ //*cmds++ = 0x0000ffff; //mmSQ_INTERPOLATOR_CNTL
+ *cmds++ = 0xffffffff; //mmSQ_INTERPOLATOR_CNTL
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmPA_SC_AA_CONFIG);
+ *cmds++ = 0x00000000; //mmPA_SC_AA_CONFIG
+
+
+ // set mmPA_SU_SC_MODE_CNTL
+ // Front_ptype = draw triangles
+ // Back_ptype = draw triangles
+ // Provoking vertex = last
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmPA_SU_SC_MODE_CNTL);
+ *cmds++ = 0x00080240;
+
+ // texture constants
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, (SYS2GMEM_TEX_CONST_LEN + 1));
+ *cmds++ = (0x1 << 16) | (0 * 6);
+ kos_memcpy(cmds, sys2gmem_tex_const, SYS2GMEM_TEX_CONST_LEN<<2);
+ cmds[0] |= (shadow->pitch >> 5) << 22;
+ cmds[1] |= shadow->gmemshadow.gpuaddr;
+ cmds[2] |= (shadow->width+shadow->offset_x-1) | (shadow->height+shadow->offset_y-1) << 13;
+ cmds += SYS2GMEM_TEX_CONST_LEN;
+
+ // change colour buffer to RGBA8888, MSAA = 1, and matching pitch
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = PM4_REG(mmRB_SURFACE_INFO);
+ *cmds++ = drawctxt->context_gmem_shadow.pitch; // GMEM pitch is equal to context GMEM shadow pitch
+
+
+ // RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0, Base=gmem_base
+ if( ctx )
+ *cmds++ = (COLORX_8_8_8_8 << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | ctx->gmem_base;
+ else
+ cmds++;
+
+ // RB_DEPTHCONTROL
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmRB_DEPTHCONTROL);
+ *cmds++ = 0; // disable Z
+
+
+ // set the scissor to the extents of the draw surface
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = PM4_REG(mmPA_SC_SCREEN_SCISSOR_TL);
+ *cmds++ = (0 << 16) | 0;
+ *cmds++ = (drawctxt->context_gmem_shadow.height << 16) | drawctxt->context_gmem_shadow.width;
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = PM4_REG(mmPA_SC_WINDOW_SCISSOR_TL);
+ *cmds++ = (unsigned int) ((1U << 31) | (shadow->gmem_offset_y << 16) | shadow->gmem_offset_x);
+ *cmds++ = (drawctxt->context_gmem_shadow.height << 16) | drawctxt->context_gmem_shadow.width;
+
+ // PA_CL_VTE_CNTL
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmPA_CL_VTE_CNTL);
+ *cmds++ = 0x00000b00; // disable X/Y/Z transforms, X/Y/Z are premultiplied by W
+
+ // load the viewport so that z scale = clear depth and z offset = 0.0f
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = PM4_REG(mmPA_CL_VPORT_ZSCALE);
+ *cmds++ = 0xbf800000;
+ *cmds++ = 0x0;
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmRB_COLOR_MASK);
+    *cmds++ = 0x0000000f;    // R = G = B = A = 1:enabled
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmRB_COLOR_DEST_MASK);
+ *cmds++ = 0xffffffff;
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = PM4_REG(mmSQ_WRAPPING_0);
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+
+ *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = PM4_REG(mmRB_MODECONTROL);
+ *cmds++ = 0x4; // draw pixels with color and depth/stencil component
+
+ // queue the draw packet
+ *cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 2);
+ *cmds++ = 0; // viz query info.
+ *cmds++ = 0x00030088; // PrimType=RectList, NumIndices=3, SrcSel=AutoIndex
+
+ // create indirect buffer command for above command sequence
+ create_ib1(drawctxt, shadow->gmem_restore, start, cmds);
+
+ return cmds;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// restore h/w regs, alu constants, texture constants, etc. ...
+//////////////////////////////////////////////////////////////////////////////
+
+static unsigned *
+reg_range(unsigned int *cmd, unsigned int start, unsigned int end)
+{
+ *cmd++ = PM4_REG(start); // h/w regs, start addr
+ *cmd++ = end - start + 1; // count
+ return cmd;
+}
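+
+// Each reg_range() call appends one (start register, register count) pair;
+// build_regrestore_cmds() uses these pairs inside the deferred
+// PM4_LOAD_CONSTANT_CONTEXT packet and patches the packet header afterwards,
+// once the number of register blocks (and hence the command length) is known.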
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+static void
+build_regrestore_cmds(gsl_drawctxt_t *drawctxt, ctx_t *ctx)
+{
+ unsigned int *start = ctx->cmd;
+ unsigned int *cmd = start;
+
+
+ //*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ //*cmd++ = 0;
+
+ // H/W Registers
+ cmd++; // deferred pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, ???);
+#ifdef DISABLE_SHADOW_WRITES
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1; // Force mismatch
+#else
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+#endif
+
+ cmd = reg_range(cmd, mmRB_SURFACE_INFO, mmPA_SC_SCREEN_SCISSOR_BR);
+ cmd = reg_range(cmd, mmPA_SC_WINDOW_OFFSET, mmPA_SC_WINDOW_SCISSOR_BR);
+ cmd = reg_range(cmd, mmVGT_MAX_VTX_INDX, mmPA_CL_VPORT_ZOFFSET);
+ cmd = reg_range(cmd, mmSQ_PROGRAM_CNTL, mmSQ_WRAPPING_1);
+ cmd = reg_range(cmd, mmRB_DEPTHCONTROL, mmRB_MODECONTROL);
+ cmd = reg_range(cmd, mmPA_SU_POINT_SIZE, mmPA_SC_VIZ_QUERY/*mmVGT_ENHANCE*/);
+ cmd = reg_range(cmd, mmPA_SC_LINE_CNTL, mmRB_COLOR_DEST_MASK);
+ cmd = reg_range(cmd, mmPA_SU_POLY_OFFSET_FRONT_SCALE, mmPA_SU_POLY_OFFSET_BACK_OFFSET);
+
+ // Now we know how many register blocks we have, we can compute command length
+ start[0] = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, (cmd-start)-1);
+#ifdef DISABLE_SHADOW_WRITES
+ start[2] |= (0<<24) | (4 << 16); // Disable shadowing.
+#else
+ start[2] |= (1<<24) | (4 << 16); // Enable shadowing for the entire register block.
+#endif
+
+ // Need to handle some of the registers separately
+ *cmd++ = pm4_type0_packet(mmSQ_GPR_MANAGEMENT, 1);
+ ctx->reg_values[0] = gpuaddr(cmd, &drawctxt->gpustate);
+ *cmd++ = 0x00040400;
+
+ *cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+ *cmd++ = pm4_type0_packet(mmTP0_CHICKEN, 1);
+ ctx->reg_values[1] = gpuaddr(cmd, &drawctxt->gpustate);
+ *cmd++ = 0x00000000;
+
+ // ALU Constants
+ *cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
+#ifdef DISABLE_SHADOW_WRITES
+ *cmd++ = (0<<24) | (0<<16) | 0; // Disable shadowing
+#else
+ *cmd++ = (1<<24) | (0<<16) | 0;
+#endif
+ *cmd++ = ALU_CONSTANTS;
+
+
+ // Texture Constants
+ *cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
+#ifdef DISABLE_SHADOW_WRITES
+ *cmd++ = (0<<24) | (1<<16) | 0; // Disable shadowing
+#else
+ *cmd++ = (1<<24) | (1<<16) | 0;
+#endif
+ *cmd++ = TEX_CONSTANTS;
+
+
+ // Boolean Constants
+ *cmd++ = pm4_type3_packet(PM4_SET_CONSTANT, 1 + BOOL_CONSTANTS);
+ *cmd++ = (2<<16) | 0;
+
+    // the next BOOL_CONSTANTS dwords are the shadow area for the boolean constants.
+ ctx->bool_shadow = gpuaddr(cmd, &drawctxt->gpustate);
+ cmd += BOOL_CONSTANTS;
+
+
+ // Loop Constants
+ *cmd++ = pm4_type3_packet(PM4_SET_CONSTANT, 1 + LOOP_CONSTANTS);
+ *cmd++ = (3<<16) | 0;
+
+    // the next LOOP_CONSTANTS dwords are the shadow area for the loop constants.
+ ctx->loop_shadow = gpuaddr(cmd, &drawctxt->gpustate);
+ cmd += LOOP_CONSTANTS;
+
+ // create indirect buffer command for above command sequence
+ create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);
+
+ ctx->cmd = cmd;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// quad for saving/restoring gmem
+//////////////////////////////////////////////////////////////////////////////
+
+static void set_gmem_copy_quad( gmem_shadow_t* shadow )
+{
+ unsigned int tex_offset[2];
+
+ // set vertex buffer values
+
+ gmem_copy_quad[1] = uint2float( shadow->height + shadow->gmem_offset_y );
+ gmem_copy_quad[3] = uint2float( shadow->width + shadow->gmem_offset_x );
+ gmem_copy_quad[4] = uint2float( shadow->height + shadow->gmem_offset_y );
+ gmem_copy_quad[9] = uint2float( shadow->width + shadow->gmem_offset_x );
+
+ gmem_copy_quad[0] = uint2float( shadow->gmem_offset_x );
+ gmem_copy_quad[6] = uint2float( shadow->gmem_offset_x );
+ gmem_copy_quad[7] = uint2float( shadow->gmem_offset_y );
+ gmem_copy_quad[10] = uint2float( shadow->gmem_offset_y );
+
+ tex_offset[0] = uintdivide( shadow->offset_x, (shadow->offset_x+shadow->width) );
+ tex_offset[1] = uintdivide( shadow->offset_y, (shadow->offset_y+shadow->height) );
+
+ gmem_copy_texcoord[0] = gmem_copy_texcoord[4] = tex_offset[0];
+ gmem_copy_texcoord[5] = gmem_copy_texcoord[7] = tex_offset[1];
+
+ // copy quad data to vertex buffer
+ kos_memcpy(shadow->quad_vertices.hostptr, gmem_copy_quad, QUAD_LEN << 2);
+
+ // copy tex coord data to tex coord buffer
+ kos_memcpy(shadow->quad_texcoords.hostptr, gmem_copy_texcoord, TEXCOORD_LEN << 2);
+}
+
+
+static void
+build_quad_vtxbuff(gsl_drawctxt_t *drawctxt, ctx_t *ctx, gmem_shadow_t* shadow)
+{
+ unsigned int *cmd = ctx->cmd;
+
+ // quad vertex buffer location
+ shadow->quad_vertices.hostptr = cmd;
+ shadow->quad_vertices.gpuaddr = gpuaddr(cmd, &drawctxt->gpustate);
+ cmd += QUAD_LEN;
+
+ // tex coord buffer location (in GPU space)
+ shadow->quad_texcoords.hostptr = cmd;
+ shadow->quad_texcoords.gpuaddr = gpuaddr(cmd, &drawctxt->gpustate);
+
+
+ cmd += TEXCOORD_LEN;
+
+ set_gmem_copy_quad(shadow);
+
+
+ ctx->cmd = cmd;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+static void
+build_shader_save_restore_cmds(gsl_drawctxt_t *drawctxt, ctx_t *ctx)
+{
+ unsigned int *cmd = ctx->cmd;
+ unsigned int *save, *restore, *fixup;
+#if defined(PM4_IM_STORE)
+ unsigned int *startSizeVtx, *startSizePix, *startSizeShared;
+#endif
+ unsigned int *partition1;
+ unsigned int *shaderBases, *partition2;
+
+#if defined(PM4_IM_STORE)
+ // compute vertex, pixel and shared instruction shadow GPU addresses
+ ctx->shader_vertex = drawctxt->gpustate.gpuaddr + SHADER_OFFSET;
+ ctx->shader_pixel = ctx->shader_vertex + SHADER_SHADOW_SIZE;
+ ctx->shader_shared = ctx->shader_pixel + SHADER_SHADOW_SIZE;
+#endif
+
+
+ //-------------------------------------------------------------------
+ // restore shader partitioning and instructions
+ //-------------------------------------------------------------------
+
+ restore = cmd; // start address
+
+ // Invalidate Vertex & Pixel instruction code address and sizes
+ *cmd++ = pm4_type3_packet(PM4_INVALIDATE_STATE, 1);
+ *cmd++ = 0x00000300; // 0x100 = Vertex, 0x200 = Pixel
+
+ // Restore previous shader vertex & pixel instruction bases.
+ *cmd++ = pm4_type3_packet(PM4_SET_SHADER_BASES, 1);
+ shaderBases = cmd++; // TBD #5: shader bases (from fixup)
+
+ // write the shader partition information to a scratch register
+ *cmd++ = pm4_type0_packet(mmSQ_INST_STORE_MANAGMENT, 1);
+ partition1 = cmd++; // TBD #4a: partition info (from save)
+
+#if defined(PM4_IM_STORE)
+ // load vertex shader instructions from the shadow.
+ *cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
+ *cmd++ = ctx->shader_vertex + 0x0; // 0x0 = Vertex
+ startSizeVtx = cmd++; // TBD #1: start/size (from save)
+
+ // load pixel shader instructions from the shadow.
+ *cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
+ *cmd++ = ctx->shader_pixel + 0x1; // 0x1 = Pixel
+ startSizePix = cmd++; // TBD #2: start/size (from save)
+
+ // load shared shader instructions from the shadow.
+ *cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
+ *cmd++ = ctx->shader_shared + 0x2; // 0x2 = Shared
+ startSizeShared = cmd++; // TBD #3: start/size (from save)
+#endif
+
+ // create indirect buffer command for above command sequence
+ create_ib1(drawctxt, drawctxt->shader_restore, restore, cmd);
+
+
+ //-------------------------------------------------------------------
+ // fixup SET_SHADER_BASES data
+ //
+    // since self-modifying PM4 code is being used here, a separate
+    // command buffer is used for this fixup operation, to ensure the
+ // commands are not read by the PM4 engine before the data fields
+ // have been written.
+ //-------------------------------------------------------------------
+
+ fixup = cmd; // start address
+
+ // write the shader partition information to a scratch register
+ *cmd++ = pm4_type0_packet(mmSCRATCH_REG2, 1);
+ partition2 = cmd++; // TBD #4b: partition info (from save)
+
+ // mask off unused bits, then OR with shader instruction memory size
+ *cmd++ = pm4_type3_packet(PM4_REG_RMW, 3);
+ *cmd++ = mmSCRATCH_REG2;
+ *cmd++ = 0x0FFF0FFF; // AND off invalid bits.
+ *cmd++ = (unsigned int)((SHADER_INSTRUCT_LOG2-5U) << 29); // OR in instruction memory size
+
+ // write the computed value to the SET_SHADER_BASES data field
+ *cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmd++ = mmSCRATCH_REG2;
+ *cmd++ = gpuaddr(shaderBases, &drawctxt->gpustate); // TBD #5: shader bases (to restore)
+
+ // create indirect buffer command for above command sequence
+ create_ib1(drawctxt, drawctxt->shader_fixup, fixup, cmd);
+
+
+ //-------------------------------------------------------------------
+ // save shader partitioning and instructions
+ //-------------------------------------------------------------------
+
+ save = cmd; // start address
+
+ *cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+ // Fetch the SQ_INST_STORE_MANAGMENT register value,
+ // Store the value in the data fields of the SET_CONSTANT commands above.
+ *cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmd++ = mmSQ_INST_STORE_MANAGMENT;
+ *cmd++ = gpuaddr(partition1, &drawctxt->gpustate); // TBD #4a: partition info (to restore)
+ *cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmd++ = mmSQ_INST_STORE_MANAGMENT;
+ *cmd++ = gpuaddr(partition2, &drawctxt->gpustate); // TBD #4b: partition info (to fixup)
+
+#if defined(PM4_IM_STORE)
+ // Store the vertex shader instructions
+ *cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
+ *cmd++ = ctx->shader_vertex + 0x0; // 0x0 = Vertex
+ *cmd++ = gpuaddr(startSizeVtx, &drawctxt->gpustate); // TBD #1: start/size (to restore)
+
+ // store the pixel shader instructions
+ *cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
+ *cmd++ = ctx->shader_pixel + 0x1; // 0x1 = Pixel
+ *cmd++ = gpuaddr(startSizePix, &drawctxt->gpustate); // TBD #2: start/size (to restore)
+
+ // Store the shared shader instructions
+ *cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
+ *cmd++ = ctx->shader_shared + 0x2; // 0x2 = Shared
+ *cmd++ = gpuaddr(startSizeShared, &drawctxt->gpustate); // TBD #3: start/size (to restore)
+#endif
+
+ *cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+
+
+ // Create indirect buffer command for above command sequence
+ create_ib1(drawctxt, drawctxt->shader_save, save, cmd);
+
+
+ ctx->cmd = cmd;
+}
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+// create buffers for saving/restoring registers and constants
+//////////////////////////////////////////////////////////////////////////////
+
+static int
+create_gpustate_shadow(gsl_device_t *device, gsl_drawctxt_t *drawctxt, ctx_t *ctx)
+{
+ gsl_flags_t flags;
+
+ flags = (GSL_MEMFLAGS_CONPHYS | GSL_MEMFLAGS_ALIGN8K);
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX, flags = (GSL_MEMFLAGS_EMEM | GSL_MEMFLAGS_ALIGN8K));
+
+ // allocate memory to allow HW to save sub-blocks for efficient context save/restore
+ if (kgsl_sharedmem_alloc0(device->id, flags, CONTEXT_SIZE, &drawctxt->gpustate) != GSL_SUCCESS)
+ return GSL_FAILURE;
+
+ drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;
+
+ // Blank out h/w register, constant, and command buffer shadows.
+ kgsl_sharedmem_set0(&drawctxt->gpustate, 0, 0, CONTEXT_SIZE);
+
+ // set-up command and vertex buffer pointers
+ ctx->cmd = ctx->start = (unsigned int *) ((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);
+
+ // build indirect command buffers to save & restore regs/constants
+ build_regrestore_cmds(drawctxt, ctx);
+ build_regsave_cmds(drawctxt, ctx);
+
+ build_shader_save_restore_cmds(drawctxt, ctx);
+
+ return GSL_SUCCESS;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// Allocate GMEM shadow buffer
+//////////////////////////////////////////////////////////////////////////////
+static int
+allocate_gmem_shadow_buffer(gsl_device_t *device, gsl_drawctxt_t *drawctxt)
+{
+ // allocate memory for GMEM shadow
+ if (kgsl_sharedmem_alloc0(device->id, (GSL_MEMFLAGS_CONPHYS | GSL_MEMFLAGS_ALIGN8K),
+ drawctxt->context_gmem_shadow.size, &drawctxt->context_gmem_shadow.gmemshadow) != GSL_SUCCESS)
+ return GSL_FAILURE;
+
+ // blank out gmem shadow.
+ kgsl_sharedmem_set0(&drawctxt->context_gmem_shadow.gmemshadow, 0, 0, drawctxt->context_gmem_shadow.size);
+
+ return GSL_SUCCESS;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// create GMEM save/restore specific stuff
+//////////////////////////////////////////////////////////////////////////////
+
+static int
+create_gmem_shadow(gsl_device_t *device, gsl_drawctxt_t *drawctxt, ctx_t *ctx)
+{
+ unsigned int i;
+ config_gmemsize(&drawctxt->context_gmem_shadow, device->gmemspace.sizebytes);
+ ctx->gmem_base = device->gmemspace.gpu_base;
+
+ if( drawctxt->flags & CTXT_FLAGS_GMEM_SHADOW )
+ {
+ if( allocate_gmem_shadow_buffer(device, drawctxt) != GSL_SUCCESS )
+ return GSL_FAILURE;
+ }
+ else
+ {
+ kos_memset( &drawctxt->context_gmem_shadow.gmemshadow, 0, sizeof( gsl_memdesc_t ) );
+ }
+
+ // build quad vertex buffer
+ build_quad_vtxbuff(drawctxt, ctx, &drawctxt->context_gmem_shadow);
+
+ // build TP0_CHICKEN register restore command buffer
+ ctx->cmd = build_chicken_restore_cmds(drawctxt, ctx);
+
+ // build indirect command buffers to save & restore gmem
+ drawctxt->context_gmem_shadow.gmem_save_commands = ctx->cmd;
+ ctx->cmd = build_gmem2sys_cmds(drawctxt, ctx, &drawctxt->context_gmem_shadow);
+ drawctxt->context_gmem_shadow.gmem_restore_commands = ctx->cmd;
+ ctx->cmd = build_sys2gmem_cmds(drawctxt, ctx, &drawctxt->context_gmem_shadow);
+
+ for( i = 0; i < GSL_MAX_GMEM_SHADOW_BUFFERS; i++ )
+ {
+ // build quad vertex buffer
+ build_quad_vtxbuff(drawctxt, ctx, &drawctxt->user_gmem_shadow[i]);
+
+ // build indirect command buffers to save & restore gmem
+ drawctxt->user_gmem_shadow[i].gmem_save_commands = ctx->cmd;
+ ctx->cmd = build_gmem2sys_cmds(drawctxt, ctx, &drawctxt->user_gmem_shadow[i]);
+
+ drawctxt->user_gmem_shadow[i].gmem_restore_commands = ctx->cmd;
+ ctx->cmd = build_sys2gmem_cmds(drawctxt, ctx, &drawctxt->user_gmem_shadow[i]);
+ }
+
+ return GSL_SUCCESS;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// init draw context
+//////////////////////////////////////////////////////////////////////////////
+
+int
+kgsl_drawctxt_init(gsl_device_t *device)
+{
+ return (GSL_SUCCESS);
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// close draw context
+//////////////////////////////////////////////////////////////////////////////
+
+int
+kgsl_drawctxt_close(gsl_device_t *device)
+{
+ return (GSL_SUCCESS);
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// create a new drawing context
+//////////////////////////////////////////////////////////////////////////////
+
+int
+kgsl_drawctxt_create(gsl_device_t* device, gsl_context_type_t type, unsigned int *drawctxt_id, gsl_flags_t flags)
+{
+ gsl_drawctxt_t *drawctxt;
+ int index;
+ ctx_t ctx;
+
+ kgsl_device_active(device);
+
+ if (device->drawctxt_count >= GSL_CONTEXT_MAX)
+ {
+ return (GSL_FAILURE);
+ }
+
+ // find a free context slot
+ index = 0;
+ while (index < GSL_CONTEXT_MAX)
+ {
+ if (device->drawctxt[index].flags == CTXT_FLAGS_NOT_IN_USE)
+ break;
+
+ index++;
+ }
+
+ if (index >= GSL_CONTEXT_MAX)
+ {
+ return (GSL_FAILURE);
+ }
+
+ drawctxt = &device->drawctxt[index];
+
+ kos_memset( &drawctxt->context_gmem_shadow, 0, sizeof( gmem_shadow_t ) );
+
+ drawctxt->pid = GSL_CALLER_PROCESSID_GET();
+ drawctxt->flags = CTXT_FLAGS_IN_USE;
+ drawctxt->type = type;
+
+ device->drawctxt_count++;
+
+ // create context shadows, when not running in safe mode
+ if (!(device->flags & GSL_FLAGS_SAFEMODE))
+ {
+ if (create_gpustate_shadow(device, drawctxt, &ctx) != GSL_SUCCESS)
+ {
+ kgsl_drawctxt_destroy(device, index);
+ return (GSL_FAILURE);
+ }
+
+ // Save the shader instruction memory on context switching
+ drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
+
+ if(!(flags & GSL_CONTEXT_NO_GMEM_ALLOC))
+ drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW;
+
+ // Clear out user defined GMEM shadow buffer structs
+ kos_memset( drawctxt->user_gmem_shadow, 0, sizeof(gmem_shadow_t)*GSL_MAX_GMEM_SHADOW_BUFFERS );
+
+ // create gmem shadow
+ if (create_gmem_shadow(device, drawctxt, &ctx) != GSL_SUCCESS)
+ {
+ kgsl_drawctxt_destroy(device, index);
+ return (GSL_FAILURE);
+ }
+
+
+ KOS_ASSERT(ctx.cmd - ctx.start <= CMD_BUFFER_LEN);
+ }
+
+ *drawctxt_id = index;
+
+ return (GSL_SUCCESS);
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// destroy a drawing context
+//////////////////////////////////////////////////////////////////////////////
+
+int
+kgsl_drawctxt_destroy(gsl_device_t* device, unsigned int drawctxt_id)
+{
+ gsl_drawctxt_t *drawctxt;
+
+ drawctxt = &device->drawctxt[drawctxt_id];
+
+ if (drawctxt->flags != CTXT_FLAGS_NOT_IN_USE)
+ {
+ // deactivate context
+ if (device->drawctxt_active == drawctxt)
+ {
+ // no need to save GMEM or shader, the context is being destroyed.
+ drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE | CTXT_FLAGS_SHADER_SAVE);
+
+ kgsl_drawctxt_switch(device, GSL_CONTEXT_NONE, 0);
+ }
+
+ device->ftbl.device_idle(device, GSL_TIMEOUT_DEFAULT);
+
+ // destroy state shadow, if allocated
+ if (drawctxt->flags & CTXT_FLAGS_STATE_SHADOW)
+ kgsl_sharedmem_free0(&drawctxt->gpustate, GSL_CALLER_PROCESSID_GET());
+
+
+ // destroy gmem shadow, if allocated
+ if (drawctxt->context_gmem_shadow.gmemshadow.size > 0)
+ {
+ kgsl_sharedmem_free0(&drawctxt->context_gmem_shadow.gmemshadow, GSL_CALLER_PROCESSID_GET());
+ drawctxt->context_gmem_shadow.gmemshadow.size = 0;
+ }
+
+ drawctxt->flags = CTXT_FLAGS_NOT_IN_USE;
+ drawctxt->pid = 0;
+
+ device->drawctxt_count--;
+ KOS_ASSERT(device->drawctxt_count >= 0);
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Binds a user specified buffer as GMEM shadow area
+//
+// gmem_rect: defines the rectangle that is copied from GMEM. X and Y
+// coordinates need to be multiples of 8 after conversion to 32bpp.
+// X, Y, width, and height need to be at 32-bit boundary to avoid
+//            X, Y, width, and height need to be at a 32-bit boundary to avoid
+//
+// shadow_x & shadow_y: Position in GMEM shadow buffer where the contents of
+// gmem_rect is copied. Both must be multiples of 8 after
+// conversion to 32bpp. They also need to be at 32-bit
+//                      conversion to 32bpp. They also need to be at a 32-bit
+//                      boundary to avoid rounding.
+// shadow_buffer: Description of the GMEM shadow buffer. BPP needs to be
+//                 8, 16, 32, 64, or 128. The enabled field (0 or 1) tells
+//                 whether the buffer is used. All the other buffer
+// parameters are ignored when enabled=0.
+//
+// buffer_id: Two different buffers can be defined. Use buffer IDs 0 and 1.
+//
+//
+//////////////////////////////////////////////////////////////////////////////
+KGSL_API int kgsl_drawctxt_bind_gmem_shadow(gsl_deviceid_t device_id, unsigned int drawctxt_id, const gsl_rect_t* gmem_rect, unsigned int shadow_x, unsigned int shadow_y, const gsl_buffer_desc_t* shadow_buffer, unsigned int buffer_id)
+{
+ gsl_device_t *device;
+ gsl_drawctxt_t *drawctxt;
+ gmem_shadow_t *shadow; // Shadow struct being modified
+ unsigned int i;
+
+ GSL_API_MUTEX_LOCK();
+
+ device = &gsl_driver.device[device_id-1]; // device_id is 1 based
+
+ drawctxt = &device->drawctxt[drawctxt_id];
+
+ shadow = &drawctxt->user_gmem_shadow[buffer_id];
+
+ if( !shadow_buffer->enabled )
+ {
+ // Disable shadow
+ shadow->gmemshadow.size = 0;
+ }
+ else
+ {
+ // Binding to a buffer
+ unsigned int width, height, gmem_x, gmem_y, gmem_width, gmem_height, pixel_ratio;
+
+ KOS_ASSERT(shadow_buffer->stride_bytes%4 == 0);
+
+ // Convert to 32bpp pixel units
+ if( shadow_buffer->bpp <= 32 )
+ {
+ KOS_ASSERT(32%shadow_buffer->bpp==0);
+ pixel_ratio = 32/shadow_buffer->bpp;
+ KOS_ASSERT(gmem_rect->x%pixel_ratio==0); // Needs to be at 32bit boundary
+ gmem_x = gmem_rect->x/pixel_ratio;
+ KOS_ASSERT(gmem_x%8==0); // Needs to be a multiple of 8
+ KOS_ASSERT(gmem_rect->y%pixel_ratio==0); // Needs to be at 32bit boundary
+ gmem_y = gmem_rect->y/pixel_ratio;
+ KOS_ASSERT(gmem_y%8==0); // Needs to be a multiple of 8
+ KOS_ASSERT(gmem_rect->width%pixel_ratio==0); // Needs to be at 32bit boundary
+ gmem_width = gmem_rect->width/pixel_ratio;
+ KOS_ASSERT(gmem_rect->height%pixel_ratio==0); // Needs to be at 32bit boundary
+ gmem_height = gmem_rect->height/pixel_ratio;
+ KOS_ASSERT(shadow_x%pixel_ratio==0); // Needs to be at 32bit boundary
+ shadow_x = shadow_x/pixel_ratio;
+ KOS_ASSERT(shadow_x%8==0); // Needs to be a multiple of 8
+ KOS_ASSERT(shadow_y%pixel_ratio==0); // Needs to be at 32bit boundary
+ shadow_y = shadow_y/pixel_ratio;
+ KOS_ASSERT(shadow_y%8==0); // Needs to be a multiple of 8
+ }
+ else
+ {
+ KOS_ASSERT(shadow_buffer->bpp==64 || shadow_buffer->bpp==128);
+ pixel_ratio = shadow_buffer->bpp/32;
+ gmem_x = gmem_rect->x*pixel_ratio;
+ KOS_ASSERT(gmem_x%8==0); // Needs to be a multiple of 8
+ gmem_y = gmem_rect->y*pixel_ratio;
+ KOS_ASSERT(gmem_y%8==0); // Needs to be a multiple of 8
+ gmem_width = gmem_rect->width*pixel_ratio;
+ gmem_height = gmem_rect->height*pixel_ratio;
+ shadow_x = shadow_x*pixel_ratio;
+ KOS_ASSERT(shadow_x%8==0); // Needs to be a multiple of 8
+ shadow_y = shadow_y*pixel_ratio;
+ KOS_ASSERT(shadow_y%8==0); // Needs to be a multiple of 8
+ }
+
+ KOS_ASSERT( buffer_id >= 0 && buffer_id < GSL_MAX_GMEM_SHADOW_BUFFERS );
+
+ width = gmem_width < drawctxt->context_gmem_shadow.width ? gmem_width : drawctxt->context_gmem_shadow.width;
+ height = gmem_height < drawctxt->context_gmem_shadow.height ? gmem_height : drawctxt->context_gmem_shadow.height;
+
+ drawctxt->user_gmem_shadow[buffer_id].width = width;
+ drawctxt->user_gmem_shadow[buffer_id].height = height;
+ drawctxt->user_gmem_shadow[buffer_id].pitch = shadow_buffer->stride_bytes/4;
+
+ kos_memcpy( &drawctxt->user_gmem_shadow[buffer_id].gmemshadow, &shadow_buffer->data, sizeof( gsl_memdesc_t ) );
+ // Calculate offset
+ drawctxt->user_gmem_shadow[buffer_id].offset = (int)shadow_buffer->stride_bytes/4*((int)shadow_y-(int)gmem_y)+(int)shadow_x-(int)gmem_x;
+
+ drawctxt->user_gmem_shadow[buffer_id].offset_x = shadow_x;
+ drawctxt->user_gmem_shadow[buffer_id].offset_y = shadow_y;
+ drawctxt->user_gmem_shadow[buffer_id].gmem_offset_x = gmem_x;
+ drawctxt->user_gmem_shadow[buffer_id].gmem_offset_y = gmem_y;
+
+ drawctxt->user_gmem_shadow[buffer_id].size = drawctxt->user_gmem_shadow[buffer_id].gmemshadow.size;
+
+ // Modify quad vertices
+ set_gmem_copy_quad(shadow);
+
+ // Modify commands
+ build_gmem2sys_cmds(drawctxt, NULL, shadow);
+ build_sys2gmem_cmds(drawctxt, NULL, shadow);
+
+ // Release context GMEM shadow if found
+ if (drawctxt->context_gmem_shadow.gmemshadow.size > 0)
+ {
+ kgsl_sharedmem_free0(&drawctxt->context_gmem_shadow.gmemshadow, GSL_CALLER_PROCESSID_GET());
+ drawctxt->context_gmem_shadow.gmemshadow.size = 0;
+ }
+ }
+
+ // Enable GMEM shadowing if we have any of the user buffers enabled
+ drawctxt->flags &= ~CTXT_FLAGS_GMEM_SHADOW;
+ for( i = 0; i < GSL_MAX_GMEM_SHADOW_BUFFERS; i++ )
+ {
+ if( drawctxt->user_gmem_shadow[i].gmemshadow.size > 0 )
+ {
+ drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW;
+ }
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+ return (GSL_SUCCESS);
+}
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+// switch drawing contexts
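+//
+// Saves the outgoing context's registers, shader state and (when flagged) GMEM
+// contents into its shadow buffers, then restores the same state for the
+// incoming context. Both the save and the restore are skipped in safe mode.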
+//////////////////////////////////////////////////////////////////////////////
+
+void
+kgsl_drawctxt_switch(gsl_device_t *device, gsl_drawctxt_t *drawctxt, gsl_flags_t flags)
+{
+ gsl_drawctxt_t *active_ctxt = device->drawctxt_active;
+
+ if (drawctxt != GSL_CONTEXT_NONE)
+ {
+        if (0)  // disabled: was (flags & GSL_CONTEXT_SAVE_GMEM)
+ {
+ // Set the flag in context so that the save is done when this context is switched out.
+ drawctxt->flags |= CTXT_FLAGS_GMEM_SAVE;
+ }
+ else
+ {
+ // Remove GMEM saving flag from the context
+ drawctxt->flags &= ~CTXT_FLAGS_GMEM_SAVE;
+ }
+ }
+
+ // already current?
+ if (active_ctxt == drawctxt)
+ {
+ return;
+ }
+
+ // save old context, when not running in safe mode
+ if (active_ctxt != GSL_CONTEXT_NONE && !(device->flags & GSL_FLAGS_SAFEMODE))
+ {
+ // save registers and constants.
+ kgsl_ringbuffer_issuecmds(device, 0, active_ctxt->reg_save, 3, active_ctxt->pid);
+
+ if (active_ctxt->flags & CTXT_FLAGS_SHADER_SAVE)
+ {
+ // save shader partitioning and instructions.
+ kgsl_ringbuffer_issuecmds(device, 1, active_ctxt->shader_save, 3, active_ctxt->pid);
+
+ // fixup shader partitioning parameter for SET_SHADER_BASES.
+ kgsl_ringbuffer_issuecmds(device, 0, active_ctxt->shader_fixup, 3, active_ctxt->pid);
+
+ active_ctxt->flags |= CTXT_FLAGS_SHADER_RESTORE;
+ }
+
+ if (active_ctxt->flags & CTXT_FLAGS_GMEM_SHADOW && active_ctxt->flags & CTXT_FLAGS_GMEM_SAVE )
+ {
+ // save gmem. (note: changes shader. shader must already be saved.)
+
+ unsigned int i, numbuffers = 0;
+
+ for( i = 0; i < GSL_MAX_GMEM_SHADOW_BUFFERS; i++ )
+ {
+ if( active_ctxt->user_gmem_shadow[i].gmemshadow.size > 0 )
+ {
+ kgsl_ringbuffer_issuecmds(device, 1, active_ctxt->user_gmem_shadow[i].gmem_save, 3, active_ctxt->pid);
+
+ // Restore TP0_CHICKEN
+ kgsl_ringbuffer_issuecmds(device, 0, active_ctxt->chicken_restore, 3, active_ctxt->pid);
+ numbuffers++;
+ }
+ }
+ if( numbuffers == 0 )
+ {
+ // No user defined buffers -> use context default
+ kgsl_ringbuffer_issuecmds(device, 1, active_ctxt->context_gmem_shadow.gmem_save, 3, active_ctxt->pid);
+ // Restore TP0_CHICKEN
+ kgsl_ringbuffer_issuecmds(device, 0, active_ctxt->chicken_restore, 3, active_ctxt->pid);
+ }
+
+ active_ctxt->flags |= CTXT_FLAGS_GMEM_RESTORE;
+ }
+ }
+
+ device->drawctxt_active = drawctxt;
+
+ // restore new context, when not running in safe mode
+ if (drawctxt != GSL_CONTEXT_NONE && !(device->flags & GSL_FLAGS_SAFEMODE))
+ {
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX, KGSL_DEBUG_DUMPX(BB_DUMP_MEMWRITE, drawctxt->gpustate.gpuaddr, (unsigned int)drawctxt->gpustate.hostptr, LCC_SHADOW_SIZE + REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE , "kgsl_drawctxt_switch"));
+
+ // restore gmem. (note: changes shader. shader must not already be restored.)
+ if (drawctxt->flags & CTXT_FLAGS_GMEM_RESTORE)
+ {
+ unsigned int i, numbuffers = 0;
+
+ for( i = 0; i < GSL_MAX_GMEM_SHADOW_BUFFERS; i++ )
+ {
+ if( drawctxt->user_gmem_shadow[i].gmemshadow.size > 0 )
+ {
+ kgsl_ringbuffer_issuecmds(device, 1, drawctxt->user_gmem_shadow[i].gmem_restore, 3, drawctxt->pid);
+
+ // Restore TP0_CHICKEN
+ kgsl_ringbuffer_issuecmds(device, 0, drawctxt->chicken_restore, 3, drawctxt->pid);
+ numbuffers++;
+ }
+ }
+ if( numbuffers == 0 )
+ {
+ // No user defined buffers -> use context default
+ kgsl_ringbuffer_issuecmds(device, 1, drawctxt->context_gmem_shadow.gmem_restore, 3, drawctxt->pid);
+ // Restore TP0_CHICKEN
+ kgsl_ringbuffer_issuecmds(device, 0, drawctxt->chicken_restore, 3, drawctxt->pid);
+ }
+
+ drawctxt->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
+ }
+
+ // restore registers and constants.
+ kgsl_ringbuffer_issuecmds(device, 0, drawctxt->reg_restore, 3, drawctxt->pid);
+
+ // restore shader instructions & partitioning.
+ if (drawctxt->flags & CTXT_FLAGS_SHADER_RESTORE)
+ {
+ kgsl_ringbuffer_issuecmds(device, 0, drawctxt->shader_restore, 3, drawctxt->pid);
+ }
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// destroy all drawing contexts
+//////////////////////////////////////////////////////////////////////////////
+int
+kgsl_drawctxt_destroyall(gsl_device_t *device)
+{
+ int i;
+ gsl_drawctxt_t *drawctxt;
+
+ for (i = 0; i < GSL_CONTEXT_MAX; i++)
+ {
+ drawctxt = &device->drawctxt[i];
+
+ if (drawctxt->flags != CTXT_FLAGS_NOT_IN_USE)
+ {
+ // destroy state shadow, if allocated
+ if (drawctxt->flags & CTXT_FLAGS_STATE_SHADOW)
+ kgsl_sharedmem_free0(&drawctxt->gpustate, GSL_CALLER_PROCESSID_GET());
+
+ // destroy gmem shadow, if allocated
+ if (drawctxt->context_gmem_shadow.gmemshadow.size > 0)
+ {
+ kgsl_sharedmem_free0(&drawctxt->context_gmem_shadow.gmemshadow, GSL_CALLER_PROCESSID_GET());
+ drawctxt->context_gmem_shadow.gmemshadow.size = 0;
+ }
+
+ drawctxt->flags = CTXT_FLAGS_NOT_IN_USE;
+
+ device->drawctxt_count--;
+ KOS_ASSERT(device->drawctxt_count >= 0);
+ }
+ }
+
+ return (GSL_SUCCESS);
+}
+
+#endif
diff --git a/drivers/mxc/amd-gpu/common/gsl_driver.c b/drivers/mxc/amd-gpu/common/gsl_driver.c
new file mode 100644
index 000000000000..fd4bcc0df96a
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_driver.c
@@ -0,0 +1,330 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_hal.h"
+
+
+//////////////////////////////////////////////////////////////////////////////
+// defines
+//////////////////////////////////////////////////////////////////////////////
+#define GSL_PROCESSID_NONE 0x00000000
+
+#define GSL_DRVFLAGS_EXTERNAL 0x10000000
+#define GSL_DRVFLAGS_INTERNAL 0x20000000
+
+
+//////////////////////////////////////////////////////////////////////////////
+// globals
+//////////////////////////////////////////////////////////////////////////////
+#ifndef KGSL_USER_MODE
+static gsl_flags_t gsl_driver_initialized = 0;
+gsl_driver_t gsl_driver;
+#else
+extern gsl_flags_t gsl_driver_initialized;
+extern gsl_driver_t gsl_driver;
+#endif
+
+
+//////////////////////////////////////////////////////////////////////////////
+// functions
+//////////////////////////////////////////////////////////////////////////////
+
+int
+kgsl_driver_init0(gsl_flags_t flags, gsl_flags_t flags_debug)
+{
+ int status = GSL_SUCCESS;
+
+ if (!(gsl_driver_initialized & GSL_FLAGS_INITIALIZED0))
+ {
+#ifdef GSL_LOG
+ // Uncomment these to enable logging.
+ //kgsl_log_init();
+ //kgsl_log_open_stdout( KGSL_LOG_GROUP_ALL | KGSL_LOG_LEVEL_ALL | KGSL_LOG_TIMESTAMP
+ // | KGSL_LOG_THREAD_ID | KGSL_LOG_PROCESS_ID );
+ //kgsl_log_open_file( "c:\\kgsl_log.txt", KGSL_LOG_GROUP_ALL | KGSL_LOG_LEVEL_ALL | KGSL_LOG_TIMESTAMP
+ // | KGSL_LOG_THREAD_ID | KGSL_LOG_PROCESS_ID );
+#endif
+ kos_memset(&gsl_driver, 0, sizeof(gsl_driver_t));
+
+ GSL_API_MUTEX_CREATE();
+ }
+
+#ifdef _DEBUG
+ // set debug flags on every entry, and prior to hal initialization
+ gsl_driver.flags_debug |= flags_debug;
+#else
+ (void) flags_debug; // unref formal parameter
+#endif // _DEBUG
+
+
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX,
+ {
+ KGSL_DEBUG_DUMPX_OPEN("dumpx.tb", 0);
+ KGSL_DEBUG_DUMPX( BB_DUMP_ENABLE, 0, 0, 0, " ");
+ });
+
+ KGSL_DEBUG_TBDUMP_OPEN("tbdump.txt");
+
+ if (!(gsl_driver_initialized & GSL_FLAGS_INITIALIZED0))
+ {
+ GSL_API_MUTEX_LOCK();
+
+ // init hal
+ status = kgsl_hal_init();
+
+ if (status == GSL_SUCCESS)
+ {
+ gsl_driver_initialized |= flags;
+ gsl_driver_initialized |= GSL_FLAGS_INITIALIZED0;
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+ }
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_driver_close0(gsl_flags_t flags)
+{
+ int status = GSL_SUCCESS;
+
+ if ((gsl_driver_initialized & GSL_FLAGS_INITIALIZED0) && (gsl_driver_initialized & flags))
+ {
+ GSL_API_MUTEX_LOCK();
+
+        // close hal
+ status = kgsl_hal_close();
+
+ GSL_API_MUTEX_UNLOCK();
+
+ GSL_API_MUTEX_FREE();
+
+#ifdef GSL_LOG
+ kgsl_log_close();
+#endif
+
+ gsl_driver_initialized &= ~flags;
+ gsl_driver_initialized &= ~GSL_FLAGS_INITIALIZED0;
+
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX,
+ {
+ KGSL_DEBUG_DUMPX_CLOSE();
+ });
+
+ KGSL_DEBUG_TBDUMP_CLOSE();
+ }
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_driver_init()
+{
+ // only an external (platform specific device driver) component should call this
+
+ return(kgsl_driver_init0(GSL_DRVFLAGS_EXTERNAL, 0));
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_driver_close()
+{
+ // only an external (platform specific device driver) component should call this
+
+ return(kgsl_driver_close0(GSL_DRVFLAGS_EXTERNAL));
+}
+
+//----------------------------------------------------------------------------
+
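+// kgsl_driver_entry: per-process open of the driver. It registers the calling
+// process in the caller table, initializes shared memory and all devices on the
+// first successful entry, and attaches the per-device process callbacks; on any
+// failure the caller is removed from the table again.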
+KGSL_API int
+kgsl_driver_entry(gsl_flags_t flags)
+{
+ int status = GSL_FAILURE;
+ int index, i;
+ unsigned int pid;
+
+ if (kgsl_driver_init0(GSL_DRVFLAGS_INTERNAL, flags) != GSL_SUCCESS)
+ {
+ return (GSL_FAILURE);
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_DRIVER | KGSL_LOG_LEVEL_TRACE, "--> int kgsl_driver_entry( gsl_flags_t flags=%d )\n", flags );
+
+ GSL_API_MUTEX_LOCK();
+
+ pid = GSL_CALLER_PROCESSID_GET();
+
+ // if caller process has not already opened access
+ status = kgsl_driver_getcallerprocessindex(pid, &index);
+ if (status != GSL_SUCCESS)
+ {
+ // then, add caller pid to process table
+ status = kgsl_driver_getcallerprocessindex(GSL_PROCESSID_NONE, &index);
+ if (status == GSL_SUCCESS)
+ {
+ gsl_driver.callerprocess[index] = pid;
+ gsl_driver.refcnt++;
+ }
+ }
+
+ if (status == GSL_SUCCESS)
+ {
+ if (!(gsl_driver_initialized & GSL_FLAGS_INITIALIZED))
+ {
+ // init memory apertures
+ status = kgsl_sharedmem_init(&gsl_driver.shmem);
+ if (status == GSL_SUCCESS)
+ {
+ // init devices
+ for (i = 0; i < GSL_DEVICE_MAX; i++)
+ {
+ status = kgsl_device_init(&gsl_driver.device[i], (gsl_deviceid_t)(i + 1));
+ if (status != GSL_SUCCESS)
+ {
+ break;
+ }
+ }
+ }
+
+ if (status == GSL_SUCCESS)
+ {
+ gsl_driver_initialized |= GSL_FLAGS_INITIALIZED;
+ }
+ }
+
+ // walk through process attach callbacks
+ if (status == GSL_SUCCESS)
+ {
+ for (i = 0; i < GSL_DEVICE_MAX; i++)
+ {
+ status = kgsl_device_attachcallback(&gsl_driver.device[i], pid);
+ if (status != GSL_SUCCESS)
+ {
+ break;
+ }
+ }
+ }
+
+ // if something went wrong
+ if (status != GSL_SUCCESS)
+ {
+ // then, remove caller pid from process table
+ if (kgsl_driver_getcallerprocessindex(pid, &index) == GSL_SUCCESS)
+ {
+ gsl_driver.callerprocess[index] = GSL_PROCESSID_NONE;
+ gsl_driver.refcnt--;
+ }
+ }
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+ kgsl_log_write( KGSL_LOG_GROUP_DRIVER | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_driver_entry. Return value: %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_driver_exit0(unsigned int pid)
+{
+ int status = GSL_SUCCESS;
+ int index, i;
+
+ GSL_API_MUTEX_LOCK();
+
+ if (gsl_driver_initialized & GSL_FLAGS_INITIALIZED)
+ {
+ if (kgsl_driver_getcallerprocessindex(pid, &index) == GSL_SUCCESS)
+ {
+ // walk through process detach callbacks
+ for (i = 0; i < GSL_DEVICE_MAX; i++)
+ {
+ // Empty the freememqueue of this device
+ kgsl_cmdstream_memqueue_drain(&gsl_driver.device[i]);
+
+ // Detach callback
+ status = kgsl_device_detachcallback(&gsl_driver.device[i], pid);
+ if (status != GSL_SUCCESS)
+ {
+ break;
+ }
+ }
+
+ // last running caller process
+ if (gsl_driver.refcnt - 1 == 0)
+ {
+ // close devices
+ for (i = 0; i < GSL_DEVICE_MAX; i++)
+ {
+ kgsl_device_close(&gsl_driver.device[i]);
+ }
+
+ // shutdown memory apertures
+ kgsl_sharedmem_close(&gsl_driver.shmem);
+
+ gsl_driver_initialized &= ~GSL_FLAGS_INITIALIZED;
+ }
+
+ // remove caller pid from process table
+ gsl_driver.callerprocess[index] = GSL_PROCESSID_NONE;
+ gsl_driver.refcnt--;
+ }
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+ if (!(gsl_driver_initialized & GSL_FLAGS_INITIALIZED))
+ {
+ kgsl_driver_close0(GSL_DRVFLAGS_INTERNAL);
+ }
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_driver_exit(void)
+{
+ int status;
+
+ kgsl_log_write( KGSL_LOG_GROUP_DRIVER | KGSL_LOG_LEVEL_TRACE, "--> int kgsl_driver_exit()\n" );
+
+ status = kgsl_driver_exit0(GSL_CALLER_PROCESSID_GET());
+
+ kgsl_log_write( KGSL_LOG_GROUP_DRIVER | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_driver_exit(). Return value: %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_driver_destroy(unsigned int pid)
+{
+ return (kgsl_driver_exit0(pid));
+}
diff --git a/drivers/mxc/amd-gpu/common/gsl_g12.c b/drivers/mxc/amd-gpu/common/gsl_g12.c
new file mode 100644
index 000000000000..513f6728a842
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_g12.c
@@ -0,0 +1,987 @@
+/* Copyright (c) 2002,2007-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_hal.h"
+#include "kos_libapi.h"
+#include "gsl_cmdstream.h"
+#ifdef _LINUX
+#include <linux/sched.h>
+#endif
+
+#ifdef GSL_BLD_G12
+#define GSL_TIMESTAMP_EPSILON 20000
+#define GSL_IRQ_TIMEOUT 200
+
+
+//----------------------------------------------------------------------------
+
+#define GSL_HAL_NUMCMDBUFFERS 5
+#define GSL_HAL_CMDBUFFERSIZE ((1024 + 13) * sizeof(unsigned int))
+
+#define ALIGN_IN_BYTES( dim, alignment ) ( ( (dim) + ((alignment)-1) ) & ~((alignment)-1) )
+
+
+#ifdef _Z180
+#define NUMTEXUNITS 4
+#define TEXUNITREGCOUNT 25
+#define VG_REGCOUNT 0x39
+#define GSL_HAL_EDGE0BUFSIZE 0x3E8+64
+#define GSL_HAL_EDGE1BUFSIZE 0x8000+64
+#define GSL_HAL_EDGE2BUFSIZE 0x80020+64
+#define GSL_HAL_EDGE0REG ADDR_VGV1_CBUF
+#define GSL_HAL_EDGE1REG ADDR_VGV1_BBUF
+#define GSL_HAL_EDGE2REG ADDR_VGV1_EBUF
+#else
+#define NUMTEXUNITS 2
+#define TEXUNITREGCOUNT 24
+#define VG_REGCOUNT 0x3A
+#define L1TILESIZE 64
+#define GSL_HAL_EDGE0BUFSIZE L1TILESIZE*L1TILESIZE*4+64
+#define GSL_HAL_EDGE1BUFSIZE L1TILESIZE*L1TILESIZE*16+64
+#define GSL_HAL_EDGE0REG ADDR_VGV1_CBASE1
+#define GSL_HAL_EDGE1REG ADDR_VGV1_UBASE2
+#endif
+
+#define PACKETSIZE_BEGIN 3
+#define PACKETSIZE_G2DCOLOR 2
+#define PACKETSIZE_TEXUNIT (TEXUNITREGCOUNT*2)
+#define PACKETSIZE_REG (VG_REGCOUNT*2)
+#define PACKETSIZE_STATE (PACKETSIZE_TEXUNIT*NUMTEXUNITS + PACKETSIZE_REG + PACKETSIZE_BEGIN + PACKETSIZE_G2DCOLOR)
+#define PACKETSIZE_STATESTREAM ALIGN_IN_BYTES((PACKETSIZE_STATE*sizeof(unsigned int)), 32) / sizeof(unsigned int)
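+// PACKETSIZE_STATESTREAM is the full per-context state packet rounded up to a
+// 32-byte boundary, expressed in dwords; kgsl_g12_issueibcmds skips this many
+// dwords of the indirect buffer when no context switch is required.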
+
+//----------------------------------------------------------------------------
+
+typedef struct
+{
+ unsigned int id;
+ // unsigned int regs[];
+}gsl_hal_z1xxdrawctx_t;
+
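+// Book-keeping for Z1xx command submission: a small ring of
+// GSL_HAL_NUMCMDBUFFERS command buffers with their shared-memory descriptors,
+// the edge buffers programmed into the hardware at context creation, and the
+// timestamp last issued from each buffer so that a buffer is only reused once
+// the hardware has consumed it.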
+typedef struct
+{
+ unsigned int offs;
+ unsigned int curr;
+ unsigned int prevctx;
+
+ gsl_memdesc_t e0;
+ gsl_memdesc_t e1;
+ gsl_memdesc_t e2;
+ unsigned int* cmdbuf[GSL_HAL_NUMCMDBUFFERS];
+ gsl_memdesc_t cmdbufdesc[GSL_HAL_NUMCMDBUFFERS];
+ gsl_timestamp_t timestamp[GSL_HAL_NUMCMDBUFFERS];
+
+ unsigned int numcontext;
+}gsl_z1xx_t;
+
+static gsl_z1xx_t g_z1xx = {0};
+
+//----------------------------------------------------------------------------
+
+
+//////////////////////////////////////////////////////////////////////////////
+// functions
+//////////////////////////////////////////////////////////////////////////////
+
+static int kgsl_g12_addtimestamp(gsl_device_t* device, gsl_timestamp_t *timestamp);
+static int kgsl_g12_issueibcmds(gsl_device_t* device, int drawctxt_index, gpuaddr_t ibaddr, int sizedwords, gsl_timestamp_t *timestamp, unsigned int flags);
+static int kgsl_g12_context_create(gsl_device_t* device, gsl_context_type_t type, unsigned int *drawctxt_id, gsl_flags_t flags);
+static int kgsl_g12_context_destroy(gsl_device_t* device, unsigned int drawctxt_id);
+static unsigned int drawctx_id = 0;
+static int kgsl_g12_idle(gsl_device_t *device, unsigned int timeout);
+#ifndef _LINUX
+static void irq_thread(void);
+#endif
+
+//----------------------------------------------------------------------------
+
+void
+kgsl_g12_intrcallback(gsl_intrid_t id, void *cookie)
+{
+ gsl_device_t *device = (gsl_device_t *) cookie;
+
+ switch(id)
+ {
+ // non-error condition interrupt
+ case GSL_INTR_G12_G2D:
+#ifdef _LINUX
+ queue_work(device->irq_workq, &(device->irq_work));
+ break;
+#endif
+#ifndef _Z180
+ case GSL_INTR_G12_FBC:
+#endif //_Z180
+ // signal intr completion event
+ kos_event_signal(device->intr.evnt[id]);
+ break;
+
+ // error condition interrupt
+ case GSL_INTR_G12_FIFO:
+ device->ftbl.device_destroy(device);
+ break;
+
+ case GSL_INTR_G12_MH:
+ // don't do anything. this is handled by the MMU manager
+ break;
+
+ default:
+ break;
+ }
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_isr(gsl_device_t *device)
+{
+ unsigned int status;
+#ifdef _DEBUG
+ REG_MH_MMU_PAGE_FAULT page_fault = {0};
+ REG_MH_AXI_ERROR axi_error = {0};
+#endif // DEBUG
+
+ // determine if G12 is interrupting
+ device->ftbl.device_regread(device, (ADDR_VGC_IRQSTATUS >> 2), &status);
+
+ if (status)
+ {
+ // if G12 MH is interrupting, clear MH block interrupt first, then master G12 MH interrupt
+ if (status & (1 << VGC_IRQSTATUS_MH_FSHIFT))
+ {
+#ifdef _DEBUG
+ // obtain mh error information
+ device->ftbl.device_regread(device, ADDR_MH_MMU_PAGE_FAULT, (unsigned int *)&page_fault);
+ device->ftbl.device_regread(device, ADDR_MH_AXI_ERROR, (unsigned int *)&axi_error);
+#endif // DEBUG
+
+ kgsl_intr_decode(device, GSL_INTR_BLOCK_G12_MH);
+ }
+
+ kgsl_intr_decode(device, GSL_INTR_BLOCK_G12);
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_tlbinvalidate(gsl_device_t *device, unsigned int reg_invalidate, unsigned int pid)
+{
+#ifndef GSL_NO_MMU
+ REG_MH_MMU_INVALIDATE mh_mmu_invalidate = {0};
+
+ // unreferenced formal parameter
+ (void) pid;
+
+ mh_mmu_invalidate.INVALIDATE_ALL = 1;
+ mh_mmu_invalidate.INVALIDATE_TC = 1;
+
+ device->ftbl.device_regwrite(device, reg_invalidate, *(unsigned int *) &mh_mmu_invalidate);
+#else
+ (void)device;
+ (void)reg_invalidate;
+#endif
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_setpagetable(gsl_device_t *device, unsigned int reg_ptbase, gpuaddr_t ptbase, unsigned int pid)
+{
+ // unreferenced formal parameter
+ (void) pid;
+#ifndef GSL_NO_MMU
+ device->ftbl.device_idle(device, GSL_TIMEOUT_DEFAULT);
+ device->ftbl.device_regwrite(device, reg_ptbase, ptbase);
+#else
+ (void)device;
+ (void)reg_ptbase;
+    (void)ptbase;
+#endif
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+#ifdef _LINUX
+static void kgsl_g12_updatetimestamp(gsl_device_t *device)
+{
+ unsigned int count = 0;
+ device->ftbl.device_regread(device, (ADDR_VGC_IRQ_ACTIVE_CNT >> 2), &count);
+ count >>= 8;
+ count &= 255;
+ device->timestamp += count;
+ kgsl_sharedmem_write0(&device->memstore, GSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp), &device->timestamp, 4, 0);
+}
+
+//----------------------------------------------------------------------------
+
+static void kgsl_g12_irqtask(struct work_struct *work)
+{
+ gsl_device_t *device = &gsl_driver.device[GSL_DEVICE_G12-1];
+ kgsl_g12_updatetimestamp(device);
+ wake_up_interruptible_all(&device->timestamp_waitq);
+}
+#endif
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_init(gsl_device_t *device)
+{
+ int status = GSL_FAILURE;
+
+ device->flags |= GSL_FLAGS_INITIALIZED;
+
+ kgsl_hal_setpowerstate(device->id, GSL_PWRFLAGS_POWER_ON, 100);
+
+ // setup MH arbiter - MH offsets are considered to be dword based, therefore no down shift
+ device->ftbl.device_regwrite(device, ADDR_MH_ARBITER_CONFIG, *(unsigned int *) &gsl_cfg_g12_mharb);
+
+ // init interrupt
+ status = kgsl_intr_init(device);
+ if (status != GSL_SUCCESS)
+ {
+ device->ftbl.device_stop(device);
+ return (status);
+ }
+
+ // enable irq
+ device->ftbl.device_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0x3);
+
+#ifndef GSL_NO_MMU
+ // enable master interrupt for G12 MH
+ kgsl_intr_attach(&device->intr, GSL_INTR_G12_MH, kgsl_g12_intrcallback, (void *) device);
+ kgsl_intr_enable(&device->intr, GSL_INTR_G12_MH);
+
+ // init mmu
+ status = kgsl_mmu_init(device);
+ if (status != GSL_SUCCESS)
+ {
+ device->ftbl.device_stop(device);
+ return (status);
+ }
+#endif
+
+#ifdef IRQTHREAD_POLL
+ // Create event to trigger IRQ polling thread
+ device->irqthread_event = kos_event_create(0);
+#endif
+
+ // enable interrupts
+ kgsl_intr_attach(&device->intr, GSL_INTR_G12_G2D, kgsl_g12_intrcallback, (void *) device);
+ kgsl_intr_attach(&device->intr, GSL_INTR_G12_FIFO, kgsl_g12_intrcallback, (void *) device);
+ kgsl_intr_enable(&device->intr, GSL_INTR_G12_G2D);
+ kgsl_intr_enable(&device->intr, GSL_INTR_G12_FIFO);
+
+#ifndef _Z180
+ kgsl_intr_attach(&device->intr, GSL_INTR_G12_FBC, kgsl_g12_intrcallback, (void *) device);
+ //kgsl_intr_enable(&device->intr, GSL_INTR_G12_FBC);
+#endif //_Z180
+
+ // create thread for IRQ handling
+#if defined(__SYMBIAN32__)
+ kos_thread_create( (oshandle_t)irq_thread, &(device->irq_thread) );
+#elif defined(_LINUX)
+ device->irq_workq = create_singlethread_workqueue("z1xx_workq");
+ INIT_WORK(&device->irq_work, kgsl_g12_irqtask);
+#else
+ #pragma warning(disable:4152)
+ device->irq_thread_handle = kos_thread_create( (oshandle_t)irq_thread, &(device->irq_thread) );
+#endif
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_close(gsl_device_t *device)
+{
+ int status = GSL_FAILURE;
+
+ if (device->refcnt == 0)
+ {
+        // wait for pending interrupts before shutting down the G12 intr thread so
+        // that the irq counters are emptied. Otherwise stale counts may still be in
+        // the registers the next time the system starts up, which results in a hang.
+ status = device->ftbl.device_idle(device, 1000);
+ KOS_ASSERT(status == GSL_SUCCESS);
+
+#ifndef _LINUX
+ kos_thread_destroy(device->irq_thread_handle);
+#else
+ destroy_workqueue(device->irq_workq);
+#endif
+
+ // shutdown command window
+ kgsl_cmdwindow_close(device);
+
+#ifndef GSL_NO_MMU
+ // shutdown mmu
+ kgsl_mmu_close(device);
+#endif
+ // disable interrupts
+ kgsl_intr_detach(&device->intr, GSL_INTR_G12_MH);
+ kgsl_intr_detach(&device->intr, GSL_INTR_G12_G2D);
+ kgsl_intr_detach(&device->intr, GSL_INTR_G12_FIFO);
+#ifndef _Z180
+ kgsl_intr_detach(&device->intr, GSL_INTR_G12_FBC);
+#endif //_Z180
+
+ // shutdown interrupt
+ kgsl_intr_close(device);
+
+ kgsl_hal_setpowerstate(device->id, GSL_PWRFLAGS_POWER_OFF, 0);
+
+ device->ftbl.device_idle(device, GSL_TIMEOUT_NONE);
+ device->flags &= ~GSL_FLAGS_INITIALIZED;
+
+#if defined(__SYMBIAN32__)
+ while(device->irq_thread)
+ {
+ kos_sleep(20);
+ }
+#endif
+ drawctx_id = 0;
+
+ KOS_ASSERT(g_z1xx.numcontext == 0);
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_destroy(gsl_device_t *device)
+{
+ int i;
+ unsigned int pid;
+
+#ifdef _DEBUG
+ // for now, signal catastrophic failure in a brute force way
+ KOS_ASSERT(0);
+#endif // _DEBUG
+
+ //todo: hard reset core?
+
+ for (i = 0; i < GSL_CALLER_PROCESS_MAX; i++)
+ {
+ pid = device->callerprocess[i];
+ if (pid)
+ {
+ device->ftbl.device_stop(device);
+ kgsl_driver_destroy(pid);
+
+ // todo: terminate client process?
+ }
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_start(gsl_device_t *device, gsl_flags_t flags)
+{
+ int status = GSL_SUCCESS;
+
+ (void) flags; // unreferenced formal parameter
+
+ kgsl_hal_setpowerstate(device->id, GSL_PWRFLAGS_CLK_ON, 100);
+
+ // init command window
+ status = kgsl_cmdwindow_init(device);
+ if (status != GSL_SUCCESS)
+ {
+ device->ftbl.device_stop(device);
+ return (status);
+ }
+
+ KOS_ASSERT(g_z1xx.numcontext == 0);
+
+ device->flags |= GSL_FLAGS_STARTED;
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_stop(gsl_device_t *device)
+{
+ int status;
+
+ KOS_ASSERT(device->refcnt == 0);
+
+    /* wait for the device to idle before turning its clock off */
+ status = device->ftbl.device_idle(device, 1000);
+ KOS_ASSERT(status == GSL_SUCCESS);
+
+ status = kgsl_hal_setpowerstate(device->id, GSL_PWRFLAGS_CLK_OFF, 0);
+ device->flags &= ~GSL_FLAGS_STARTED;
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_getproperty(gsl_device_t *device, gsl_property_type_t type, void *value, unsigned int sizebytes)
+{
+ int status = GSL_FAILURE;
+ // unreferenced formal parameter
+ (void) sizebytes;
+
+ if (type == GSL_PROP_DEVICE_INFO)
+ {
+ gsl_devinfo_t *devinfo = (gsl_devinfo_t *) value;
+
+ KOS_ASSERT(sizebytes == sizeof(gsl_devinfo_t));
+
+ devinfo->device_id = device->id;
+ devinfo->chip_id = (gsl_chipid_t)device->chip_id;
+#ifndef GSL_NO_MMU
+ devinfo->mmu_enabled = kgsl_mmu_isenabled(&device->mmu);
+#endif
+
+ status = GSL_SUCCESS;
+ }
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_setproperty(gsl_device_t *device, gsl_property_type_t type, void *value, unsigned int sizebytes)
+{
+ int status = GSL_FAILURE;
+
+ // unreferenced formal parameters
+ (void) device;
+
+ if (type == GSL_PROP_DEVICE_POWER)
+ {
+ gsl_powerprop_t *power = (gsl_powerprop_t *) value;
+
+ KOS_ASSERT(sizebytes == sizeof(gsl_powerprop_t));
+
+ if (!(device->flags & GSL_FLAGS_SAFEMODE))
+ {
+ kgsl_hal_setpowerstate(device->id, power->flags, power->value);
+ }
+
+ status = GSL_SUCCESS;
+ }
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_idle(gsl_device_t *device, unsigned int timeout)
+{
+ if ( device->flags & GSL_FLAGS_STARTED )
+ {
+ for ( ; ; )
+ {
+ gsl_timestamp_t retired = kgsl_cmdstream_readtimestamp0( device->id, GSL_TIMESTAMP_RETIRED );
+ gsl_timestamp_t ts_diff = retired - device->current_timestamp;
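+            // timestamps wrap, so compare via a signed difference: the device is
+            // idle once the retired timestamp has caught up with current_timestamp,
+            // or the difference is so large that the counter must have wrapped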
+ if ( ts_diff >= 0 || ts_diff < -GSL_TIMESTAMP_EPSILON )
+ break;
+ kos_sleep(10);
+ }
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_regread(gsl_device_t *device, unsigned int offsetwords, unsigned int *value)
+{
+ // G12 MH register values can only be retrieved via dedicated read registers
+ if ((offsetwords >= ADDR_MH_ARBITER_CONFIG && offsetwords <= ADDR_MH_AXI_HALT_CONTROL) ||
+ (offsetwords >= ADDR_MH_MMU_CONFIG && offsetwords <= ADDR_MH_MMU_MPU_END))
+ {
+#ifdef _Z180
+ device->ftbl.device_regwrite(device, (ADDR_VGC_MH_READ_ADDR >> 2), offsetwords);
+ GSL_HAL_REG_READ(device->id, (unsigned int) device->regspace.mmio_virt_base, (ADDR_VGC_MH_READ_ADDR >> 2), value);
+#else
+ device->ftbl.device_regwrite(device, (ADDR_MMU_READ_ADDR >> 2), offsetwords);
+ GSL_HAL_REG_READ(device->id, (unsigned int) device->regspace.mmio_virt_base, (ADDR_MMU_READ_DATA >> 2), value);
+#endif
+ }
+ else
+ {
+ GSL_HAL_REG_READ(device->id, (unsigned int) device->regspace.mmio_virt_base, offsetwords, value);
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_regwrite(gsl_device_t *device, unsigned int offsetwords, unsigned int value)
+{
+ // G12 MH registers can only be written via the command window
+ if ((offsetwords >= ADDR_MH_ARBITER_CONFIG && offsetwords <= ADDR_MH_AXI_HALT_CONTROL) ||
+ (offsetwords >= ADDR_MH_MMU_CONFIG && offsetwords <= ADDR_MH_MMU_MPU_END))
+ {
+ kgsl_cmdwindow_write0(device->id, GSL_CMDWINDOW_MMU, offsetwords, value);
+ }
+ else
+ {
+ GSL_HAL_REG_WRITE(device->id, (unsigned int) device->regspace.mmio_virt_base, offsetwords, value);
+ }
+
+ // idle device when running in safe mode
+ if (device->flags & GSL_FLAGS_SAFEMODE)
+ {
+ device->ftbl.device_idle(device, GSL_TIMEOUT_DEFAULT);
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_waitirq(gsl_device_t *device, gsl_intrid_t intr_id, unsigned int *count, unsigned int timeout)
+{
+ int status = GSL_FAILURE_NOTSUPPORTED;
+#ifdef VG_HDK
+ (void)timeout;
+#endif
+
+#ifndef _Z180
+ if (intr_id == GSL_INTR_G12_G2D || intr_id == GSL_INTR_G12_FBC)
+#else
+ if (intr_id == GSL_INTR_G12_G2D)
+#endif //_Z180
+ {
+#ifndef VG_HDK
+ if (kgsl_intr_isenabled(&device->intr, intr_id) == GSL_SUCCESS)
+#endif
+ {
+ // wait until intr completion event is received and check that
+ // the interrupt is still enabled. If event is received, but
+ // interrupt is not enabled any more, the driver is shutting
+ // down and event structure is not valid anymore.
+#ifndef VG_HDK
+ if (kos_event_wait(device->intr.evnt[intr_id], timeout) == OS_SUCCESS && kgsl_intr_isenabled(&device->intr, intr_id) == GSL_SUCCESS)
+#endif
+ {
+ unsigned int cntrs;
+ int i;
+ kgsl_device_active(device);
+#ifndef VG_HDK
+ kos_event_reset(device->intr.evnt[intr_id]);
+ device->ftbl.device_regread(device, (ADDR_VGC_IRQ_ACTIVE_CNT >> 2), &cntrs);
+#else
+ device->ftbl.device_regread(device, (0x38 >> 2), &cntrs);
+#endif
+
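+                // ADDR_VGC_IRQ_ACTIVE_CNT packs an 8-bit pending count per interrupt
+                // source; accumulate each byte into the per-device counters before
+                // reporting the one that was waited on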
+ for (i = 0; i < GSL_G12_INTR_COUNT; i++)
+ {
+                    int intrcnt = (cntrs >> (8 * i)) & 255;
+
+                    // the maximum allowed counter value is 254; a value of 255 means something has gone wrong
+ if (intrcnt && (intrcnt < 0xFF))
+ {
+ device->intrcnt[i] += intrcnt;
+ }
+ }
+
+ *count = device->intrcnt[intr_id - GSL_INTR_G12_MH];
+ device->intrcnt[intr_id - GSL_INTR_G12_MH] = 0;
+ status = GSL_SUCCESS;
+ }
+#ifndef VG_HDK
+ else
+ {
+ status = GSL_FAILURE_TIMEOUT;
+ }
+#endif
+ }
+ }
+ else if(intr_id == GSL_INTR_FOOBAR)
+ {
+ if (kgsl_intr_isenabled(&device->intr, GSL_INTR_G12_G2D) == GSL_SUCCESS)
+ {
+ kos_event_signal(device->intr.evnt[GSL_INTR_G12_G2D]);
+ }
+ }
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_g12_waittimestamp(gsl_device_t *device, gsl_timestamp_t timestamp, unsigned int timeout)
+{
+#ifndef _LINUX
+ return kos_event_wait( device->timestamp_event, timeout );
+#else
+ int status = wait_event_interruptible_timeout(device->timestamp_waitq,
+ kgsl_cmdstream_check_timestamp(device->id, timestamp),
+ msecs_to_jiffies(timeout));
+ if (status > 0)
+ return GSL_SUCCESS;
+ else
+ return GSL_FAILURE;
+#endif
+}
+
+int
+kgsl_g12_getfunctable(gsl_functable_t *ftbl)
+{
+ ftbl->device_init = kgsl_g12_init;
+ ftbl->device_close = kgsl_g12_close;
+ ftbl->device_destroy = kgsl_g12_destroy;
+ ftbl->device_start = kgsl_g12_start;
+ ftbl->device_stop = kgsl_g12_stop;
+ ftbl->device_getproperty = kgsl_g12_getproperty;
+ ftbl->device_setproperty = kgsl_g12_setproperty;
+ ftbl->device_idle = kgsl_g12_idle;
+ ftbl->device_regread = kgsl_g12_regread;
+ ftbl->device_regwrite = kgsl_g12_regwrite;
+ ftbl->device_waitirq = kgsl_g12_waitirq;
+ ftbl->device_waittimestamp = kgsl_g12_waittimestamp;
+ ftbl->device_runpending = NULL;
+ ftbl->device_addtimestamp = kgsl_g12_addtimestamp;
+ ftbl->intr_isr = kgsl_g12_isr;
+ ftbl->mmu_tlbinvalidate = kgsl_g12_tlbinvalidate;
+ ftbl->mmu_setpagetable = kgsl_g12_setpagetable;
+ ftbl->cmdstream_issueibcmds = kgsl_g12_issueibcmds;
+ ftbl->context_create = kgsl_g12_context_create;
+ ftbl->context_destroy = kgsl_g12_context_destroy;
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
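+// addmarker: append two 5-dword packets that pad the current command buffer with
+// writes to ADDR_VGV3_LAST; the first packet carries the 0x8000 bit in its count
+// field (presumably the hardware's marker bit).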
+static void addmarker(gsl_z1xx_t* z1xx)
+{
+ KOS_ASSERT(z1xx);
+ {
+ unsigned int *p = z1xx->cmdbuf[z1xx->curr];
+ /* todo: use symbolic values */
+ p[z1xx->offs++] = 0x7C000176;
+ p[z1xx->offs++] = (0x8000|5);
+ p[z1xx->offs++] = ADDR_VGV3_LAST<<24;
+ p[z1xx->offs++] = ADDR_VGV3_LAST<<24;
+ p[z1xx->offs++] = ADDR_VGV3_LAST<<24;
+ p[z1xx->offs++] = 0x7C000176;
+ p[z1xx->offs++] = 5;
+ p[z1xx->offs++] = ADDR_VGV3_LAST<<24;
+ p[z1xx->offs++] = ADDR_VGV3_LAST<<24;
+ p[z1xx->offs++] = ADDR_VGV3_LAST<<24;
+ }
+}
+
+//----------------------------------------------------------------------------
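+// beginpacket: emit one marker packet followed by a 0x7C000275 packet that hands
+// 'cmd' (the indirect buffer address) to the hardware together with the
+// 0x1000|nextcnt count used to chain execution on to the next command buffer.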
+static void beginpacket(gsl_z1xx_t* z1xx, gpuaddr_t cmd, unsigned int nextcnt)
+{
+ unsigned int *p = z1xx->cmdbuf[z1xx->curr];
+
+ p[z1xx->offs++] = 0x7C000176;
+ p[z1xx->offs++] = 5;
+ p[z1xx->offs++] = ADDR_VGV3_LAST<<24;
+ p[z1xx->offs++] = ADDR_VGV3_LAST<<24;
+ p[z1xx->offs++] = ADDR_VGV3_LAST<<24;
+ p[z1xx->offs++] = 0x7C000275;
+ p[z1xx->offs++] = cmd;
+ p[z1xx->offs++] = 0x1000|nextcnt; // nextcount
+ p[z1xx->offs++] = ADDR_VGV3_LAST<<24;
+ p[z1xx->offs++] = ADDR_VGV3_LAST<<24;
+}
+
+//----------------------------------------------------------------------------
+
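+// kgsl_g12_issueibcmds: submit an indirect buffer on the G12 core. The driver
+// round-robins over GSL_HAL_NUMCMDBUFFERS command buffers: it waits until the
+// timestamp previously issued from the next buffer has retired, patches the
+// chain words just past the end of the caller's IB so the hardware jumps to that
+// next buffer, syncs the host-side command buffer into shared memory, and kicks
+// the hardware by writing ADDR_VGV3_CONTROL through the command window.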
+static int
+kgsl_g12_issueibcmds(gsl_device_t* device, int drawctxt_index, gpuaddr_t ibaddr, int sizedwords, gsl_timestamp_t *timestamp, unsigned int flags)
+{
+ unsigned int ofs = PACKETSIZE_STATESTREAM*sizeof(unsigned int);
+ unsigned int cnt = 5;
+ unsigned int cmd = ibaddr;
+ unsigned int nextbuf = (g_z1xx.curr+1)%GSL_HAL_NUMCMDBUFFERS;
+ unsigned int nextaddr = g_z1xx.cmdbufdesc[nextbuf].gpuaddr;
+ unsigned int nextcnt = 0x9000|5;
+ gsl_memdesc_t tmp = {0};
+ gsl_timestamp_t processed_timestamp;
+
+ (void) flags;
+
+ // read what is the latest timestamp device have processed
+ GSL_CMDSTREAM_GET_EOP_TIMESTAMP(device, (int *)&processed_timestamp);
+
+ /* wait for the next buffer's timestamp to occur */
+ while(processed_timestamp < g_z1xx.timestamp[nextbuf])
+ {
+#ifndef _LINUX
+ kos_event_wait(device->timestamp_event, 1000);
+ kos_event_reset(device->timestamp_event);
+#else
+ kgsl_cmdstream_waittimestamp(device->id, g_z1xx.timestamp[nextbuf], 1000);
+#endif
+ GSL_CMDSTREAM_GET_EOP_TIMESTAMP(device, (int *)&processed_timestamp);
+ }
+
+ *timestamp = g_z1xx.timestamp[nextbuf] = device->current_timestamp + 1;
+
+ /* context switch */
+ if (drawctxt_index != (int)g_z1xx.prevctx)
+ {
+ cnt = PACKETSIZE_STATESTREAM;
+ ofs = 0;
+ }
+ g_z1xx.prevctx = drawctxt_index;
+
+ g_z1xx.offs = 10;
+ beginpacket(&g_z1xx, cmd+ofs, cnt);
+
+ tmp.gpuaddr=ibaddr+(sizedwords*sizeof(unsigned int));
+ kgsl_sharedmem_write0(&tmp, 4, &nextaddr, 4, false);
+ kgsl_sharedmem_write0(&tmp, 8, &nextcnt, 4, false);
+
+ /* sync mem */
+ kgsl_sharedmem_write0((const gsl_memdesc_t *)&g_z1xx.cmdbufdesc[g_z1xx.curr], 0, g_z1xx.cmdbuf[g_z1xx.curr], (512 + 13) * sizeof(unsigned int), false);
+
+ g_z1xx.offs = 0;
+ g_z1xx.curr = nextbuf;
+
+ /* increment mark counter */
+ kgsl_cmdwindow_write0(2, GSL_CMDWINDOW_2D, ADDR_VGV3_CONTROL, flags);
+ kgsl_cmdwindow_write0(2, GSL_CMDWINDOW_2D, ADDR_VGV3_CONTROL, 0);
+
+ /* increment consumed timestamp */
+ device->current_timestamp++;
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+static int
+kgsl_g12_context_create(gsl_device_t* device, gsl_context_type_t type, unsigned int *drawctxt_id, gsl_flags_t flags)
+{
+ int status = 0;
+ int i;
+ int cmd;
+ gsl_flags_t gslflags = (GSL_MEMFLAGS_CONPHYS | GSL_MEMFLAGS_ALIGNPAGE);
+
+ // unreferenced formal parameters
+ (void) device;
+ (void) type;
+ //(void) drawctxt_id;
+ (void) flags;
+
+ kgsl_device_active(device);
+
+ if (g_z1xx.numcontext==0)
+ {
+ /* todo: move this to device create or start. Error checking!! */
+ for (i=0;i<GSL_HAL_NUMCMDBUFFERS;i++)
+ {
+ status = kgsl_sharedmem_alloc0(GSL_DEVICE_ANY, gslflags, GSL_HAL_CMDBUFFERSIZE, &g_z1xx.cmdbufdesc[i]);
+ KOS_ASSERT(status == GSL_SUCCESS);
+ g_z1xx.cmdbuf[i]=kos_malloc(GSL_HAL_CMDBUFFERSIZE);
+ KOS_ASSERT(g_z1xx.cmdbuf[i]);
+ kos_memset((void*)g_z1xx.cmdbuf[i], 0, GSL_HAL_CMDBUFFERSIZE);
+
+ g_z1xx.curr = i;
+ g_z1xx.offs = 0;
+ addmarker(&g_z1xx);
+ status = kgsl_sharedmem_write0(&g_z1xx.cmdbufdesc[i],0, g_z1xx.cmdbuf[i], (512 + 13) * sizeof(unsigned int), false);
+ KOS_ASSERT(status == GSL_SUCCESS);
+ }
+ g_z1xx.curr = 0;
+ cmd = (int)(((VGV3_NEXTCMD_JUMP) & VGV3_NEXTCMD_NEXTCMD_FMASK)<< VGV3_NEXTCMD_NEXTCMD_FSHIFT);
+
+ /* set cmd stream buffer to hw */
+ status |= kgsl_cmdwindow_write0(GSL_DEVICE_G12, GSL_CMDWINDOW_2D, ADDR_VGV3_MODE, 4);
+ status |= kgsl_cmdwindow_write0(GSL_DEVICE_G12, GSL_CMDWINDOW_2D, ADDR_VGV3_NEXTADDR, g_z1xx.cmdbufdesc[0].gpuaddr );
+ status |= kgsl_cmdwindow_write0(GSL_DEVICE_G12, GSL_CMDWINDOW_2D, ADDR_VGV3_NEXTCMD, cmd | 5);
+
+ KOS_ASSERT(status == GSL_SUCCESS);
+
+        /* Edge buffer setup. todo: move the register setup to its own function
+           so that it can be called again when power management turns the clocks
+           off and back on.
+        */
+ status |= kgsl_sharedmem_alloc0(GSL_DEVICE_ANY, gslflags, GSL_HAL_EDGE0BUFSIZE, &g_z1xx.e0);
+ status |= kgsl_sharedmem_alloc0(GSL_DEVICE_ANY, gslflags, GSL_HAL_EDGE1BUFSIZE, &g_z1xx.e1);
+ status |= kgsl_sharedmem_set0(&g_z1xx.e0, 0, 0, GSL_HAL_EDGE0BUFSIZE);
+ status |= kgsl_sharedmem_set0(&g_z1xx.e1, 0, 0, GSL_HAL_EDGE1BUFSIZE);
+
+ status |= kgsl_cmdwindow_write0(GSL_DEVICE_G12, GSL_CMDWINDOW_2D, GSL_HAL_EDGE0REG, g_z1xx.e0.gpuaddr);
+ status |= kgsl_cmdwindow_write0(GSL_DEVICE_G12, GSL_CMDWINDOW_2D, GSL_HAL_EDGE1REG, g_z1xx.e1.gpuaddr);
+#ifdef _Z180
+ kgsl_sharedmem_alloc0(GSL_DEVICE_ANY, gslflags, GSL_HAL_EDGE2BUFSIZE, &g_z1xx.e2);
+ kgsl_sharedmem_set0(&g_z1xx.e2, 0, 0, GSL_HAL_EDGE2BUFSIZE);
+ kgsl_cmdwindow_write0(GSL_DEVICE_G12, GSL_CMDWINDOW_2D, GSL_HAL_EDGE2REG, g_z1xx.e2.gpuaddr);
+#endif
+ KOS_ASSERT(status == GSL_SUCCESS);
+ }
+
+ if(g_z1xx.numcontext < GSL_CONTEXT_MAX)
+ {
+ g_z1xx.numcontext++;
+ *drawctxt_id=g_z1xx.numcontext;
+ status = GSL_SUCCESS;
+ }
+ else
+ {
+ status = GSL_FAILURE;
+ }
+
+ return status;
+}
+
+//----------------------------------------------------------------------------
+
+static int
+kgsl_g12_context_destroy(gsl_device_t* device, unsigned int drawctxt_id)
+{
+
+ // unreferenced formal parameters
+ (void) device;
+ (void) drawctxt_id;
+
+    if (g_z1xx.numcontext == 0)
+    {
+        // nothing to destroy; numcontext is unsigned, so guard against underflow
+        return (GSL_FAILURE);
+    }
+    g_z1xx.numcontext--;
+
+ if (g_z1xx.numcontext==0)
+ {
+ int i;
+ for (i=0;i<GSL_HAL_NUMCMDBUFFERS;i++)
+ {
+ kgsl_sharedmem_free0(&g_z1xx.cmdbufdesc[i], GSL_CALLER_PROCESSID_GET());
+ kos_free(g_z1xx.cmdbuf[i]);
+ }
+ kgsl_sharedmem_free0(&g_z1xx.e0, GSL_CALLER_PROCESSID_GET());
+ kgsl_sharedmem_free0(&g_z1xx.e1, GSL_CALLER_PROCESSID_GET());
+#ifdef _Z180
+ kgsl_sharedmem_free0(&g_z1xx.e2, GSL_CALLER_PROCESSID_GET());
+#endif
+ kos_memset(&g_z1xx,0,sizeof(gsl_z1xx_t));
+ }
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+#if !defined GSL_BLD_YAMATO && (!defined __SYMBIAN32__ || defined __WINSCW__)
+KGSL_API int kgsl_drawctxt_bind_gmem_shadow(gsl_deviceid_t device_id, unsigned int drawctxt_id, const gsl_rect_t* gmem_rect, unsigned int shadow_x, unsigned int shadow_y, const gsl_buffer_desc_t* shadow_buffer, unsigned int buffer_id)
+{
+ (void)device_id;
+ (void)drawctxt_id;
+ (void)gmem_rect;
+ (void)shadow_x;
+ (void)shadow_y;
+ (void)shadow_buffer;
+ (void)buffer_id;
+ return (GSL_FAILURE);
+}
+#endif
+//----------------------------------------------------------------------------
+
+#ifndef _LINUX
+static void irq_thread(void)
+{
+ int error = 0;
+ unsigned int irq_count;
+ gsl_device_t* device = &gsl_driver.device[GSL_DEVICE_G12-1];
+ gsl_timestamp_t timestamp;
+
+ while( !error )
+ {
+#ifdef IRQTHREAD_POLL
+ if(kos_event_wait(device->irqthread_event, GSL_IRQ_TIMEOUT)==GSL_SUCCESS)
+ {
+ kgsl_g12_waitirq(device, GSL_INTR_G12_G2D, &irq_count, GSL_IRQ_TIMEOUT);
+#else
+
+ if( kgsl_g12_waitirq(device, GSL_INTR_G12_G2D, &irq_count, GSL_IRQ_TIMEOUT) == GSL_SUCCESS )
+ {
+#endif
+ /* Read a timestamp value */
+#ifdef VG_HDK
+ timestamp = device->timestamp;
+#else
+ GSL_CMDSTREAM_GET_EOP_TIMESTAMP(device, (int *)&timestamp);
+#endif
+ /* Increase the timestamp value */
+ timestamp += irq_count;
+
+ KOS_ASSERT( timestamp <= device->current_timestamp );
+ /* Write the new timestamp value */
+#ifdef VG_HDK
+ device->timestamp = timestamp;
+#else
+ kgsl_sharedmem_write0(&device->memstore, GSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp), &timestamp, 4, false);
+#endif
+
+ /* Notify timestamp event */
+#ifndef _LINUX
+ kos_event_signal( device->timestamp_event );
+#else
+ wake_up_interruptible_all(&(device->timestamp_waitq));
+#endif
+ }
+ else
+ {
+ /* Timeout */
+
+
+ if(!(device->flags&GSL_FLAGS_INITIALIZED))
+ {
+ /* if device is closed -> thread exit */
+#if defined(__SYMBIAN32__)
+ device->irq_thread = 0;
+#endif
+ return;
+ }
+ }
+ }
+}
+#endif
+
+//----------------------------------------------------------------------------
+
+static int
+kgsl_g12_addtimestamp(gsl_device_t* device, gsl_timestamp_t *timestamp)
+{
+ device->current_timestamp++;
+ *timestamp = device->current_timestamp;
+
+ return (GSL_SUCCESS);
+}
+#endif
diff --git a/drivers/mxc/amd-gpu/common/gsl_intrmgr.c b/drivers/mxc/amd-gpu/common/gsl_intrmgr.c
new file mode 100644
index 000000000000..2c8b278dfe7a
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_intrmgr.c
@@ -0,0 +1,305 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+
+//////////////////////////////////////////////////////////////////////////////
+// macros
+//////////////////////////////////////////////////////////////////////////////
+#define GSL_INTRID_VALIDATE(id) (((id) < 0) || ((id) >= GSL_INTR_COUNT))
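+// note: despite its name, this evaluates to nonzero when id is out of range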
+
+
+//////////////////////////////////////////////////////////////////////////////
+// functions
+//////////////////////////////////////////////////////////////////////////////
+
+static const gsl_intrblock_reg_t *
+kgsl_intr_id2block(gsl_intrid_t id)
+{
+ const gsl_intrblock_reg_t *block;
+ int i;
+
+ // interrupt id to hw block
+ for (i = 0; i < GSL_INTR_BLOCK_COUNT; i++)
+ {
+ block = &gsl_cfg_intrblock_reg[i];
+
+ if (block->first_id <= id && id <= block->last_id)
+ {
+ return (block);
+ }
+ }
+
+ return (NULL);
+}
+
+//----------------------------------------------------------------------------
+
+void
+kgsl_intr_decode(gsl_device_t *device, gsl_intrblock_t block_id)
+{
+ const gsl_intrblock_reg_t *block = &gsl_cfg_intrblock_reg[block_id];
+ gsl_intrid_t id;
+ unsigned int status;
+
+ // read the block's interrupt status bits
+ device->ftbl.device_regread(device, block->status_reg, &status);
+
+ // mask off any interrupts which are disabled
+ status &= device->intr.enabled[block->id];
+
+ // acknowledge the block's interrupts
+ device->ftbl.device_regwrite(device, block->clear_reg, status);
+
+ // loop through the block's masks, determine which interrupt bits are active, and call callback (or TODO queue DPC)
+ for (id = block->first_id; id <= block->last_id; id++)
+ {
+ if (status & gsl_cfg_intr_mask[id])
+ {
+ device->intr.handler[id].callback(id, device->intr.handler[id].cookie);
+ }
+ }
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API void
+kgsl_intr_isr()
+{
+ gsl_deviceid_t device_id;
+ gsl_device_t *device;
+
+ // loop through the devices, and call device specific isr
+ for (device_id = (gsl_deviceid_t)(GSL_DEVICE_ANY + 1); device_id <= GSL_DEVICE_MAX; device_id++)
+ {
+ device = &gsl_driver.device[device_id-1]; // device_id is 1 based
+
+ if (device->intr.flags & GSL_FLAGS_INITIALIZED)
+ {
+ kgsl_device_active(device);
+ device->ftbl.intr_isr(device);
+ }
+ }
+}
+
+//----------------------------------------------------------------------------
+
+int kgsl_intr_init(gsl_device_t *device)
+{
+ if (device->ftbl.intr_isr == NULL)
+ {
+ return (GSL_FAILURE_BADPARAM);
+ }
+
+ if (device->intr.flags & GSL_FLAGS_INITIALIZED)
+ {
+ return (GSL_SUCCESS);
+ }
+
+ device->intr.device = device;
+ device->intr.flags |= GSL_FLAGS_INITIALIZED;
+
+ // os_interrupt_setcallback(YAMATO_INTR, kgsl_intr_isr);
+ // os_interrupt_enable(YAMATO_INTR);
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int kgsl_intr_close(gsl_device_t *device)
+{
+ const gsl_intrblock_reg_t *block;
+ int i, id;
+
+ if (device->intr.flags & GSL_FLAGS_INITIALIZED)
+ {
+ // check if there are any enabled interrupts lingering around
+ for (i = 0; i < GSL_INTR_BLOCK_COUNT; i++)
+ {
+ if (device->intr.enabled[i])
+ {
+ block = &gsl_cfg_intrblock_reg[i];
+
+            // loop through the block's masks and disable any interrupts which are still enabled
+ for (id = block->first_id; id <= block->last_id; id++)
+ {
+ if (device->intr.enabled[i] & gsl_cfg_intr_mask[id])
+ {
+ kgsl_intr_disable(&device->intr, (gsl_intrid_t)id);
+ }
+ }
+ }
+ }
+
+ kos_memset(&device->intr, 0, sizeof(gsl_intr_t));
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int kgsl_intr_enable(gsl_intr_t *intr, gsl_intrid_t id)
+{
+ const gsl_intrblock_reg_t *block;
+ unsigned int mask;
+ unsigned int enabled;
+
+ if (GSL_INTRID_VALIDATE(id))
+ {
+ return (GSL_FAILURE_BADPARAM);
+ }
+
+ if (intr->handler[id].callback == NULL)
+ {
+ return (GSL_FAILURE_NOTINITIALIZED);
+ }
+
+ block = kgsl_intr_id2block(id);
+ if (block == NULL)
+ {
+ return (GSL_FAILURE_SYSTEMERROR);
+ }
+
+ mask = gsl_cfg_intr_mask[id];
+ enabled = intr->enabled[block->id];
+
+ if (mask && !(enabled & mask))
+ {
+ intr->evnt[id] = kos_event_create(0);
+
+ enabled |= mask;
+ intr->enabled[block->id] = enabled;
+ intr->device->ftbl.device_regwrite(intr->device, block->mask_reg, enabled);
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int kgsl_intr_disable(gsl_intr_t *intr, gsl_intrid_t id)
+{
+ const gsl_intrblock_reg_t *block;
+ unsigned int mask;
+ unsigned int enabled;
+
+ if (GSL_INTRID_VALIDATE(id))
+ {
+ return (GSL_FAILURE_BADPARAM);
+ }
+
+ if (intr->handler[id].callback == NULL)
+ {
+ return (GSL_FAILURE_NOTINITIALIZED);
+ }
+
+ block = kgsl_intr_id2block(id);
+ if (block == NULL)
+ {
+ return (GSL_FAILURE_SYSTEMERROR);
+ }
+
+ mask = gsl_cfg_intr_mask[id];
+ enabled = intr->enabled[block->id];
+
+ if (enabled & mask)
+ {
+ enabled &= ~mask;
+ intr->enabled[block->id] = enabled;
+ intr->device->ftbl.device_regwrite(intr->device, block->mask_reg, enabled);
+
+ kos_event_signal(intr->evnt[id]); // wake up waiting threads before destroying the event
+ kos_event_destroy(intr->evnt[id]);
+ intr->evnt[id] = 0;
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_intr_attach(gsl_intr_t *intr, gsl_intrid_t id, gsl_intr_callback_t callback, void *cookie)
+{
+ if (GSL_INTRID_VALIDATE(id) || callback == NULL)
+ {
+ return (GSL_FAILURE_BADPARAM);
+ }
+
+ if (intr->handler[id].callback != NULL)
+ {
+ if (intr->handler[id].callback == callback && intr->handler[id].cookie == cookie)
+ {
+ return (GSL_FAILURE_ALREADYINITIALIZED);
+ }
+ else
+ {
+ return (GSL_FAILURE_NOMOREAVAILABLE);
+ }
+ }
+
+ intr->handler[id].callback = callback;
+ intr->handler[id].cookie = cookie;
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_intr_detach(gsl_intr_t *intr, gsl_intrid_t id)
+{
+ if (GSL_INTRID_VALIDATE(id))
+ {
+ return (GSL_FAILURE_BADPARAM);
+ }
+
+ if (intr->handler[id].callback == NULL)
+ {
+ return (GSL_FAILURE_NOTINITIALIZED);
+ }
+
+ kgsl_intr_disable(intr, id);
+
+ intr->handler[id].callback = NULL;
+ intr->handler[id].cookie = NULL;
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_intr_isenabled(gsl_intr_t *intr, gsl_intrid_t id)
+{
+ int status = GSL_FAILURE;
+ const gsl_intrblock_reg_t *block = kgsl_intr_id2block(id);
+
+ if (block != NULL)
+ {
+ // check if interrupt is enabled
+ if (intr->enabled[block->id] & gsl_cfg_intr_mask[id])
+ {
+ status = GSL_SUCCESS;
+ }
+ }
+
+ return (status);
+}
diff --git a/drivers/mxc/amd-gpu/common/gsl_log.c b/drivers/mxc/amd-gpu/common/gsl_log.c
new file mode 100644
index 000000000000..79a14a5f4b21
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_log.c
@@ -0,0 +1,591 @@
+/* Copyright (c) 2002,2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifdef GSL_LOG
+
+#define _CRT_SECURE_NO_WARNINGS
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include "gsl.h"
+
+#define KGSL_OUTPUT_TYPE_MEMBUF 0
+#define KGSL_OUTPUT_TYPE_STDOUT 1
+#define KGSL_OUTPUT_TYPE_FILE 2
+
+#define REG_OUTPUT( X ) case X: b += sprintf( b, "%s", #X ); break;
+#define INTRID_OUTPUT( X ) case X: b += sprintf( b, "%s", #X ); break;
+
+typedef struct log_output
+{
+ unsigned char type;
+ unsigned int flags;
+ oshandle_t file;
+
+ struct log_output* next;
+} log_output_t;
+
+static log_output_t* outputs = NULL;
+
+static oshandle_t log_mutex = NULL;
+static char buffer[256];
+static char buffer2[256];
+static int log_initialized = 0;
+
+//----------------------------------------------------------------------------
+
+int kgsl_log_init()
+{
+ log_mutex = kos_mutex_create( "log_mutex" );
+
+ log_initialized = 1;
+
+ return GSL_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+
+int kgsl_log_close()
+{
+ if( !log_initialized ) return GSL_SUCCESS;
+
+    // Go through the output list and free every node
+ while( outputs != NULL )
+ {
+ log_output_t* temp = outputs->next;
+
+ switch( outputs->type )
+ {
+ case KGSL_OUTPUT_TYPE_FILE:
+ kos_fclose( outputs->file );
+ break;
+ }
+
+ kos_free( outputs );
+ outputs = temp;
+ }
+
+ kos_mutex_free( log_mutex );
+
+ log_initialized = 0;
+
+ return GSL_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+
+int kgsl_log_open_stdout( unsigned int log_flags )
+{
+ log_output_t* output;
+
+ if( !log_initialized ) return GSL_SUCCESS;
+
+ output = kos_malloc( sizeof( log_output_t ) );
+ output->type = KGSL_OUTPUT_TYPE_STDOUT;
+ output->flags = log_flags;
+
+ // Add to the list
+ if( outputs == NULL )
+ {
+ // First node in the list.
+ outputs = output;
+ output->next = NULL;
+ }
+ else
+ {
+ // Add to the start of the list
+ output->next = outputs;
+ outputs = output;
+ }
+
+ return GSL_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+
+int kgsl_log_open_membuf( int* memBufId, unsigned int log_flags )
+{
+ // TODO
+
+ return GSL_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+
+int kgsl_log_open_file( char* filename, unsigned int log_flags )
+{
+ log_output_t* output;
+
+ if( !log_initialized ) return GSL_SUCCESS;
+
+ output = kos_malloc( sizeof( log_output_t ) );
+ output->type = KGSL_OUTPUT_TYPE_FILE;
+ output->flags = log_flags;
+ output->file = kos_fopen( filename, "w" );
+
+ // Add to the list
+ if( outputs == NULL )
+ {
+ // First node in the list.
+ outputs = output;
+ output->next = NULL;
+ }
+ else
+ {
+ // Add to the start of the list
+ output->next = outputs;
+ outputs = output;
+ }
+
+ return GSL_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+
+int kgsl_log_flush_membuf( char* filename, int memBufId )
+{
+ // TODO
+
+ return GSL_SUCCESS;
+}
+//----------------------------------------------------------------------------
+
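+// kgsl_log_write: printf-style logging with driver-specific conversions handled
+// inline before normal formatting: %M (gsl_memdesc_t), %B (GSL_SUCCESS/GSL_FAILURE),
+// %D (gsl_deviceid_t), %I (interrupt id) and %R (register offset).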
+int kgsl_log_write( unsigned int log_flags, char* format, ... )
+{
+ char *c = format;
+ char *b = buffer;
+ char *p1, *p2;
+ log_output_t* output;
+ va_list arguments;
+
+ if( !log_initialized ) return GSL_SUCCESS;
+
+ // Acquire mutex lock as we are using shared buffer for the string parsing
+ kos_mutex_lock( log_mutex );
+
+ // Add separator
+ *(b++) = '|'; *(b++) = ' ';
+
+ va_start( arguments, format );
+
+ while( 1 )
+ {
+        // Find the first occurrence of %
+ p1 = strchr( c, '%' );
+ if( !p1 )
+ {
+ // No more % characters -> copy rest of the string
+ strcpy( b, c );
+
+ break;
+ }
+
+        // Find the second occurrence of % and handle the string up to that point
+ p2 = strchr( p1+1, '%' );
+
+        // If not found, just use the end of the string
+ if( !p2 ) p2 = strchr( p1+1, '\0' );
+
+ // Break the string to this point
+ kos_memcpy( buffer2, c, p2-c );
+ *(buffer2+(unsigned int)(p2-c)) = '\0';
+
+ switch( *(p1+1) )
+ {
+ // gsl_memdesc_t
+ case 'M':
+ {
+ gsl_memdesc_t val = va_arg( arguments, gsl_memdesc_t );
+ // Handle string before %M
+ kos_memcpy( b, c, p1-c );
+ b += (unsigned int)p1-(unsigned int)c;
+ // Replace %M
+ b += sprintf( b, "[hostptr=0x%08x, gpuaddr=0x%08x]", val.hostptr, val.gpuaddr );
+ // Handle string after %M
+ kos_memcpy( b, p1+2, p2-(p1+2) );
+ b += (unsigned int)p2-(unsigned int)(p1+2);
+ *b = '\0';
+ }
+ break;
+
+ // GSL_SUCCESS/GSL_FAILURE
+ case 'B':
+ {
+ int val = va_arg( arguments, int );
+ // Handle string before %B
+ kos_memcpy( b, c, p1-c );
+ b += (unsigned int)p1-(unsigned int)c;
+ // Replace %B
+ if( val == GSL_SUCCESS )
+ b += sprintf( b, "%s", "GSL_SUCCESS" );
+ else
+ b += sprintf( b, "%s", "GSL_FAILURE" );
+ // Handle string after %B
+ kos_memcpy( b, p1+2, p2-(p1+2) );
+ b += (unsigned int)p2-(unsigned int)(p1+2);
+ *b = '\0';
+ }
+ break;
+
+ // gsl_deviceid_t
+ case 'D':
+ {
+ gsl_deviceid_t val = va_arg( arguments, gsl_deviceid_t );
+ // Handle string before %D
+ kos_memcpy( b, c, p1-c );
+ b += (unsigned int)p1-(unsigned int)c;
+ // Replace %D
+ switch( val )
+ {
+ case GSL_DEVICE_ANY:
+ b += sprintf( b, "%s", "GSL_DEVICE_ANY" );
+ break;
+ case GSL_DEVICE_YAMATO:
+ b += sprintf( b, "%s", "GSL_DEVICE_YAMATO" );
+ break;
+ case GSL_DEVICE_G12:
+ b += sprintf( b, "%s", "GSL_DEVICE_G12" );
+ break;
+ default:
+ b += sprintf( b, "%s", "UNKNOWN DEVICE" );
+ break;
+ }
+ // Handle string after %D
+ kos_memcpy( b, p1+2, p2-(p1+2) );
+ b += (unsigned int)p2-(unsigned int)(p1+2);
+ *b = '\0';
+ }
+ break;
+
+ // gsl_intrid_t
+ case 'I':
+ {
+ unsigned int val = va_arg( arguments, unsigned int );
+ // Handle string before %I
+ kos_memcpy( b, c, p1-c );
+ b += (unsigned int)p1-(unsigned int)c;
+ // Replace %I
+ switch( val )
+ {
+ INTRID_OUTPUT( GSL_INTR_YDX_MH_AXI_READ_ERROR );
+ INTRID_OUTPUT( GSL_INTR_YDX_MH_AXI_WRITE_ERROR );
+ INTRID_OUTPUT( GSL_INTR_YDX_MH_MMU_PAGE_FAULT );
+ INTRID_OUTPUT( GSL_INTR_YDX_CP_SW_INT );
+ INTRID_OUTPUT( GSL_INTR_YDX_CP_T0_PACKET_IN_IB );
+ INTRID_OUTPUT( GSL_INTR_YDX_CP_OPCODE_ERROR );
+ INTRID_OUTPUT( GSL_INTR_YDX_CP_PROTECTED_MODE_ERROR );
+ INTRID_OUTPUT( GSL_INTR_YDX_CP_RESERVED_BIT_ERROR );
+ INTRID_OUTPUT( GSL_INTR_YDX_CP_IB_ERROR );
+ INTRID_OUTPUT( GSL_INTR_YDX_CP_IB2_INT );
+ INTRID_OUTPUT( GSL_INTR_YDX_CP_IB1_INT );
+ INTRID_OUTPUT( GSL_INTR_YDX_CP_RING_BUFFER );
+ INTRID_OUTPUT( GSL_INTR_YDX_RBBM_READ_ERROR );
+ INTRID_OUTPUT( GSL_INTR_YDX_RBBM_DISPLAY_UPDATE );
+ INTRID_OUTPUT( GSL_INTR_YDX_RBBM_GUI_IDLE );
+ INTRID_OUTPUT( GSL_INTR_YDX_SQ_PS_WATCHDOG );
+ INTRID_OUTPUT( GSL_INTR_YDX_SQ_VS_WATCHDOG );
+ INTRID_OUTPUT( GSL_INTR_G12_MH );
+ INTRID_OUTPUT( GSL_INTR_G12_G2D );
+ INTRID_OUTPUT( GSL_INTR_G12_FIFO );
+#ifndef _Z180
+ INTRID_OUTPUT( GSL_INTR_G12_FBC );
+#endif // _Z180
+ INTRID_OUTPUT( GSL_INTR_G12_MH_AXI_READ_ERROR );
+ INTRID_OUTPUT( GSL_INTR_G12_MH_AXI_WRITE_ERROR );
+ INTRID_OUTPUT( GSL_INTR_G12_MH_MMU_PAGE_FAULT );
+ INTRID_OUTPUT( GSL_INTR_COUNT );
+ INTRID_OUTPUT( GSL_INTR_FOOBAR );
+
+ default:
+ b += sprintf( b, "%s", "UNKNOWN INTERRUPT ID" );
+ break;
+ }
+ // Handle string after %I
+ kos_memcpy( b, p1+2, p2-(p1+2) );
+ b += (unsigned int)p2-(unsigned int)(p1+2);
+ *b = '\0';
+ }
+ break;
+
+ // Register offset
+ case 'R':
+ {
+ unsigned int val = va_arg( arguments, unsigned int );
+
+ // Handle string before %R
+ kos_memcpy( b, c, p1-c );
+ b += (unsigned int)p1-(unsigned int)c;
+ // Replace %R
+ switch( val )
+ {
+ REG_OUTPUT( mmPA_CL_VPORT_XSCALE ); REG_OUTPUT( mmPA_CL_VPORT_XOFFSET ); REG_OUTPUT( mmPA_CL_VPORT_YSCALE );
+ REG_OUTPUT( mmPA_CL_VPORT_YOFFSET ); REG_OUTPUT( mmPA_CL_VPORT_ZSCALE ); REG_OUTPUT( mmPA_CL_VPORT_ZOFFSET );
+ REG_OUTPUT( mmPA_CL_VTE_CNTL ); REG_OUTPUT( mmPA_CL_CLIP_CNTL ); REG_OUTPUT( mmPA_CL_GB_VERT_CLIP_ADJ );
+ REG_OUTPUT( mmPA_CL_GB_VERT_DISC_ADJ ); REG_OUTPUT( mmPA_CL_GB_HORZ_CLIP_ADJ ); REG_OUTPUT( mmPA_CL_GB_HORZ_DISC_ADJ );
+ REG_OUTPUT( mmPA_CL_ENHANCE ); REG_OUTPUT( mmPA_SC_ENHANCE ); REG_OUTPUT( mmPA_SU_VTX_CNTL );
+ REG_OUTPUT( mmPA_SU_POINT_SIZE ); REG_OUTPUT( mmPA_SU_POINT_MINMAX ); REG_OUTPUT( mmPA_SU_LINE_CNTL );
+ REG_OUTPUT( mmPA_SU_FACE_DATA ); REG_OUTPUT( mmPA_SU_SC_MODE_CNTL ); REG_OUTPUT( mmPA_SU_POLY_OFFSET_FRONT_SCALE );
+ REG_OUTPUT( mmPA_SU_POLY_OFFSET_FRONT_OFFSET ); REG_OUTPUT( mmPA_SU_POLY_OFFSET_BACK_SCALE ); REG_OUTPUT( mmPA_SU_POLY_OFFSET_BACK_OFFSET );
+ REG_OUTPUT( mmPA_SU_PERFCOUNTER0_SELECT ); REG_OUTPUT( mmPA_SU_PERFCOUNTER1_SELECT ); REG_OUTPUT( mmPA_SU_PERFCOUNTER2_SELECT );
+ REG_OUTPUT( mmPA_SU_PERFCOUNTER3_SELECT ); REG_OUTPUT( mmPA_SU_PERFCOUNTER0_LOW ); REG_OUTPUT( mmPA_SU_PERFCOUNTER0_HI );
+ REG_OUTPUT( mmPA_SU_PERFCOUNTER1_LOW ); REG_OUTPUT( mmPA_SU_PERFCOUNTER1_HI ); REG_OUTPUT( mmPA_SU_PERFCOUNTER2_LOW );
+ REG_OUTPUT( mmPA_SU_PERFCOUNTER2_HI ); REG_OUTPUT( mmPA_SU_PERFCOUNTER3_LOW ); REG_OUTPUT( mmPA_SU_PERFCOUNTER3_HI );
+ REG_OUTPUT( mmPA_SC_WINDOW_OFFSET ); REG_OUTPUT( mmPA_SC_AA_CONFIG ); REG_OUTPUT( mmPA_SC_AA_MASK );
+ REG_OUTPUT( mmPA_SC_LINE_STIPPLE ); REG_OUTPUT( mmPA_SC_LINE_CNTL ); REG_OUTPUT( mmPA_SC_WINDOW_SCISSOR_TL );
+ REG_OUTPUT( mmPA_SC_WINDOW_SCISSOR_BR ); REG_OUTPUT( mmPA_SC_SCREEN_SCISSOR_TL ); REG_OUTPUT( mmPA_SC_SCREEN_SCISSOR_BR );
+ REG_OUTPUT( mmPA_SC_VIZ_QUERY ); REG_OUTPUT( mmPA_SC_VIZ_QUERY_STATUS ); REG_OUTPUT( mmPA_SC_LINE_STIPPLE_STATE );
+ REG_OUTPUT( mmPA_SC_PERFCOUNTER0_SELECT ); REG_OUTPUT( mmPA_SC_PERFCOUNTER0_LOW ); REG_OUTPUT( mmPA_SC_PERFCOUNTER0_HI );
+ REG_OUTPUT( mmPA_CL_CNTL_STATUS ); REG_OUTPUT( mmPA_SU_CNTL_STATUS ); REG_OUTPUT( mmPA_SC_CNTL_STATUS );
+ REG_OUTPUT( mmPA_SU_DEBUG_CNTL ); REG_OUTPUT( mmPA_SU_DEBUG_DATA ); REG_OUTPUT( mmPA_SC_DEBUG_CNTL );
+ REG_OUTPUT( mmPA_SC_DEBUG_DATA ); REG_OUTPUT( mmGFX_COPY_STATE ); REG_OUTPUT( mmVGT_DRAW_INITIATOR );
+ REG_OUTPUT( mmVGT_EVENT_INITIATOR ); REG_OUTPUT( mmVGT_DMA_BASE ); REG_OUTPUT( mmVGT_DMA_SIZE );
+ REG_OUTPUT( mmVGT_BIN_BASE ); REG_OUTPUT( mmVGT_BIN_SIZE ); REG_OUTPUT( mmVGT_CURRENT_BIN_ID_MIN );
+ REG_OUTPUT( mmVGT_CURRENT_BIN_ID_MAX ); REG_OUTPUT( mmVGT_IMMED_DATA ); REG_OUTPUT( mmVGT_MAX_VTX_INDX );
+ REG_OUTPUT( mmVGT_MIN_VTX_INDX ); REG_OUTPUT( mmVGT_INDX_OFFSET ); REG_OUTPUT( mmVGT_VERTEX_REUSE_BLOCK_CNTL );
+ REG_OUTPUT( mmVGT_OUT_DEALLOC_CNTL ); REG_OUTPUT( mmVGT_MULTI_PRIM_IB_RESET_INDX ); REG_OUTPUT( mmVGT_ENHANCE );
+ REG_OUTPUT( mmVGT_VTX_VECT_EJECT_REG ); REG_OUTPUT( mmVGT_LAST_COPY_STATE ); REG_OUTPUT( mmVGT_DEBUG_CNTL );
+ REG_OUTPUT( mmVGT_DEBUG_DATA ); REG_OUTPUT( mmVGT_CNTL_STATUS ); REG_OUTPUT( mmVGT_CRC_SQ_DATA );
+ REG_OUTPUT( mmVGT_CRC_SQ_CTRL ); REG_OUTPUT( mmVGT_PERFCOUNTER0_SELECT ); REG_OUTPUT( mmVGT_PERFCOUNTER1_SELECT );
+ REG_OUTPUT( mmVGT_PERFCOUNTER2_SELECT ); REG_OUTPUT( mmVGT_PERFCOUNTER3_SELECT ); REG_OUTPUT( mmVGT_PERFCOUNTER0_LOW );
+ REG_OUTPUT( mmVGT_PERFCOUNTER1_LOW ); REG_OUTPUT( mmVGT_PERFCOUNTER2_LOW ); REG_OUTPUT( mmVGT_PERFCOUNTER3_LOW );
+ REG_OUTPUT( mmVGT_PERFCOUNTER0_HI ); REG_OUTPUT( mmVGT_PERFCOUNTER1_HI ); REG_OUTPUT( mmVGT_PERFCOUNTER2_HI );
+ REG_OUTPUT( mmVGT_PERFCOUNTER3_HI ); REG_OUTPUT( mmTC_CNTL_STATUS ); REG_OUTPUT( mmTCR_CHICKEN );
+ REG_OUTPUT( mmTCF_CHICKEN ); REG_OUTPUT( mmTCM_CHICKEN ); REG_OUTPUT( mmTCR_PERFCOUNTER0_SELECT );
+ REG_OUTPUT( mmTCR_PERFCOUNTER1_SELECT ); REG_OUTPUT( mmTCR_PERFCOUNTER0_HI ); REG_OUTPUT( mmTCR_PERFCOUNTER1_HI );
+ REG_OUTPUT( mmTCR_PERFCOUNTER0_LOW ); REG_OUTPUT( mmTCR_PERFCOUNTER1_LOW ); REG_OUTPUT( mmTP_TC_CLKGATE_CNTL );
+ REG_OUTPUT( mmTPC_CNTL_STATUS ); REG_OUTPUT( mmTPC_DEBUG0 ); REG_OUTPUT( mmTPC_DEBUG1 );
+ REG_OUTPUT( mmTPC_CHICKEN ); REG_OUTPUT( mmTP0_CNTL_STATUS ); REG_OUTPUT( mmTP0_DEBUG );
+ REG_OUTPUT( mmTP0_CHICKEN ); REG_OUTPUT( mmTP0_PERFCOUNTER0_SELECT ); REG_OUTPUT( mmTP0_PERFCOUNTER0_HI );
+ REG_OUTPUT( mmTP0_PERFCOUNTER0_LOW ); REG_OUTPUT( mmTP0_PERFCOUNTER1_SELECT ); REG_OUTPUT( mmTP0_PERFCOUNTER1_HI );
+ REG_OUTPUT( mmTP0_PERFCOUNTER1_LOW ); REG_OUTPUT( mmTCM_PERFCOUNTER0_SELECT ); REG_OUTPUT( mmTCM_PERFCOUNTER1_SELECT );
+ REG_OUTPUT( mmTCM_PERFCOUNTER0_HI ); REG_OUTPUT( mmTCM_PERFCOUNTER1_HI ); REG_OUTPUT( mmTCM_PERFCOUNTER0_LOW );
+ REG_OUTPUT( mmTCM_PERFCOUNTER1_LOW ); REG_OUTPUT( mmTCF_PERFCOUNTER0_SELECT ); REG_OUTPUT( mmTCF_PERFCOUNTER1_SELECT );
+ REG_OUTPUT( mmTCF_PERFCOUNTER2_SELECT ); REG_OUTPUT( mmTCF_PERFCOUNTER3_SELECT ); REG_OUTPUT( mmTCF_PERFCOUNTER4_SELECT );
+ REG_OUTPUT( mmTCF_PERFCOUNTER5_SELECT ); REG_OUTPUT( mmTCF_PERFCOUNTER6_SELECT ); REG_OUTPUT( mmTCF_PERFCOUNTER7_SELECT );
+ REG_OUTPUT( mmTCF_PERFCOUNTER8_SELECT ); REG_OUTPUT( mmTCF_PERFCOUNTER9_SELECT ); REG_OUTPUT( mmTCF_PERFCOUNTER10_SELECT );
+ REG_OUTPUT( mmTCF_PERFCOUNTER11_SELECT ); REG_OUTPUT( mmTCF_PERFCOUNTER0_HI ); REG_OUTPUT( mmTCF_PERFCOUNTER1_HI );
+ REG_OUTPUT( mmTCF_PERFCOUNTER2_HI ); REG_OUTPUT( mmTCF_PERFCOUNTER3_HI ); REG_OUTPUT( mmTCF_PERFCOUNTER4_HI );
+ REG_OUTPUT( mmTCF_PERFCOUNTER5_HI ); REG_OUTPUT( mmTCF_PERFCOUNTER6_HI ); REG_OUTPUT( mmTCF_PERFCOUNTER7_HI );
+ REG_OUTPUT( mmTCF_PERFCOUNTER8_HI ); REG_OUTPUT( mmTCF_PERFCOUNTER9_HI ); REG_OUTPUT( mmTCF_PERFCOUNTER10_HI );
+ REG_OUTPUT( mmTCF_PERFCOUNTER11_HI ); REG_OUTPUT( mmTCF_PERFCOUNTER0_LOW ); REG_OUTPUT( mmTCF_PERFCOUNTER1_LOW );
+ REG_OUTPUT( mmTCF_PERFCOUNTER2_LOW ); REG_OUTPUT( mmTCF_PERFCOUNTER3_LOW ); REG_OUTPUT( mmTCF_PERFCOUNTER4_LOW );
+ REG_OUTPUT( mmTCF_PERFCOUNTER5_LOW ); REG_OUTPUT( mmTCF_PERFCOUNTER6_LOW ); REG_OUTPUT( mmTCF_PERFCOUNTER7_LOW );
+ REG_OUTPUT( mmTCF_PERFCOUNTER8_LOW ); REG_OUTPUT( mmTCF_PERFCOUNTER9_LOW ); REG_OUTPUT( mmTCF_PERFCOUNTER10_LOW );
+ REG_OUTPUT( mmTCF_PERFCOUNTER11_LOW ); REG_OUTPUT( mmTCF_DEBUG ); REG_OUTPUT( mmTCA_FIFO_DEBUG );
+ REG_OUTPUT( mmTCA_PROBE_DEBUG ); REG_OUTPUT( mmTCA_TPC_DEBUG ); REG_OUTPUT( mmTCB_CORE_DEBUG );
+ REG_OUTPUT( mmTCB_TAG0_DEBUG ); REG_OUTPUT( mmTCB_TAG1_DEBUG ); REG_OUTPUT( mmTCB_TAG2_DEBUG );
+ REG_OUTPUT( mmTCB_TAG3_DEBUG ); REG_OUTPUT( mmTCB_FETCH_GEN_SECTOR_WALKER0_DEBUG ); REG_OUTPUT( mmTCB_FETCH_GEN_WALKER_DEBUG );
+ REG_OUTPUT( mmTCB_FETCH_GEN_PIPE0_DEBUG ); REG_OUTPUT( mmTCD_INPUT0_DEBUG ); REG_OUTPUT( mmTCD_DEGAMMA_DEBUG );
+ REG_OUTPUT( mmTCD_DXTMUX_SCTARB_DEBUG ); REG_OUTPUT( mmTCD_DXTC_ARB_DEBUG ); REG_OUTPUT( mmTCD_STALLS_DEBUG );
+ REG_OUTPUT( mmTCO_STALLS_DEBUG ); REG_OUTPUT( mmTCO_QUAD0_DEBUG0 ); REG_OUTPUT( mmTCO_QUAD0_DEBUG1 );
+ REG_OUTPUT( mmSQ_GPR_MANAGEMENT ); REG_OUTPUT( mmSQ_FLOW_CONTROL ); REG_OUTPUT( mmSQ_INST_STORE_MANAGMENT );
+ REG_OUTPUT( mmSQ_RESOURCE_MANAGMENT ); REG_OUTPUT( mmSQ_EO_RT ); REG_OUTPUT( mmSQ_DEBUG_MISC );
+ REG_OUTPUT( mmSQ_ACTIVITY_METER_CNTL ); REG_OUTPUT( mmSQ_ACTIVITY_METER_STATUS ); REG_OUTPUT( mmSQ_INPUT_ARB_PRIORITY );
+ REG_OUTPUT( mmSQ_THREAD_ARB_PRIORITY ); REG_OUTPUT( mmSQ_VS_WATCHDOG_TIMER ); REG_OUTPUT( mmSQ_PS_WATCHDOG_TIMER );
+ REG_OUTPUT( mmSQ_INT_CNTL ); REG_OUTPUT( mmSQ_INT_STATUS ); REG_OUTPUT( mmSQ_INT_ACK );
+ REG_OUTPUT( mmSQ_DEBUG_INPUT_FSM ); REG_OUTPUT( mmSQ_DEBUG_CONST_MGR_FSM ); REG_OUTPUT( mmSQ_DEBUG_TP_FSM );
+ REG_OUTPUT( mmSQ_DEBUG_FSM_ALU_0 ); REG_OUTPUT( mmSQ_DEBUG_FSM_ALU_1 ); REG_OUTPUT( mmSQ_DEBUG_EXP_ALLOC );
+ REG_OUTPUT( mmSQ_DEBUG_PTR_BUFF ); REG_OUTPUT( mmSQ_DEBUG_GPR_VTX ); REG_OUTPUT( mmSQ_DEBUG_GPR_PIX );
+ REG_OUTPUT( mmSQ_DEBUG_TB_STATUS_SEL ); REG_OUTPUT( mmSQ_DEBUG_VTX_TB_0 ); REG_OUTPUT( mmSQ_DEBUG_VTX_TB_1 );
+ REG_OUTPUT( mmSQ_DEBUG_VTX_TB_STATUS_REG ); REG_OUTPUT( mmSQ_DEBUG_VTX_TB_STATE_MEM ); REG_OUTPUT( mmSQ_DEBUG_PIX_TB_0 );
+ REG_OUTPUT( mmSQ_DEBUG_PIX_TB_STATUS_REG_0 ); REG_OUTPUT( mmSQ_DEBUG_PIX_TB_STATUS_REG_1 ); REG_OUTPUT( mmSQ_DEBUG_PIX_TB_STATUS_REG_2 );
+ REG_OUTPUT( mmSQ_DEBUG_PIX_TB_STATUS_REG_3 ); REG_OUTPUT( mmSQ_DEBUG_PIX_TB_STATE_MEM ); REG_OUTPUT( mmSQ_PERFCOUNTER0_SELECT );
+ REG_OUTPUT( mmSQ_PERFCOUNTER1_SELECT ); REG_OUTPUT( mmSQ_PERFCOUNTER2_SELECT ); REG_OUTPUT( mmSQ_PERFCOUNTER3_SELECT );
+ REG_OUTPUT( mmSQ_PERFCOUNTER0_LOW ); REG_OUTPUT( mmSQ_PERFCOUNTER0_HI ); REG_OUTPUT( mmSQ_PERFCOUNTER1_LOW );
+ REG_OUTPUT( mmSQ_PERFCOUNTER1_HI ); REG_OUTPUT( mmSQ_PERFCOUNTER2_LOW ); REG_OUTPUT( mmSQ_PERFCOUNTER2_HI );
+ REG_OUTPUT( mmSQ_PERFCOUNTER3_LOW ); REG_OUTPUT( mmSQ_PERFCOUNTER3_HI ); REG_OUTPUT( mmSX_PERFCOUNTER0_SELECT );
+ REG_OUTPUT( mmSX_PERFCOUNTER0_LOW ); REG_OUTPUT( mmSX_PERFCOUNTER0_HI ); REG_OUTPUT( mmSQ_INSTRUCTION_ALU_0 );
+ REG_OUTPUT( mmSQ_INSTRUCTION_ALU_1 ); REG_OUTPUT( mmSQ_INSTRUCTION_ALU_2 ); REG_OUTPUT( mmSQ_INSTRUCTION_CF_EXEC_0 );
+ REG_OUTPUT( mmSQ_INSTRUCTION_CF_EXEC_1 ); REG_OUTPUT( mmSQ_INSTRUCTION_CF_EXEC_2 ); REG_OUTPUT( mmSQ_INSTRUCTION_CF_LOOP_0 );
+ REG_OUTPUT( mmSQ_INSTRUCTION_CF_LOOP_1 ); REG_OUTPUT( mmSQ_INSTRUCTION_CF_LOOP_2 ); REG_OUTPUT( mmSQ_INSTRUCTION_CF_JMP_CALL_0 );
+ REG_OUTPUT( mmSQ_INSTRUCTION_CF_JMP_CALL_1 ); REG_OUTPUT( mmSQ_INSTRUCTION_CF_JMP_CALL_2 ); REG_OUTPUT( mmSQ_INSTRUCTION_CF_ALLOC_0 );
+ REG_OUTPUT( mmSQ_INSTRUCTION_CF_ALLOC_1 ); REG_OUTPUT( mmSQ_INSTRUCTION_CF_ALLOC_2 ); REG_OUTPUT( mmSQ_INSTRUCTION_TFETCH_0 );
+ REG_OUTPUT( mmSQ_INSTRUCTION_TFETCH_1 ); REG_OUTPUT( mmSQ_INSTRUCTION_TFETCH_2 ); REG_OUTPUT( mmSQ_INSTRUCTION_VFETCH_0 );
+ REG_OUTPUT( mmSQ_INSTRUCTION_VFETCH_1 ); REG_OUTPUT( mmSQ_INSTRUCTION_VFETCH_2 ); REG_OUTPUT( mmSQ_CONSTANT_0 );
+ REG_OUTPUT( mmSQ_CONSTANT_1 ); REG_OUTPUT( mmSQ_CONSTANT_2 ); REG_OUTPUT( mmSQ_CONSTANT_3 );
+ REG_OUTPUT( mmSQ_FETCH_0 ); REG_OUTPUT( mmSQ_FETCH_1 ); REG_OUTPUT( mmSQ_FETCH_2 );
+ REG_OUTPUT( mmSQ_FETCH_3 ); REG_OUTPUT( mmSQ_FETCH_4 ); REG_OUTPUT( mmSQ_FETCH_5 );
+ REG_OUTPUT( mmSQ_CONSTANT_VFETCH_0 ); REG_OUTPUT( mmSQ_CONSTANT_VFETCH_1 ); REG_OUTPUT( mmSQ_CONSTANT_T2 );
+ REG_OUTPUT( mmSQ_CONSTANT_T3 ); REG_OUTPUT( mmSQ_CF_BOOLEANS ); REG_OUTPUT( mmSQ_CF_LOOP );
+ REG_OUTPUT( mmSQ_CONSTANT_RT_0 ); REG_OUTPUT( mmSQ_CONSTANT_RT_1 ); REG_OUTPUT( mmSQ_CONSTANT_RT_2 );
+ REG_OUTPUT( mmSQ_CONSTANT_RT_3 ); REG_OUTPUT( mmSQ_FETCH_RT_0 ); REG_OUTPUT( mmSQ_FETCH_RT_1 );
+ REG_OUTPUT( mmSQ_FETCH_RT_2 ); REG_OUTPUT( mmSQ_FETCH_RT_3 ); REG_OUTPUT( mmSQ_FETCH_RT_4 );
+ REG_OUTPUT( mmSQ_FETCH_RT_5 ); REG_OUTPUT( mmSQ_CF_RT_BOOLEANS ); REG_OUTPUT( mmSQ_CF_RT_LOOP );
+ REG_OUTPUT( mmSQ_VS_PROGRAM ); REG_OUTPUT( mmSQ_PS_PROGRAM ); REG_OUTPUT( mmSQ_CF_PROGRAM_SIZE );
+ REG_OUTPUT( mmSQ_INTERPOLATOR_CNTL ); REG_OUTPUT( mmSQ_PROGRAM_CNTL ); REG_OUTPUT( mmSQ_WRAPPING_0 );
+ REG_OUTPUT( mmSQ_WRAPPING_1 ); REG_OUTPUT( mmSQ_VS_CONST ); REG_OUTPUT( mmSQ_PS_CONST );
+ REG_OUTPUT( mmSQ_CONTEXT_MISC ); REG_OUTPUT( mmSQ_CF_RD_BASE ); REG_OUTPUT( mmSQ_DEBUG_MISC_0 );
+ REG_OUTPUT( mmSQ_DEBUG_MISC_1 ); REG_OUTPUT( mmMH_ARBITER_CONFIG ); REG_OUTPUT( mmMH_CLNT_AXI_ID_REUSE );
+ REG_OUTPUT( mmMH_INTERRUPT_MASK ); REG_OUTPUT( mmMH_INTERRUPT_STATUS ); REG_OUTPUT( mmMH_INTERRUPT_CLEAR );
+ REG_OUTPUT( mmMH_AXI_ERROR ); REG_OUTPUT( mmMH_PERFCOUNTER0_SELECT ); REG_OUTPUT( mmMH_PERFCOUNTER1_SELECT );
+ REG_OUTPUT( mmMH_PERFCOUNTER0_CONFIG ); REG_OUTPUT( mmMH_PERFCOUNTER1_CONFIG ); REG_OUTPUT( mmMH_PERFCOUNTER0_LOW );
+ REG_OUTPUT( mmMH_PERFCOUNTER1_LOW ); REG_OUTPUT( mmMH_PERFCOUNTER0_HI ); REG_OUTPUT( mmMH_PERFCOUNTER1_HI );
+ REG_OUTPUT( mmMH_DEBUG_CTRL ); REG_OUTPUT( mmMH_DEBUG_DATA ); REG_OUTPUT( mmMH_AXI_HALT_CONTROL );
+ REG_OUTPUT( mmMH_MMU_CONFIG ); REG_OUTPUT( mmMH_MMU_VA_RANGE ); REG_OUTPUT( mmMH_MMU_PT_BASE );
+ REG_OUTPUT( mmMH_MMU_PAGE_FAULT ); REG_OUTPUT( mmMH_MMU_TRAN_ERROR ); REG_OUTPUT( mmMH_MMU_INVALIDATE );
+ REG_OUTPUT( mmMH_MMU_MPU_BASE ); REG_OUTPUT( mmMH_MMU_MPU_END ); REG_OUTPUT( mmWAIT_UNTIL );
+ REG_OUTPUT( mmRBBM_ISYNC_CNTL ); REG_OUTPUT( mmRBBM_STATUS ); REG_OUTPUT( mmRBBM_DSPLY );
+ REG_OUTPUT( mmRBBM_RENDER_LATEST ); REG_OUTPUT( mmRBBM_RTL_RELEASE ); REG_OUTPUT( mmRBBM_PATCH_RELEASE );
+ REG_OUTPUT( mmRBBM_AUXILIARY_CONFIG ); REG_OUTPUT( mmRBBM_PERIPHID0 ); REG_OUTPUT( mmRBBM_PERIPHID1 );
+ REG_OUTPUT( mmRBBM_PERIPHID2 ); REG_OUTPUT( mmRBBM_PERIPHID3 ); REG_OUTPUT( mmRBBM_CNTL );
+ REG_OUTPUT( mmRBBM_SKEW_CNTL ); REG_OUTPUT( mmRBBM_SOFT_RESET ); REG_OUTPUT( mmRBBM_PM_OVERRIDE1 );
+ REG_OUTPUT( mmRBBM_PM_OVERRIDE2 ); REG_OUTPUT( mmGC_SYS_IDLE ); REG_OUTPUT( mmNQWAIT_UNTIL );
+ REG_OUTPUT( mmRBBM_DEBUG_OUT ); REG_OUTPUT( mmRBBM_DEBUG_CNTL ); REG_OUTPUT( mmRBBM_DEBUG );
+ REG_OUTPUT( mmRBBM_READ_ERROR ); REG_OUTPUT( mmRBBM_WAIT_IDLE_CLOCKS ); REG_OUTPUT( mmRBBM_INT_CNTL );
+ REG_OUTPUT( mmRBBM_INT_STATUS ); REG_OUTPUT( mmRBBM_INT_ACK ); REG_OUTPUT( mmMASTER_INT_SIGNAL );
+ REG_OUTPUT( mmRBBM_PERFCOUNTER1_SELECT ); REG_OUTPUT( mmRBBM_PERFCOUNTER1_LO ); REG_OUTPUT( mmRBBM_PERFCOUNTER1_HI );
+ REG_OUTPUT( mmCP_RB_BASE ); REG_OUTPUT( mmCP_RB_CNTL ); REG_OUTPUT( mmCP_RB_RPTR_ADDR );
+ REG_OUTPUT( mmCP_RB_RPTR ); REG_OUTPUT( mmCP_RB_RPTR_WR ); REG_OUTPUT( mmCP_RB_WPTR );
+ REG_OUTPUT( mmCP_RB_WPTR_DELAY ); REG_OUTPUT( mmCP_RB_WPTR_BASE ); REG_OUTPUT( mmCP_IB1_BASE );
+ REG_OUTPUT( mmCP_IB1_BUFSZ ); REG_OUTPUT( mmCP_IB2_BASE ); REG_OUTPUT( mmCP_IB2_BUFSZ );
+ REG_OUTPUT( mmCP_ST_BASE ); REG_OUTPUT( mmCP_ST_BUFSZ ); REG_OUTPUT( mmCP_QUEUE_THRESHOLDS );
+ REG_OUTPUT( mmCP_MEQ_THRESHOLDS ); REG_OUTPUT( mmCP_CSQ_AVAIL ); REG_OUTPUT( mmCP_STQ_AVAIL );
+ REG_OUTPUT( mmCP_MEQ_AVAIL ); REG_OUTPUT( mmCP_CSQ_RB_STAT ); REG_OUTPUT( mmCP_CSQ_IB1_STAT );
+ REG_OUTPUT( mmCP_CSQ_IB2_STAT ); REG_OUTPUT( mmCP_NON_PREFETCH_CNTRS ); REG_OUTPUT( mmCP_STQ_ST_STAT );
+ REG_OUTPUT( mmCP_MEQ_STAT ); REG_OUTPUT( mmCP_MIU_TAG_STAT ); REG_OUTPUT( mmCP_CMD_INDEX );
+ REG_OUTPUT( mmCP_CMD_DATA ); REG_OUTPUT( mmCP_ME_CNTL ); REG_OUTPUT( mmCP_ME_STATUS );
+ REG_OUTPUT( mmCP_ME_RAM_WADDR ); REG_OUTPUT( mmCP_ME_RAM_RADDR ); REG_OUTPUT( mmCP_ME_RAM_DATA );
+ REG_OUTPUT( mmCP_ME_RDADDR ); REG_OUTPUT( mmCP_DEBUG ); REG_OUTPUT( mmSCRATCH_REG0 );
+ REG_OUTPUT( mmSCRATCH_REG1 ); REG_OUTPUT( mmSCRATCH_REG2 ); REG_OUTPUT( mmSCRATCH_REG3 );
+ REG_OUTPUT( mmSCRATCH_REG4 ); REG_OUTPUT( mmSCRATCH_REG5 ); REG_OUTPUT( mmSCRATCH_REG6 );
+ REG_OUTPUT( mmSCRATCH_REG7 );
+ REG_OUTPUT( mmSCRATCH_UMSK ); REG_OUTPUT( mmSCRATCH_ADDR ); REG_OUTPUT( mmCP_ME_VS_EVENT_SRC );
+ REG_OUTPUT( mmCP_ME_VS_EVENT_ADDR ); REG_OUTPUT( mmCP_ME_VS_EVENT_DATA ); REG_OUTPUT( mmCP_ME_VS_EVENT_ADDR_SWM );
+ REG_OUTPUT( mmCP_ME_VS_EVENT_DATA_SWM ); REG_OUTPUT( mmCP_ME_PS_EVENT_SRC ); REG_OUTPUT( mmCP_ME_PS_EVENT_ADDR );
+ REG_OUTPUT( mmCP_ME_PS_EVENT_DATA ); REG_OUTPUT( mmCP_ME_PS_EVENT_ADDR_SWM ); REG_OUTPUT( mmCP_ME_PS_EVENT_DATA_SWM );
+ REG_OUTPUT( mmCP_ME_CF_EVENT_SRC ); REG_OUTPUT( mmCP_ME_CF_EVENT_ADDR ); REG_OUTPUT( mmCP_ME_CF_EVENT_DATA );
+ REG_OUTPUT( mmCP_ME_NRT_ADDR ); REG_OUTPUT( mmCP_ME_NRT_DATA ); REG_OUTPUT( mmCP_ME_VS_FETCH_DONE_SRC );
+ REG_OUTPUT( mmCP_ME_VS_FETCH_DONE_ADDR ); REG_OUTPUT( mmCP_ME_VS_FETCH_DONE_DATA ); REG_OUTPUT( mmCP_INT_CNTL );
+ REG_OUTPUT( mmCP_INT_STATUS ); REG_OUTPUT( mmCP_INT_ACK ); REG_OUTPUT( mmCP_PFP_UCODE_ADDR );
+ REG_OUTPUT( mmCP_PFP_UCODE_DATA ); REG_OUTPUT( mmCP_PERFMON_CNTL ); REG_OUTPUT( mmCP_PERFCOUNTER_SELECT );
+ REG_OUTPUT( mmCP_PERFCOUNTER_LO ); REG_OUTPUT( mmCP_PERFCOUNTER_HI ); REG_OUTPUT( mmCP_BIN_MASK_LO );
+ REG_OUTPUT( mmCP_BIN_MASK_HI ); REG_OUTPUT( mmCP_BIN_SELECT_LO ); REG_OUTPUT( mmCP_BIN_SELECT_HI );
+ REG_OUTPUT( mmCP_NV_FLAGS_0 ); REG_OUTPUT( mmCP_NV_FLAGS_1 ); REG_OUTPUT( mmCP_NV_FLAGS_2 );
+ REG_OUTPUT( mmCP_NV_FLAGS_3 ); REG_OUTPUT( mmCP_STATE_DEBUG_INDEX ); REG_OUTPUT( mmCP_STATE_DEBUG_DATA );
+ REG_OUTPUT( mmCP_PROG_COUNTER ); REG_OUTPUT( mmCP_STAT ); REG_OUTPUT( mmBIOS_0_SCRATCH );
+ REG_OUTPUT( mmBIOS_1_SCRATCH ); REG_OUTPUT( mmBIOS_2_SCRATCH ); REG_OUTPUT( mmBIOS_3_SCRATCH );
+ REG_OUTPUT( mmBIOS_4_SCRATCH ); REG_OUTPUT( mmBIOS_5_SCRATCH ); REG_OUTPUT( mmBIOS_6_SCRATCH );
+ REG_OUTPUT( mmBIOS_7_SCRATCH ); REG_OUTPUT( mmBIOS_8_SCRATCH ); REG_OUTPUT( mmBIOS_9_SCRATCH );
+ REG_OUTPUT( mmBIOS_10_SCRATCH ); REG_OUTPUT( mmBIOS_11_SCRATCH ); REG_OUTPUT( mmBIOS_12_SCRATCH );
+ REG_OUTPUT( mmBIOS_13_SCRATCH ); REG_OUTPUT( mmBIOS_14_SCRATCH ); REG_OUTPUT( mmBIOS_15_SCRATCH );
+ REG_OUTPUT( mmCOHER_SIZE_PM4 ); REG_OUTPUT( mmCOHER_BASE_PM4 ); REG_OUTPUT( mmCOHER_STATUS_PM4 );
+ REG_OUTPUT( mmCOHER_SIZE_HOST ); REG_OUTPUT( mmCOHER_BASE_HOST ); REG_OUTPUT( mmCOHER_STATUS_HOST );
+ REG_OUTPUT( mmCOHER_DEST_BASE_0 ); REG_OUTPUT( mmCOHER_DEST_BASE_1 ); REG_OUTPUT( mmCOHER_DEST_BASE_2 );
+ REG_OUTPUT( mmCOHER_DEST_BASE_3 ); REG_OUTPUT( mmCOHER_DEST_BASE_4 ); REG_OUTPUT( mmCOHER_DEST_BASE_5 );
+ REG_OUTPUT( mmCOHER_DEST_BASE_6 ); REG_OUTPUT( mmCOHER_DEST_BASE_7 ); REG_OUTPUT( mmRB_SURFACE_INFO );
+ REG_OUTPUT( mmRB_COLOR_INFO ); REG_OUTPUT( mmRB_DEPTH_INFO ); REG_OUTPUT( mmRB_STENCILREFMASK );
+ REG_OUTPUT( mmRB_ALPHA_REF ); REG_OUTPUT( mmRB_COLOR_MASK ); REG_OUTPUT( mmRB_BLEND_RED );
+ REG_OUTPUT( mmRB_BLEND_GREEN ); REG_OUTPUT( mmRB_BLEND_BLUE ); REG_OUTPUT( mmRB_BLEND_ALPHA );
+ REG_OUTPUT( mmRB_FOG_COLOR ); REG_OUTPUT( mmRB_STENCILREFMASK_BF ); REG_OUTPUT( mmRB_DEPTHCONTROL );
+ REG_OUTPUT( mmRB_BLENDCONTROL ); REG_OUTPUT( mmRB_COLORCONTROL ); REG_OUTPUT( mmRB_MODECONTROL );
+ REG_OUTPUT( mmRB_COLOR_DEST_MASK ); REG_OUTPUT( mmRB_COPY_CONTROL ); REG_OUTPUT( mmRB_COPY_DEST_BASE );
+ REG_OUTPUT( mmRB_COPY_DEST_PITCH ); REG_OUTPUT( mmRB_COPY_DEST_INFO ); REG_OUTPUT( mmRB_COPY_DEST_PIXEL_OFFSET );
+ REG_OUTPUT( mmRB_DEPTH_CLEAR ); REG_OUTPUT( mmRB_SAMPLE_COUNT_CTL ); REG_OUTPUT( mmRB_SAMPLE_COUNT_ADDR );
+ REG_OUTPUT( mmRB_BC_CONTROL ); REG_OUTPUT( mmRB_EDRAM_INFO ); REG_OUTPUT( mmRB_CRC_RD_PORT );
+ REG_OUTPUT( mmRB_CRC_CONTROL ); REG_OUTPUT( mmRB_CRC_MASK ); REG_OUTPUT( mmRB_PERFCOUNTER0_SELECT );
+ REG_OUTPUT( mmRB_PERFCOUNTER0_LOW ); REG_OUTPUT( mmRB_PERFCOUNTER0_HI ); REG_OUTPUT( mmRB_TOTAL_SAMPLES );
+ REG_OUTPUT( mmRB_ZPASS_SAMPLES ); REG_OUTPUT( mmRB_ZFAIL_SAMPLES ); REG_OUTPUT( mmRB_SFAIL_SAMPLES );
+ REG_OUTPUT( mmRB_DEBUG_0 ); REG_OUTPUT( mmRB_DEBUG_1 ); REG_OUTPUT( mmRB_DEBUG_2 );
+ REG_OUTPUT( mmRB_DEBUG_3 ); REG_OUTPUT( mmRB_DEBUG_4 ); REG_OUTPUT( mmRB_FLAG_CONTROL );
+ REG_OUTPUT( mmRB_BC_SPARES ); REG_OUTPUT( mmBC_DUMMY_CRAYRB_ENUMS ); REG_OUTPUT( mmBC_DUMMY_CRAYRB_MOREENUMS );
+
+ default:
+ b += sprintf( b, "%s", "UNKNOWN REGISTER OFFSET" );
+ break;
+ }
+ // Handle string after %R
+ kos_memcpy( b, p1+2, p2-(p1+2) );
+ b += (unsigned int)p2-(unsigned int)(p1+2);
+ *b = '\0';
+ }
+ break;
+
+
+ default:
+ {
+ int val = va_arg( arguments, int );
+                // Standard format specifier: let sprintf expand it with a single int-sized argument
+ b += sprintf( b, buffer2, val );
+ }
+ break;
+ }
+
+
+ c = p2;
+ }
+
+ // Add this string to all outputs
+ output = outputs;
+
+ while( output != NULL )
+ {
+ // Filter according to the flags
+ if( ( output->flags & log_flags ) == log_flags )
+ {
+ // Passed the filter. Now commit this message.
+ switch( output->type )
+ {
+ case KGSL_OUTPUT_TYPE_MEMBUF:
+ // TODO
+ break;
+
+ case KGSL_OUTPUT_TYPE_STDOUT:
+ // Write timestamp if enabled
+ if( output->flags & KGSL_LOG_TIMESTAMP )
+ printf( "[Timestamp: %d] ", kos_timestamp() );
+ // Write process id if enabled
+ if( output->flags & KGSL_LOG_PROCESS_ID )
+ printf( "[Process ID: %d] ", kos_process_getid() );
+ // Write thread id if enabled
+ if( output->flags & KGSL_LOG_THREAD_ID )
+ printf( "[Thread ID: %d] ", kos_thread_getid() );
+
+ // Write the message
+                    printf( "%s", buffer );
+ break;
+
+ case KGSL_OUTPUT_TYPE_FILE:
+ // Write timestamp if enabled
+ if( output->flags & KGSL_LOG_TIMESTAMP )
+ kos_fprintf( output->file, "[Timestamp: %d] ", kos_timestamp() );
+ // Write process id if enabled
+ if( output->flags & KGSL_LOG_PROCESS_ID )
+ kos_fprintf( output->file, "[Process ID: %d] ", kos_process_getid() );
+ // Write thread id if enabled
+ if( output->flags & KGSL_LOG_THREAD_ID )
+ kos_fprintf( output->file, "[Thread ID: %d] ", kos_thread_getid() );
+
+ // Write the message
+                    kos_fprintf( output->file, "%s", buffer );
+ break;
+ }
+ }
+
+ output = output->next;
+ }
+
+ va_end( arguments );
+
+ kos_mutex_unlock( log_mutex );
+
+ return GSL_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+#endif
diff --git a/drivers/mxc/amd-gpu/common/gsl_memmgr.c b/drivers/mxc/amd-gpu/common/gsl_memmgr.c
new file mode 100644
index 000000000000..75f250ae59b1
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_memmgr.c
@@ -0,0 +1,949 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_hal.h"
+
+//////////////////////////////////////////////////////////////////////////////
+// defines
+//////////////////////////////////////////////////////////////////////////////
+#define GSL_MEMARENAPRIV_SIGNATURE_MASK 0x0000FFFF
+#define GSL_MEMARENAPRIV_APERTUREID_MASK 0xF0000000
+#define GSL_MEMARENAPRIV_MMUVIRTUALIZED_MASK 0x0F000000
+
+#define GSL_MEMARENAPRIV_SIGNATURE_SHIFT 0
+#define GSL_MEMARENAPRIV_MMUVIRTUALIZED_SHIFT 24
+#define GSL_MEMARENAPRIV_APERTUREID_SHIFT 28
+
+#define GSL_MEMARENA_INSTANCE_SIGNATURE 0x0000CAFE
+
+#ifdef GSL_STATS_MEM
+#define GSL_MEMARENA_STATS(x) x
+#else
+#define GSL_MEMARENA_STATS(x)
+#endif // GSL_STATS_MEM
+
+
+/////////////////////////////////////////////////////////////////////////////
+// macros
+//////////////////////////////////////////////////////////////////////////////
+#define GSL_MEMARENA_LOCK() kos_mutex_lock(memarena->mutex)
+#define GSL_MEMARENA_UNLOCK() kos_mutex_unlock(memarena->mutex)
+
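+// memarena->priv packs a 16-bit instance signature, a 4-bit mmu-virtualized
+// flag and a 4-bit aperture id, using the masks and shifts defined above.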
+#define GSL_MEMARENA_SET_SIGNATURE (memarena->priv |= ((GSL_MEMARENA_INSTANCE_SIGNATURE << GSL_MEMARENAPRIV_SIGNATURE_SHIFT) & GSL_MEMARENAPRIV_SIGNATURE_MASK))
+#define GSL_MEMARENA_SET_MMU_VIRTUALIZED (memarena->priv |= ((mmu_virtualized << GSL_MEMARENAPRIV_MMUVIRTUALIZED_SHIFT) & GSL_MEMARENAPRIV_MMUVIRTUALIZED_MASK))
+#define GSL_MEMARENA_SET_ID (memarena->priv |= ((aperture_id << GSL_MEMARENAPRIV_APERTUREID_SHIFT) & GSL_MEMARENAPRIV_APERTUREID_MASK))
+
+#define GSL_MEMARENA_GET_SIGNATURE ((memarena->priv & GSL_MEMARENAPRIV_SIGNATURE_MASK) >> GSL_MEMARENAPRIV_SIGNATURE_SHIFT)
+#define GSL_MEMARENA_IS_MMU_VIRTUALIZED ((memarena->priv & GSL_MEMARENAPRIV_MMUVIRTUALIZED_MASK) >> GSL_MEMARENAPRIV_MMUVIRTUALIZED_SHIFT)
+#define GSL_MEMARENA_GET_ID ((memarena->priv & GSL_MEMARENAPRIV_APERTUREID_MASK) >> GSL_MEMARENAPRIV_APERTUREID_SHIFT)
+
+
+//////////////////////////////////////////////////////////////////////////////
+// validate
+//////////////////////////////////////////////////////////////////////////////
+#define GSL_MEMARENA_VALIDATE(memarena) \
+ KOS_ASSERT(memarena); \
+ if (GSL_MEMARENA_GET_SIGNATURE != GSL_MEMARENA_INSTANCE_SIGNATURE) \
+ { \
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, \
+ "ERROR: Memarena validation failed.\n" ); \
+ return (GSL_FAILURE); \
+ }
+
+//////////////////////////////////////////////////////////////////////////////
+// block alignment shift count
+//////////////////////////////////////////////////////////////////////////////
+OSINLINE unsigned int
+gsl_memarena_alignmentshift(gsl_flags_t flags)
+{
+ int alignshift = ((flags & GSL_MEMFLAGS_ALIGN_MASK) >> GSL_MEMFLAGS_ALIGN_SHIFT);
+ if (alignshift == 0)
+ alignshift = 5; // 32 bytes is the minimum alignment boundary
+ return (alignshift);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// address alignment
+//////////////////////////////////////////////////////////////////////////////
+OSINLINE unsigned int
+gsl_memarena_alignaddr(unsigned int address, int shift)
+{
+ //
+    // the returned address is guaranteed to be a multiple of the block
+    // alignment (1 << shift) and is never less than the input address.
+ //
+ unsigned int alignedbaseaddr = ((address) >> shift) << shift;
+ if (alignedbaseaddr < address)
+ {
+ alignedbaseaddr += (1 << shift);
+ }
+ return (alignedbaseaddr);
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// memory management API
+//////////////////////////////////////////////////////////////////////////////
+
+OSINLINE memblk_t*
+kgsl_memarena_getmemblknode(gsl_memarena_t *memarena)
+{
+#ifdef GSL_MEMARENA_NODE_POOL_ENABLED
+ gsl_nodepool_t *nodepool = memarena->nodepool;
+ memblk_t *memblk = NULL;
+ int allocnewpool = 1;
+ int i;
+
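+    // nodepool->priv is a bitmask of node usage: bit i set means memblk[i]
+    // of that pool is currently allocated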
+ if (nodepool)
+ {
+ // walk through list of existing pools
+ for ( ; ; )
+ {
+ // if there is a pool with a free memblk node
+ if (nodepool->priv != (1 << GSL_MEMARENA_NODE_POOL_MAX)-1)
+ {
+ // get index of the first free memblk node
+ for (i = 0; i < GSL_MEMARENA_NODE_POOL_MAX; i++)
+ {
+ if (((nodepool->priv >> i) & 0x1) == 0)
+ {
+ break;
+ }
+ }
+
+ // mark memblk node as used
+ nodepool->priv |= 1 << i;
+
+ memblk = &nodepool->memblk[i];
+ memblk->nodepoolindex = i;
+ memblk->blkaddr = 0;
+ memblk->blksize = 0;
+
+ allocnewpool = 0;
+
+ break;
+ }
+ else
+ {
+ nodepool = nodepool->next;
+
+ if (nodepool == memarena->nodepool)
+ {
+ // no free memblk node found
+ break;
+ }
+ }
+ }
+ }
+
+ // if no existing pool has a free memblk node
+ if (allocnewpool)
+ {
+ // alloc new pool of memblk nodes
+ nodepool = ((gsl_nodepool_t *)kos_malloc(sizeof(gsl_nodepool_t)));
+ if (nodepool)
+ {
+ kos_memset(nodepool, 0, sizeof(gsl_nodepool_t));
+
+ if (memarena->nodepool)
+ {
+ nodepool->next = memarena->nodepool->next;
+ nodepool->prev = memarena->nodepool;
+ memarena->nodepool->next->prev = nodepool;
+ memarena->nodepool->next = nodepool;
+ }
+ else
+ {
+ nodepool->next = nodepool;
+ nodepool->prev = nodepool;
+ }
+
+ // reposition pool head
+ memarena->nodepool = nodepool;
+
+ // mark memblk node as used
+ nodepool->priv |= 0x1;
+
+ memblk = &nodepool->memblk[0];
+ memblk->nodepoolindex = 0;
+ }
+ }
+
+ KOS_ASSERT(memblk);
+
+ return (memblk);
+#else
+ // unreferenced formal parameter
+ (void) memarena;
+
+ return ((memblk_t *)kos_malloc(sizeof(memblk_t)));
+#endif // GSL_MEMARENA_NODE_POOL_ENABLED
+}
+
+//----------------------------------------------------------------------------
+
+OSINLINE void
+kgsl_memarena_releasememblknode(gsl_memarena_t *memarena, memblk_t *memblk)
+{
+#ifdef GSL_MEMARENA_NODE_POOL_ENABLED
+ gsl_nodepool_t *nodepool = memarena->nodepool;
+
+ KOS_ASSERT(memblk);
+ KOS_ASSERT(nodepool);
+
+ // locate pool to which this memblk node belongs
+ while (((unsigned int) memblk) < ((unsigned int) nodepool) ||
+ ((unsigned int) memblk) > ((unsigned int) nodepool) + sizeof(gsl_nodepool_t))
+ {
+ nodepool = nodepool->prev;
+
+ KOS_ASSERT(nodepool != memarena->nodepool);
+ }
+
+ // mark memblk node as unused
+ nodepool->priv &= ~(1 << memblk->nodepoolindex);
+
+ // free pool when all its memblk nodes are unused
+ if (nodepool->priv == 0)
+ {
+ if (nodepool != nodepool->prev)
+ {
+ // reposition pool head
+ if (nodepool == memarena->nodepool)
+ {
+ memarena->nodepool = nodepool->prev;
+ }
+
+ nodepool->prev->next = nodepool->next;
+ nodepool->next->prev = nodepool->prev;
+ }
+ else
+ {
+ memarena->nodepool = NULL;
+ }
+
+ kos_free((void *)nodepool);
+ }
+ else
+ {
+        // leave the pool head at the last pool from which a memblk node was released
+ memarena->nodepool = nodepool;
+ }
+#else
+ // unreferenced formal parameter
+ (void) memarena;
+
+ kos_free((void *)memblk);
+#endif // GSL_MEMARENA_NODE_POOL_ENABLED
+}
+
+//----------------------------------------------------------------------------
+
+gsl_memarena_t*
+kgsl_memarena_create(int aperture_id, int mmu_virtualized, unsigned int hostbaseaddr, gpuaddr_t gpubaseaddr, int sizebytes)
+{
+ static int count = 0;
+ char name[100], id_str[2];
+ int len;
+ gsl_memarena_t *memarena;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> gsl_memarena_t* kgsl_memarena_create(int aperture_id=%d, gpuaddr_t gpubaseaddr=0x%08x, int sizebytes=%d)\n", aperture_id, gpubaseaddr, sizebytes );
+
+ memarena = (gsl_memarena_t *)kos_malloc(sizeof(gsl_memarena_t));
+
+ if (!memarena)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR,
+ "ERROR: Memarena allocation failed.\n" );
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "<-- kgsl_memarena_create. Return value: 0x%08x\n", NULL );
+ return (NULL);
+ }
+
+ kos_memset(memarena, 0, sizeof(gsl_memarena_t));
+
+ GSL_MEMARENA_SET_SIGNATURE;
+ GSL_MEMARENA_SET_MMU_VIRTUALIZED;
+ GSL_MEMARENA_SET_ID;
+
+ // define unique mutex for each memory arena instance
+ id_str[0] = (char) (count + '0');
+ id_str[1] = '\0';
+ kos_strcpy(name, "GSL_memory_arena_");
+ len = kos_strlen(name);
+ kos_strcpy(&name[len], id_str);
+
+ memarena->mutex = kos_mutex_create(name);
+
+ // set up the memory arena
+ memarena->hostbaseaddr = hostbaseaddr;
+ memarena->gpubaseaddr = gpubaseaddr;
+ memarena->sizebytes = sizebytes;
+
+ // allocate a memory block in free list which represents all memory in arena
+ memarena->freelist.head = kgsl_memarena_getmemblknode(memarena);
+ memarena->freelist.head->blkaddr = 0;
+ memarena->freelist.head->blksize = memarena->sizebytes;
+ memarena->freelist.head->next = memarena->freelist.head;
+ memarena->freelist.head->prev = memarena->freelist.head;
+ memarena->freelist.allocrover = memarena->freelist.head;
+ memarena->freelist.freerover = memarena->freelist.head;
+
+ count++;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_create. Return value: 0x%08x\n", memarena );
+
+ return (memarena);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_memarena_destroy(gsl_memarena_t *memarena)
+{
+ int status = GSL_SUCCESS;
+ memblk_t *p, *next;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_memarena_destroy(gsl_memarena_t *memarena=0x%08x)\n", memarena );
+
+ GSL_MEMARENA_VALIDATE(memarena);
+
+ GSL_MEMARENA_LOCK();
+
+#ifdef _DEBUG
+ // memory leak check
+ if (memarena->freelist.head->blksize != memarena->sizebytes)
+ {
+        if (GSL_MEMARENA_GET_ID == GSL_APERTURE_EMEM)
+        {
+            // external memory leak detected; release the arena mutex before bailing out
+            kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_FATAL,
+                            "ERROR: External memory leak detected.\n" );
+            GSL_MEMARENA_UNLOCK();
+            return (GSL_FAILURE);
+        }
+ }
+#endif // _DEBUG
+
+ p = memarena->freelist.head;
+ do
+ {
+ next = p->next;
+ kgsl_memarena_releasememblknode(memarena, p);
+ p = next;
+ } while (p != memarena->freelist.head);
+
+ GSL_MEMARENA_UNLOCK();
+
+ if (memarena->mutex)
+ {
+ kos_mutex_free(memarena->mutex);
+ }
+
+ kos_free((void *)memarena);
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_destroy. Return value: %B\n", GSL_SUCCESS );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_memarena_isvirtualized(gsl_memarena_t *memarena)
+{
+    // report whether mmu virtualization is enabled for this arena
+ return (GSL_MEMARENA_IS_MMU_VIRTUALIZED);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_memarena_checkconsistency(gsl_memarena_t *memarena)
+{
+ memblk_t *p;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_memarena_checkconsistency(gsl_memarena_t *memarena=0x%08x)\n", memarena );
+
+ // go through list of free blocks and make sure there are no detectable errors
+
+ p = memarena->freelist.head;
+ do
+ {
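+        // every node must be consistently double-linked, and the free blocks
+        // must appear in ascending address order without overlapping or
+        // touching (adjacent blocks are coalesced at free time)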
+ if (p->next->blkaddr != memarena->freelist.head->blkaddr)
+ {
+ if (p->prev->next->blkaddr != p->blkaddr ||
+ p->next->prev->blkaddr != p->blkaddr ||
+ p->blkaddr + p->blksize >= p->next->blkaddr)
+ {
+ KOS_ASSERT(0);
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_checkconsistency. Return value: %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+ }
+ p = p->next;
+
+ } while (p != memarena->freelist.head);
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_checkconsistency. Return value: %B\n", GSL_SUCCESS );
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_memarena_querystats(gsl_memarena_t *memarena, gsl_memarena_stats_t *stats)
+{
+#ifdef GSL_STATS_MEM
+ KOS_ASSERT(stats);
+ GSL_MEMARENA_VALIDATE(memarena);
+
+ kos_memcpy(stats, &memarena->stats, sizeof(gsl_memarena_stats_t));
+
+ return (GSL_SUCCESS);
+#else
+ // unreferenced formal parameters
+ (void) memarena;
+ (void) stats;
+
+ return (GSL_FAILURE_NOTSUPPORTED);
+#endif // GSL_STATS_MEM
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_memarena_checkfreeblock(gsl_memarena_t *memarena, int bytesneeded)
+{
+ memblk_t *p;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_memarena_checkfreeblock(gsl_memarena_t *memarena=0x%08x, int bytesneeded=%d)\n", memarena, bytesneeded );
+
+ GSL_MEMARENA_VALIDATE(memarena);
+
+ if (bytesneeded < 1)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Illegal number of bytes needed.\n" );
+ KOS_ASSERT(0);
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_checkfreeblock. Return value: %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ GSL_MEMARENA_LOCK();
+
+ p = memarena->freelist.head;
+ do
+ {
+        if (p->blksize >= (unsigned int)bytesneeded)
+        {
+            // release the arena mutex before returning on the success path
+            GSL_MEMARENA_UNLOCK();
+            kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_checkfreeblock. Return value: %B\n", GSL_SUCCESS );
+            return (GSL_SUCCESS);
+        }
+
+ p = p->next;
+ } while (p != memarena->freelist.head);
+
+ GSL_MEMARENA_UNLOCK();
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_checkfreeblock. Return value: %B\n", GSL_FAILURE );
+
+ return (GSL_FAILURE);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_memarena_alloc(gsl_memarena_t *memarena, gsl_flags_t flags, int size, gsl_memdesc_t *memdesc)
+{
+ int result = GSL_FAILURE_OUTOFMEM;
+ memblk_t *ptrfree, *ptrlast, *p;
+ unsigned int blksize;
+ unsigned int baseaddr, alignedbaseaddr, alignfragment;
+ int freeblk, alignmentshift;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_memarena_alloc(gsl_memarena_t *memarena=0x%08x, gsl_flags_t flags=0x%08x, int size=%d, gsl_memdesc_t *memdesc=%M)\n", memarena, flags, size, memdesc );
+
+ GSL_MEMARENA_VALIDATE(memarena);
+
+ if (size <= 0)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Invalid size for memory allocation.\n" );
+ KOS_ASSERT(0);
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_alloc. Return value: %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ //
+ // go through the list of free blocks. check to find block which can satisfy the alloc request
+ //
+ // if no block can satisfy the alloc request this implies that the memory is too fragmented
+ // and the requestor needs to free up other memory blocks and re-request the allocation
+ //
+ // if we do find a block that can satisfy the alloc request then reduce the size of free block
+ // by blksize and return the address after allocating the memory. if the free block size becomes
+ // 0 then remove this node from the free list
+ //
+    // the free list would be empty if all available memory were allocated. handling an
+    // empty list would require error checking in the main path, which is not desired.
+    // instead, the free list always keeps at least one node, and that node may have a
+    // block size of zero
+ //
+ // we use a next fit allocation mechanism that uses a roving pointer on a circular free block list.
+ // the pointer is advanced along the chain when searching for a fit. Thus each allocation begins
+ // looking where the previous one finished.
+ //
+
+ // when allocating from external memory aperture, round up size of requested block to multiple of page size if needed
+ if (GSL_MEMARENA_GET_ID == GSL_APERTURE_EMEM)
+ {
+ if ((flags & GSL_MEMFLAGS_FORCEPAGESIZE) || GSL_MEMARENA_IS_MMU_VIRTUALIZED)
+ {
+ if (size & (GSL_PAGESIZE-1))
+ {
+ size = ((size >> GSL_PAGESIZE_SHIFT) + 1) << GSL_PAGESIZE_SHIFT;
+ }
+ }
+ }
+
+ // determine shift count for alignment requested
+ alignmentshift = gsl_memarena_alignmentshift(flags);
+
+ // adjust size of requested block to include alignment
+ blksize = (unsigned int)((size + ((1 << alignmentshift) - 1)) >> alignmentshift) << alignmentshift;
+
+ GSL_MEMARENA_LOCK();
+
+ // check consistency, debug only
+ KGSL_DEBUG(GSL_DBGFLAGS_MEMMGR, kgsl_memarena_checkconsistency(memarena));
+
+ ptrfree = memarena->freelist.allocrover;
+ ptrlast = memarena->freelist.head->prev;
+ freeblk = 0;
+
+ do
+ {
+ // align base address
+ baseaddr = ptrfree->blkaddr + memarena->gpubaseaddr;
+ alignedbaseaddr = gsl_memarena_alignaddr(baseaddr, alignmentshift);
+
+ alignfragment = alignedbaseaddr - baseaddr;
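+        // alignfragment is the number of bytes skipped between the start of the
+        // free block and the first address that satisfies the alignment; a
+        // non-zero fragment is split off into its own free block below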
+
+ if (ptrfree->blksize >= blksize + alignfragment)
+ {
+ result = GSL_SUCCESS;
+ freeblk = 1;
+
+ memdesc->gpuaddr = alignedbaseaddr;
+ memdesc->hostptr = kgsl_memarena_gethostptr(memarena, memdesc->gpuaddr);
+ memdesc->size = blksize;
+
+ if (alignfragment > 0)
+ {
+ // insert new node to handle newly created (small) fragment
+ p = kgsl_memarena_getmemblknode(memarena);
+ p->blkaddr = ptrfree->blkaddr;
+ p->blksize = alignfragment;
+
+ p->next = ptrfree;
+ p->prev = ptrfree->prev;
+ ptrfree->prev->next = p;
+ ptrfree->prev = p;
+
+ if (ptrfree == memarena->freelist.head)
+ {
+ memarena->freelist.head = p;
+ }
+ }
+
+ ptrfree->blkaddr += alignfragment + blksize;
+ ptrfree->blksize -= alignfragment + blksize;
+
+ memarena->freelist.allocrover = ptrfree;
+
+ if (ptrfree->blksize == 0 && ptrfree != ptrlast)
+ {
+ ptrfree->prev->next = ptrfree->next;
+ ptrfree->next->prev = ptrfree->prev;
+ if (ptrfree == memarena->freelist.head)
+ {
+ memarena->freelist.head = ptrfree->next;
+ }
+ if (ptrfree == memarena->freelist.allocrover)
+ {
+ memarena->freelist.allocrover = ptrfree->next;
+ }
+ if (ptrfree == memarena->freelist.freerover)
+ {
+ memarena->freelist.freerover = ptrfree->prev;
+ }
+ p = ptrfree;
+ ptrfree = ptrfree->prev;
+ kgsl_memarena_releasememblknode(memarena, p);
+ }
+ }
+
+ ptrfree = ptrfree->next;
+
+ } while (!freeblk && ptrfree != memarena->freelist.allocrover);
+
+ GSL_MEMARENA_UNLOCK();
+
+ if (result == GSL_SUCCESS)
+ {
+ GSL_MEMARENA_STATS(
+ {
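+            // bucket the allocation by the log2 of its size in pages, capped at
+            // GSL_MEMARENA_PAGE_DIST_MAX-1, for the allocation size histogram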
+ int i = 0;
+ while (memdesc->size >> (GSL_PAGESIZE_SHIFT + i))
+ {
+ i++;
+ }
+ i = i > (GSL_MEMARENA_PAGE_DIST_MAX-1) ? (GSL_MEMARENA_PAGE_DIST_MAX-1) : i;
+ memarena->stats.allocs_pagedistribution[i]++;
+ });
+
+ GSL_MEMARENA_STATS(memarena->stats.allocs_success++);
+ }
+ else
+ {
+ GSL_MEMARENA_STATS(memarena->stats.allocs_fail++);
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_alloc. Return value: %B\n", result );
+
+ return (result);
+}
+
+//----------------------------------------------------------------------------
+
+void
+kgsl_memarena_free(gsl_memarena_t *memarena, gsl_memdesc_t *memdesc)
+{
+ //
+ // request to free a malloc'ed block from the memory arena
+ // add this block to the free list
+ // adding a block to the free list requires the following:
+ // going through the list of free blocks to decide where to add this free block (based on address)
+ // coalesce free blocks
+ //
+ memblk_t *ptrfree, *ptrend, *p;
+ int mallocfreeblk, clockwise;
+ unsigned int addrtofree;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> void kgsl_memarena_free(gsl_memarena_t *memarena=0x%08x, gsl_memdesc_t *memdesc=%M)\n", memarena, memdesc );
+
+ KOS_ASSERT(memarena);
+ if (GSL_MEMARENA_GET_SIGNATURE != GSL_MEMARENA_INSTANCE_SIGNATURE)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_free.\n" );
+ return;
+ }
+
+ // check size of malloc'ed block
+ if (memdesc->size <= 0)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Illegal size for the memdesc.\n" );
+ KOS_ASSERT(0);
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_free.\n" );
+ return;
+ }
+
+ // check address range
+ KOS_ASSERT( memarena->gpubaseaddr <= memdesc->gpuaddr);
+ KOS_ASSERT((memarena->gpubaseaddr + memarena->sizebytes) >= memdesc->gpuaddr + memdesc->size);
+
+ GSL_MEMARENA_LOCK();
+
+ // check consistency of memory map, debug only
+ KGSL_DEBUG(GSL_DBGFLAGS_MEMMGR, kgsl_memarena_checkconsistency(memarena));
+
+ addrtofree = memdesc->gpuaddr - memarena->gpubaseaddr;
+ mallocfreeblk = 1;
+
+ if (addrtofree < memarena->freelist.head->blkaddr)
+ {
+ // add node to head of free list
+
+ if (addrtofree + memdesc->size == memarena->freelist.head->blkaddr)
+ {
+ memarena->freelist.head->blkaddr = addrtofree;
+ memarena->freelist.head->blksize += memdesc->size;
+
+ mallocfreeblk = 0;
+ }
+
+ ptrfree = memarena->freelist.head->prev;
+ }
+ else if (addrtofree >= memarena->freelist.head->prev->blkaddr)
+ {
+ // add node to tail of free list
+
+ ptrfree = memarena->freelist.head->prev;
+
+ if (ptrfree->blkaddr + ptrfree->blksize == addrtofree)
+ {
+ ptrfree->blksize += memdesc->size;
+
+ mallocfreeblk = 0;
+ }
+ }
+ else
+ {
+ // determine range of free list nodes to traverse and orientation in which to traverse them
+ // keep this code segment unrolled for performance reasons!
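+        // the free rover remembers where the previous free finished, so the search
+        // starts from whichever of head, rover or tail is closest to the address
+        // being freed and walks in the shorter direction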
+ if (addrtofree > memarena->freelist.freerover->blkaddr)
+ {
+ if (addrtofree - memarena->freelist.freerover->blkaddr < memarena->freelist.head->prev->blkaddr - addrtofree)
+ {
+ ptrfree = memarena->freelist.freerover; // rover
+ ptrend = memarena->freelist.head->prev; // tail
+ clockwise = 1;
+ }
+ else
+ {
+ ptrfree = memarena->freelist.head->prev->prev; // tail
+ ptrend = memarena->freelist.freerover->prev; // rover
+ clockwise = 0;
+ }
+ }
+ else
+ {
+ if (addrtofree - memarena->freelist.head->blkaddr < memarena->freelist.freerover->blkaddr - addrtofree)
+ {
+ ptrfree = memarena->freelist.head; // head
+ ptrend = memarena->freelist.freerover; // rover
+ clockwise = 1;
+ }
+ else
+ {
+ ptrfree = memarena->freelist.freerover->prev; // rover
+ ptrend = memarena->freelist.head->prev; // head
+ clockwise = 0;
+ }
+ }
+
+ // traverse the nodes
+ do
+ {
+ if ((addrtofree >= ptrfree->blkaddr + ptrfree->blksize) &&
+ (addrtofree + memdesc->size <= ptrfree->next->blkaddr))
+ {
+ if (addrtofree == ptrfree->blkaddr + ptrfree->blksize)
+ {
+ memblk_t *next;
+
+ ptrfree->blksize += memdesc->size;
+ next = ptrfree->next;
+
+ if (ptrfree->blkaddr + ptrfree->blksize == next->blkaddr)
+ {
+ ptrfree->blksize += next->blksize;
+ ptrfree->next = next->next;
+ next->next->prev = ptrfree;
+
+ if (next == memarena->freelist.allocrover)
+ {
+ memarena->freelist.allocrover = ptrfree;
+ }
+
+ kgsl_memarena_releasememblknode(memarena, next);
+ }
+
+ mallocfreeblk = 0;
+ }
+ else if (addrtofree + memdesc->size == ptrfree->next->blkaddr)
+ {
+ ptrfree->next->blkaddr = addrtofree;
+ ptrfree->next->blksize += memdesc->size;
+
+ mallocfreeblk = 0;
+ }
+
+ break;
+ }
+
+ if (clockwise)
+ {
+ ptrfree = ptrfree->next;
+ }
+ else
+ {
+ ptrfree = ptrfree->prev;
+ }
+
+ } while (ptrfree != ptrend);
+ }
+
+ // this free block could not be coalesced, so create a new free block
+ // and add it to the free list in the memory arena
+ if (mallocfreeblk)
+ {
+ p = kgsl_memarena_getmemblknode(memarena);
+ p->blkaddr = addrtofree;
+ p->blksize = memdesc->size;
+
+ p->next = ptrfree->next;
+ p->prev = ptrfree;
+ ptrfree->next->prev = p;
+ ptrfree->next = p;
+
+ if (p->blkaddr < memarena->freelist.head->blkaddr)
+ {
+ memarena->freelist.head = p;
+ }
+
+ memarena->freelist.freerover = p;
+ }
+ else
+ {
+ memarena->freelist.freerover = ptrfree;
+ }
+
+ GSL_MEMARENA_UNLOCK();
+
+ GSL_MEMARENA_STATS(
+ {
+ int i = 0;
+ while (memdesc->size >> (GSL_PAGESIZE_SHIFT + i))
+ {
+ i++;
+ }
+ i = i > (GSL_MEMARENA_PAGE_DIST_MAX-1) ? (GSL_MEMARENA_PAGE_DIST_MAX-1) : i;
+ memarena->stats.frees_pagedistribution[i]++;
+ });
+
+ GSL_MEMARENA_STATS(memarena->stats.frees++);
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_free.\n" );
+}
+
+//----------------------------------------------------------------------------
+
+void *
+kgsl_memarena_gethostptr(gsl_memarena_t *memarena, gpuaddr_t gpuaddr)
+{
+ //
+ // get the host mapped address for a hardware device address
+ //
+
+ void *hostptr = NULL;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> void* kgsl_memarena_gethostptr(gsl_memarena_t *memarena=0x%08x, gpuaddr_t gpuaddr=0x%08x)\n", memarena, gpuaddr );
+
+ KOS_ASSERT(memarena);
+ if (GSL_MEMARENA_GET_SIGNATURE != GSL_MEMARENA_INSTANCE_SIGNATURE)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_gethostptr. Return value: 0x%08x\n", NULL );
+ return (NULL);
+ }
+
+ // check address range
+ KOS_ASSERT(gpuaddr >= memarena->gpubaseaddr);
+ KOS_ASSERT(gpuaddr < memarena->gpubaseaddr + memarena->sizebytes);
+
+ hostptr = (void *)((gpuaddr - memarena->gpubaseaddr) + memarena->hostbaseaddr);
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_gethostptr. Return value: 0x%08x\n", hostptr );
+
+ return (hostptr);
+}
+
+//----------------------------------------------------------------------------
+
+gpuaddr_t
+kgsl_memarena_getgpuaddr(gsl_memarena_t *memarena, void *hostptr)
+{
+ //
+ // get the hardware device address for a host mapped address
+ //
+
+ gpuaddr_t gpuaddr = 0;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_memarena_getgpuaddr(gsl_memarena_t *memarena=0x%08x, void *hostptr=0x%08x)\n", memarena, hostptr );
+
+ KOS_ASSERT(memarena);
+ if (GSL_MEMARENA_GET_SIGNATURE != GSL_MEMARENA_INSTANCE_SIGNATURE)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_getgpuaddr. Return value: 0x%08x\n", 0 );
+ return (0);
+ }
+
+ // check address range
+ KOS_ASSERT(hostptr >= (void *)memarena->hostbaseaddr);
+ KOS_ASSERT(hostptr < (void *)(memarena->hostbaseaddr + memarena->sizebytes));
+
+ gpuaddr = ((unsigned int)hostptr - memarena->hostbaseaddr) + memarena->gpubaseaddr;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_getgpuaddr. Return value: 0x%08x\n", gpuaddr );
+
+ return (gpuaddr);
+}
+
+//----------------------------------------------------------------------------
+
+unsigned int
+kgsl_memarena_getlargestfreeblock(gsl_memarena_t *memarena, gsl_flags_t flags)
+{
+ memblk_t *ptrfree;
+ unsigned int blocksize, largestblocksize = 0;
+ int alignmentshift;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> unsigned int kgsl_memarena_getlargestfreeblock(gsl_memarena_t *memarena=0x%08x, gsl_flags_t flags=0x%08x)\n", memarena, flags );
+
+ KOS_ASSERT(memarena);
+ if (GSL_MEMARENA_GET_SIGNATURE != GSL_MEMARENA_INSTANCE_SIGNATURE)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_getlargestfreeblock. Return value: %d\n", 0 );
+ return (0);
+ }
+
+ // determine shift count for alignment requested
+ alignmentshift = gsl_memarena_alignmentshift(flags);
+
+ GSL_MEMARENA_LOCK();
+
+ ptrfree = memarena->freelist.head;
+
+ do
+ {
+ blocksize = ptrfree->blksize - (ptrfree->blkaddr - ((ptrfree->blkaddr >> alignmentshift) << alignmentshift));
+
+ if (blocksize > largestblocksize)
+ {
+ largestblocksize = blocksize;
+ }
+
+ ptrfree = ptrfree->next;
+
+ } while (ptrfree != memarena->freelist.head);
+
+ GSL_MEMARENA_UNLOCK();
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_memarena_getlargestfreeblock. Return value: %d\n", largestblocksize );
+
+ return (largestblocksize);
+}
diff --git a/drivers/mxc/amd-gpu/common/gsl_mmu.c b/drivers/mxc/amd-gpu/common/gsl_mmu.c
new file mode 100644
index 000000000000..310677d926f0
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_mmu.c
@@ -0,0 +1,1036 @@
+/* Copyright (c) 2002,2007-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_hal.h"
+
+
+//////////////////////////////////////////////////////////////////////////////
+// types
+//////////////////////////////////////////////////////////////////////////////
+
+// ---------
+// pte debug
+// ---------
+
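+// A page table entry is one 32-bit word: a 20-bit physical page number plus
+// read and write permission bits. This struct is an overlay used to inspect
+// entries while debugging.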
+typedef struct _gsl_pte_debug_t
+{
+ unsigned int write :1;
+ unsigned int read :1;
+ unsigned int reserved :10;
+ unsigned int phyaddr :20;
+} gsl_pte_debug_t;
+
+
+//////////////////////////////////////////////////////////////////////////////
+// defines
+//////////////////////////////////////////////////////////////////////////////
+#define GSL_PT_ENTRY_SIZEBYTES 4
+#define GSL_PT_EXTRA_ENTRIES 16
+
+#define GSL_PT_PAGE_WRITE 0x00000001
+#define GSL_PT_PAGE_READ 0x00000002
+
+#define GSL_PT_PAGE_AP_MASK 0x00000003
+#define GSL_PT_PAGE_ADDR_MASK ~(GSL_PAGESIZE-1)
+
+#define GSL_MMUFLAGS_TLBFLUSH 0x80000000
+
+#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS (sizeof(unsigned char) * 8)
+
+
+//////////////////////////////////////////////////////////////////////////////
+// constants
+//////////////////////////////////////////////////////////////////////////////
+const unsigned int GSL_PT_PAGE_AP[4] = {(GSL_PT_PAGE_READ | GSL_PT_PAGE_WRITE), GSL_PT_PAGE_READ, GSL_PT_PAGE_WRITE, 0};
+
+
+/////////////////////////////////////////////////////////////////////////////
+// macros
+//////////////////////////////////////////////////////////////////////////////
+
+#define GSL_PT_ENTRY_GET(va) ((va - pagetable->va_base) >> GSL_PAGESIZE_SHIFT)
+#define GSL_PT_VIRT_GET(pte) (pagetable->va_base + (pte * GSL_PAGESIZE))
+
+#define GSL_PT_MAP_APDEFAULT GSL_PT_PAGE_AP[0]
+
+#define GSL_PT_MAP_GET(pte) *((unsigned int *)(((unsigned int)pagetable->base.hostptr) + ((pte) * GSL_PT_ENTRY_SIZEBYTES)))
+#define GSL_PT_MAP_GETADDR(pte) (GSL_PT_MAP_GET(pte) & GSL_PT_PAGE_ADDR_MASK)
+
+#define GSL_PT_MAP_DEBUG(pte) ((gsl_pte_debug_t*) &GSL_PT_MAP_GET(pte))
+
+#define GSL_PT_MAP_SETBITS(pte, bits) (GSL_PT_MAP_GET(pte) |= (((unsigned int) bits) & GSL_PT_PAGE_AP_MASK))
+#define GSL_PT_MAP_SETADDR(pte, pageaddr) (GSL_PT_MAP_GET(pte) = (GSL_PT_MAP_GET(pte) & ~GSL_PT_PAGE_ADDR_MASK) | (((unsigned int) pageaddr) & GSL_PT_PAGE_ADDR_MASK))
+
+#define GSL_PT_MAP_RESET(pte) (GSL_PT_MAP_GET(pte) = 0)
+#define GSL_PT_MAP_RESETBITS(pte, bits) (GSL_PT_MAP_GET(pte) &= ~(((unsigned int) bits) & GSL_PT_PAGE_AP_MASK))
+
+#define GSL_MMU_VIRT_TO_PAGE(va) *((unsigned int *)(pagetable->base.gpuaddr + (GSL_PT_ENTRY_GET(va) * GSL_PT_ENTRY_SIZEBYTES)))
+#define GSL_MMU_VIRT_TO_PHYS(va) ((GSL_MMU_VIRT_TO_PAGE(va) & GSL_PT_PAGE_ADDR_MASK) + (va & (GSL_PAGESIZE-1)))
+
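+// The TLB flush filter keeps one dirty bit per superpte in a byte array:
+// SETDIRTY marks a superpte range as having been remapped, ISDIRTY tests it,
+// and RESET clears the whole filter.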
+#define GSL_TLBFLUSH_FILTER_GET(superpte) *((unsigned char *)(((unsigned int)mmu->tlbflushfilter.base) + (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
+#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte) (GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
+#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte) (GSL_TLBFLUSH_FILTER_GET((superpte)) & (1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
+#define GSL_TLBFLUSH_FILTER_RESET() kos_memset(mmu->tlbflushfilter.base, 0, mmu->tlbflushfilter.size)
+
+
+//////////////////////////////////////////////////////////////////////////////
+// process index in pagetable object table
+//////////////////////////////////////////////////////////////////////////////
+OSINLINE int
+kgsl_mmu_getprocessindex(unsigned int pid, int *pindex)
+{
+ int status = GSL_SUCCESS;
+#ifdef GSL_MMU_PAGETABLE_PERPROCESS
+ if (kgsl_driver_getcallerprocessindex(pid, pindex) != GSL_SUCCESS)
+ {
+ status = GSL_FAILURE;
+ }
+#else
+ (void) pid; // unreferenced formal parameter
+ *pindex = 0;
+#endif // GSL_MMU_PAGETABLE_PERPROCESS
+ return (status);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// pagetable object for current caller process
+//////////////////////////////////////////////////////////////////////////////
+OSINLINE gsl_pagetable_t*
+kgsl_mmu_getpagetableobject(gsl_mmu_t *mmu, unsigned int pid)
+{
+ int pindex = 0;
+ if (kgsl_mmu_getprocessindex(pid, &pindex) == GSL_SUCCESS)
+ {
+ return (mmu->pagetable[pindex]);
+ }
+ else
+ {
+ return (NULL);
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// functions
+//////////////////////////////////////////////////////////////////////////////
+
+void
+kgsl_mh_intrcallback(gsl_intrid_t id, void *cookie)
+{
+ gsl_mmu_t *mmu = (gsl_mmu_t *) cookie;
+ unsigned int devindex = mmu->device->id-1; // device_id is 1 based
+
+    kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+                    "--> void kgsl_mh_intrcallback(gsl_intrid_t id=%I, void *cookie=0x%08x)\n", id, cookie );
+
+ // error condition interrupt
+ if (id == gsl_cfg_mh_intr[devindex].AXI_READ_ERROR ||
+ id == gsl_cfg_mh_intr[devindex].AXI_WRITE_ERROR ||
+ id == gsl_cfg_mh_intr[devindex].MMU_PAGE_FAULT)
+ {
+ mmu->device->ftbl.device_destroy(mmu->device);
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mh_intrcallback.\n" );
+}
+
+//----------------------------------------------------------------------------
+
+#ifdef _DEBUG
+static void
+kgsl_mmu_debug(gsl_mmu_t *mmu, gsl_mmu_debug_t *regs)
+{
+ unsigned int devindex = mmu->device->id-1; // device_id is 1 based
+
+ kos_memset(regs, 0, sizeof(gsl_mmu_debug_t));
+
+ mmu->device->ftbl.device_regread(mmu->device, gsl_cfg_mmu_reg[devindex].CONFIG, &regs->config);
+ mmu->device->ftbl.device_regread(mmu->device, gsl_cfg_mmu_reg[devindex].MPU_BASE, &regs->mpu_base);
+ mmu->device->ftbl.device_regread(mmu->device, gsl_cfg_mmu_reg[devindex].MPU_END, &regs->mpu_end);
+ mmu->device->ftbl.device_regread(mmu->device, gsl_cfg_mmu_reg[devindex].VA_RANGE, &regs->va_range);
+ mmu->device->ftbl.device_regread(mmu->device, gsl_cfg_mmu_reg[devindex].PT_BASE, &regs->pt_base);
+ mmu->device->ftbl.device_regread(mmu->device, gsl_cfg_mmu_reg[devindex].PAGE_FAULT, &regs->page_fault);
+ mmu->device->ftbl.device_regread(mmu->device, gsl_cfg_mmu_reg[devindex].TRAN_ERROR, &regs->trans_error);
+ mmu->device->ftbl.device_regread(mmu->device, gsl_cfg_mmu_reg[devindex].INVALIDATE, &regs->invalidate);
+}
+#endif
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_mmu_checkconsistency(gsl_pagetable_t *pagetable)
+{
+ unsigned int pte;
+ unsigned int data;
+ gsl_pte_debug_t *pte_debug;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_mmu_checkconsistency(gsl_pagetable_t *pagetable=0x%08x)\n", pagetable );
+
+ if (pagetable->last_superpte % GSL_PT_SUPER_PTE != 0)
+ {
+ KOS_ASSERT(0);
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_checkconsistency. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ // go through page table and make sure there are no detectable errors
+ pte = 0;
+ while (pte < pagetable->max_entries)
+ {
+ pte_debug = GSL_PT_MAP_DEBUG(pte);
+
+ if (GSL_PT_MAP_GETADDR(pte) != 0)
+ {
+ // pte is in use
+
+ // access first couple bytes of a page
+ data = *((unsigned int *)GSL_PT_VIRT_GET(pte));
+ }
+
+ pte++;
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_checkconsistency. Return value %B\n", GSL_SUCCESS );
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_mmu_destroypagetableobject(gsl_mmu_t *mmu, unsigned int pid)
+{
+ gsl_deviceid_t tmp_id;
+ gsl_device_t *tmp_device;
+ int pindex;
+ gsl_pagetable_t *pagetable;
+
+    kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "--> int kgsl_mmu_destroypagetableobject(gsl_mmu_t *mmu=0x%08x, unsigned int pid=0x%08x)\n", mmu, pid );
+
+ if (kgsl_mmu_getprocessindex(pid, &pindex) != GSL_SUCCESS)
+ {
+        kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_destroypagetableobject. Return value 0x%08x\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ pagetable = mmu->pagetable[pindex];
+
+    // if a pagetable object exists for the "current device mmu"/"current caller process" combination
+ if (pagetable)
+ {
+ // no more "device mmu"/"caller process" combinations attached to current pagetable object
+ if (pagetable->refcnt == 0)
+ {
+#ifdef _DEBUG
+ // memory leak check
+ if (pagetable->last_superpte != 0 || GSL_PT_MAP_GETADDR(pagetable->last_superpte))
+ {
+ /* many dumpx test cases forcefully exit, and thus trigger this assert. */
+ /* Because it is an annoyance for HW guys, it is disabled for dumpx */
+            if (!(gsl_driver.flags_debug & GSL_DBGFLAGS_DUMPX))
+ {
+ KOS_ASSERT(0);
+ return (GSL_FAILURE);
+ }
+ }
+#endif // _DEBUG
+
+ if (pagetable->base.gpuaddr)
+ {
+ kgsl_sharedmem_free0(&pagetable->base, GSL_CALLER_PROCESSID_GET());
+ }
+
+ kos_free(pagetable);
+
+ // clear pagetable object reference for all "device mmu"/"current caller process" combinations
+ for (tmp_id = GSL_DEVICE_ANY + 1; tmp_id <= GSL_DEVICE_MAX; tmp_id++)
+ {
+ tmp_device = &gsl_driver.device[tmp_id-1];
+
+ if (tmp_device->mmu.flags & GSL_FLAGS_STARTED)
+ {
+ tmp_device->mmu.pagetable[pindex] = NULL;
+ }
+ }
+ }
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_destroypagetableobject. Return value 0x%08x\n", GSL_SUCCESS );
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+gsl_pagetable_t*
+kgsl_mmu_createpagetableobject(gsl_mmu_t *mmu, unsigned int pid)
+{
+ //
+    // return the pagetable object for the "current device mmu"/"current caller
+    // process" combination. If none exists, set up a new pagetable object.
+ //
+ int status = GSL_SUCCESS;
+ gsl_pagetable_t *tmp_pagetable = NULL;
+ gsl_deviceid_t tmp_id;
+ gsl_device_t *tmp_device;
+ int pindex;
+ gsl_flags_t flags;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "--> gsl_pagetable_t* kgsl_mmu_createpagetableobject(gsl_mmu_t *mmu=0x%08x, unsigned int pid=0x%08x)\n", mmu, pid );
+
+ status = kgsl_mmu_getprocessindex(pid, &pindex);
+ if (status != GSL_SUCCESS)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_createpagetableobject. Return value 0x%08x\n", NULL );
+ return (NULL);
+ }
+    // if a pagetable object does not already exist for the "current device mmu"/"current caller process" combination
+ if (!mmu->pagetable[pindex])
+ {
+ // then, check if pagetable object already exists for any "other device mmu"/"current caller process" combination
+ for (tmp_id = GSL_DEVICE_ANY + 1; tmp_id <= GSL_DEVICE_MAX; tmp_id++)
+ {
+ tmp_device = &gsl_driver.device[tmp_id-1];
+
+ if (tmp_device->mmu.flags & GSL_FLAGS_STARTED)
+ {
+ if (tmp_device->mmu.pagetable[pindex])
+ {
+ tmp_pagetable = tmp_device->mmu.pagetable[pindex];
+ break;
+ }
+ }
+ }
+
+ // pagetable object exists
+ if (tmp_pagetable)
+ {
+ KOS_ASSERT(tmp_pagetable->va_base == mmu->va_base);
+ KOS_ASSERT(tmp_pagetable->va_range == mmu->va_range);
+
+ // set pagetable object reference
+ mmu->pagetable[pindex] = tmp_pagetable;
+ }
+ // create new pagetable object
+ else
+ {
+ mmu->pagetable[pindex] = (void *)kos_malloc(sizeof(gsl_pagetable_t));
+ if (!mmu->pagetable[pindex])
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Unable to allocate pagetable object.\n" );
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_createpagetableobject. Return value 0x%08x\n", NULL );
+ return (NULL);
+ }
+
+ kos_memset(mmu->pagetable[pindex], 0, sizeof(gsl_pagetable_t));
+
+ mmu->pagetable[pindex]->pid = pid;
+ mmu->pagetable[pindex]->refcnt = 0;
+ mmu->pagetable[pindex]->va_base = mmu->va_base;
+ mmu->pagetable[pindex]->va_range = mmu->va_range;
+ mmu->pagetable[pindex]->last_superpte = 0;
+ mmu->pagetable[pindex]->max_entries = (mmu->va_range >> GSL_PAGESIZE_SHIFT) + GSL_PT_EXTRA_ENTRIES;
+
+ // allocate page table memory
+ flags = (GSL_MEMFLAGS_ALIGN4K | GSL_MEMFLAGS_CONPHYS | GSL_MEMFLAGS_STRICTREQUEST);
+ status = kgsl_sharedmem_alloc0(mmu->device->id, flags, mmu->pagetable[pindex]->max_entries * GSL_PT_ENTRY_SIZEBYTES, &mmu->pagetable[pindex]->base);
+
+ if (status == GSL_SUCCESS)
+ {
+ // reset page table entries
+ kgsl_sharedmem_set0(&mmu->pagetable[pindex]->base, 0, 0, mmu->pagetable[pindex]->base.size);
+
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX, KGSL_DEBUG_DUMPX(BB_DUMP_MMU_TBLADDR, mmu->pagetable[pindex]->base.gpuaddr, 0, mmu->pagetable[pindex]->base.size, "kgsl_mmu_init"));
+ }
+ else
+ {
+ kgsl_mmu_destroypagetableobject(mmu, pid);
+ }
+ }
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_createpagetableobject. Return value 0x%08x\n", mmu->pagetable[pindex] );
+
+ return (mmu->pagetable[pindex]);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_mmu_setpagetable(gsl_device_t *device, unsigned int pid)
+{
+ //
+ // set device mmu to use current caller process's page table
+ //
+ int status = GSL_SUCCESS;
+ unsigned int devindex = device->id-1; // device_id is 1 based
+ gsl_mmu_t *mmu = &device->mmu;
+
+    kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+                     "--> int kgsl_mmu_setpagetable(gsl_device_t *device=0x%08x, unsigned int pid=0x%08x)\n", device, pid );
+
+ if (mmu->flags & GSL_FLAGS_STARTED)
+ {
+#ifdef GSL_MMU_PAGETABLE_PERPROCESS
+        // if the page table is not current, set up the mmu to use the specified page table
+ if (mmu->hwpagetable->pid != pid)
+ {
+ gsl_pagetable_t *pagetable = kgsl_mmu_getpagetableobject(mmu, pid);
+ if (pagetable)
+ {
+ mmu->hwpagetable = pagetable;
+
+ // flag tlb flush
+ mmu->flags |= GSL_MMUFLAGS_TLBFLUSH;
+
+ status = mmu->device->ftbl.mmu_setpagetable(mmu->device, gsl_cfg_mmu_reg[devindex].PT_BASE, pagetable->base.gpuaddr, pid);
+
+ GSL_MMU_STATS(mmu->stats.pt.switches++);
+ }
+ else
+ {
+ status = GSL_FAILURE;
+ }
+ }
+#endif // GSL_MMU_PAGETABLE_PERPROCESS
+
+ // if needed, invalidate device specific tlb
+ if ((mmu->flags & GSL_MMUFLAGS_TLBFLUSH) && status == GSL_SUCCESS)
+ {
+ mmu->flags &= ~GSL_MMUFLAGS_TLBFLUSH;
+
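+            // clear the dirty-superPTE filter since the entire tlb is invalidated below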
+ GSL_TLBFLUSH_FILTER_RESET();
+
+ status = mmu->device->ftbl.mmu_tlbinvalidate(mmu->device, gsl_cfg_mmu_reg[devindex].INVALIDATE, pid);
+
+ GSL_MMU_STATS(mmu->stats.tlbflushes++);
+ }
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_setpagetable. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_mmu_init(gsl_device_t *device)
+{
+ //
+    // initialize device mmu
+ //
+ // call this with the global lock held
+ //
+ int status;
+ gsl_flags_t flags;
+ gsl_pagetable_t *pagetable;
+ unsigned int devindex = device->id-1; // device_id is 1 based
+ gsl_mmu_t *mmu = &device->mmu;
+#ifdef _DEBUG
+ gsl_mmu_debug_t regs;
+#endif // _DEBUG
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_mmu_init(gsl_device_t *device=0x%08x)\n", device );
+
+ if (device->ftbl.mmu_tlbinvalidate == NULL || device->ftbl.mmu_setpagetable == NULL ||
+ !(device->flags & GSL_FLAGS_INITIALIZED))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_init. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ if (mmu->flags & GSL_FLAGS_INITIALIZED0)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_INFO, "MMU already initialized.\n" );
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_init. Return value %B\n", GSL_SUCCESS );
+ return (GSL_SUCCESS);
+ }
+
+ // setup backward reference
+ mmu->device = device;
+
+ // disable MMU when running in safe mode
+ if (device->flags & GSL_FLAGS_SAFEMODE)
+ {
+ mmu->config = 0x00000000;
+ }
+
+ // setup MMU and sub-client behavior
+ device->ftbl.device_regwrite(device, gsl_cfg_mmu_reg[devindex].CONFIG, mmu->config);
+
+ // enable axi interrupts
+ kgsl_intr_attach(&device->intr, gsl_cfg_mh_intr[devindex].AXI_READ_ERROR, kgsl_mh_intrcallback, (void *) mmu);
+ kgsl_intr_attach(&device->intr, gsl_cfg_mh_intr[devindex].AXI_WRITE_ERROR, kgsl_mh_intrcallback, (void *) mmu);
+ kgsl_intr_enable(&device->intr, gsl_cfg_mh_intr[devindex].AXI_READ_ERROR);
+ kgsl_intr_enable(&device->intr, gsl_cfg_mh_intr[devindex].AXI_WRITE_ERROR);
+
+ mmu->refcnt = 0;
+ mmu->flags |= GSL_FLAGS_INITIALIZED0;
+
+ // MMU enabled
+ if (mmu->config & 0x1)
+ {
+ // idle device
+ device->ftbl.device_idle(device, GSL_TIMEOUT_DEFAULT);
+
+ // make sure aligned to pagesize
+ KOS_ASSERT((mmu->mpu_base & ((1 << GSL_PAGESIZE_SHIFT)-1)) == 0);
+ KOS_ASSERT(((mmu->mpu_base + mmu->mpu_range) & ((1 << GSL_PAGESIZE_SHIFT)-1)) == 0);
+
+ // define physical memory range accessible by the core
+ device->ftbl.device_regwrite(device, gsl_cfg_mmu_reg[devindex].MPU_BASE, mmu->mpu_base);
+ device->ftbl.device_regwrite(device, gsl_cfg_mmu_reg[devindex].MPU_END, mmu->mpu_base + mmu->mpu_range);
+
+ // enable page fault interrupt
+ kgsl_intr_attach(&device->intr, gsl_cfg_mh_intr[devindex].MMU_PAGE_FAULT, kgsl_mh_intrcallback, (void *) mmu);
+ kgsl_intr_enable(&device->intr, gsl_cfg_mh_intr[devindex].MMU_PAGE_FAULT);
+
+ mmu->flags |= GSL_FLAGS_INITIALIZED;
+
+ // sub-client MMU lookups require address translation
+ if ((mmu->config & ~0x1) > 0)
+ {
+ // make sure virtual address range is a multiple of 64Kb
+ KOS_ASSERT((mmu->va_range & ((1 << 16)-1)) == 0);
+
+ // setup pagetable object
+ pagetable = kgsl_mmu_createpagetableobject(mmu, GSL_CALLER_PROCESSID_GET());
+ if (!pagetable)
+ {
+ kgsl_mmu_close(device);
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_init. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ mmu->hwpagetable = pagetable;
+
+ // create tlb flush filter to track dirty superPTE's -- one bit per superPTE
+ mmu->tlbflushfilter.size = (mmu->va_range / (GSL_PAGESIZE * GSL_PT_SUPER_PTE * 8)) + 1;
+ mmu->tlbflushfilter.base = (unsigned int *)kos_malloc(mmu->tlbflushfilter.size);
+ if (!mmu->tlbflushfilter.base)
+ {
+ kgsl_mmu_close(device);
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_init. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ GSL_TLBFLUSH_FILTER_RESET();
+
+ // set page table base
+ device->ftbl.device_regwrite(device, gsl_cfg_mmu_reg[devindex].PT_BASE, mmu->hwpagetable->base.gpuaddr);
+
+ // define virtual address range
+ device->ftbl.device_regwrite(device, gsl_cfg_mmu_reg[devindex].VA_RANGE, (mmu->hwpagetable->va_base | (mmu->hwpagetable->va_range >> 16)));
+
+ // allocate memory used for completing r/w operations that cannot be mapped by the MMU
+ flags = (GSL_MEMFLAGS_ALIGN32 | GSL_MEMFLAGS_CONPHYS | GSL_MEMFLAGS_STRICTREQUEST);
+ status = kgsl_sharedmem_alloc0(device->id, flags, 32, &mmu->dummyspace);
+ if (status != GSL_SUCCESS)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Unable to allocate dummy space memory.\n" );
+ kgsl_mmu_close(device);
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_init. Return value %B\n", status );
+ return (status);
+ }
+
+ device->ftbl.device_regwrite(device, gsl_cfg_mmu_reg[devindex].TRAN_ERROR, mmu->dummyspace.gpuaddr);
+
+ // call device specific tlb invalidate
+ device->ftbl.mmu_tlbinvalidate(device, gsl_cfg_mmu_reg[devindex].INVALIDATE, mmu->hwpagetable->pid);
+
+ GSL_MMU_STATS(mmu->stats.tlbflushes++);
+
+ mmu->flags |= GSL_FLAGS_STARTED;
+ }
+ }
+
+ KGSL_DEBUG(GSL_DBGFLAGS_MMU, kgsl_mmu_debug(&device->mmu, &regs));
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_init. Return value %B\n", GSL_SUCCESS );
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_mmu_map(gsl_mmu_t *mmu, gpuaddr_t gpubaseaddr, const gsl_scatterlist_t *scatterlist, gsl_flags_t flags, unsigned int pid)
+{
+ //
+ // map physical pages into the gpu page table
+ //
+ int status = GSL_SUCCESS;
+ unsigned int i, phyaddr, ap;
+ unsigned int pte, ptefirst, ptelast, superpte;
+ int flushtlb;
+ gsl_pagetable_t *pagetable;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_mmu_map(gsl_mmu_t *mmu=0x%08x, gpuaddr_t gpubaseaddr=0x%08x, gsl_scatterlist_t *scatterlist=%M, gsl_flags_t flags=%d, unsigned int pid=%d)\n",
+ mmu, gpubaseaddr, scatterlist, flags, pid );
+
+ KOS_ASSERT(scatterlist);
+
+ if (scatterlist->num <= 0)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: num pages is too small.\n" );
+ KOS_ASSERT(0);
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_map. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ // get gpu access permissions
+ ap = GSL_PT_PAGE_AP[((flags & GSL_MEMFLAGS_GPUAP_MASK) >> GSL_MEMFLAGS_GPUAP_SHIFT)];
+
+ pagetable = kgsl_mmu_getpagetableobject(mmu, pid);
+ if (!pagetable)
+ {
+ return (GSL_FAILURE);
+ }
+
+ // check consistency, debug only
+ KGSL_DEBUG(GSL_DBGFLAGS_MMU, kgsl_mmu_checkconsistency(pagetable));
+
+ ptefirst = GSL_PT_ENTRY_GET(gpubaseaddr);
+ ptelast = GSL_PT_ENTRY_GET(gpubaseaddr + (GSL_PAGESIZE * (scatterlist->num-1)));
+ flushtlb = 0;
+
+ if (!GSL_PT_MAP_GETADDR(ptefirst))
+ {
+        // tlb needs to be flushed when the first or last pte is not at a superpte boundary
+ if ((ptefirst & (GSL_PT_SUPER_PTE-1)) != 0 || ((ptelast+1) & (GSL_PT_SUPER_PTE-1)) != 0)
+ {
+ flushtlb = 1;
+ }
+
+ // create page table entries
+ for (pte = ptefirst; pte <= ptelast; pte++)
+ {
+ if (scatterlist->contiguous)
+ {
+ phyaddr = scatterlist->pages[0] + ((pte-ptefirst) * GSL_PAGESIZE);
+ }
+ else
+ {
+ phyaddr = scatterlist->pages[pte-ptefirst];
+ }
+
+ GSL_PT_MAP_SETADDR(pte, phyaddr);
+ GSL_PT_MAP_SETBITS(pte, ap);
+
+ // tlb needs to be flushed when a dirty superPTE gets backed
+ if ((pte & (GSL_PT_SUPER_PTE-1)) == 0)
+ {
+ if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
+ {
+ flushtlb = 1;
+ }
+ }
+
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX, KGSL_DEBUG_DUMPX(BB_DUMP_SET_MMUTBL, pte , *(unsigned int*)(((char*)pagetable->base.hostptr) + (pte * GSL_PT_ENTRY_SIZEBYTES)), 0, "kgsl_mmu_map"));
+ }
+
+ if (flushtlb)
+ {
+ // every device's tlb needs to be flushed because the current page table is shared among all devices
+ for (i = 0; i < GSL_DEVICE_MAX; i++)
+ {
+ if (gsl_driver.device[i].flags & GSL_FLAGS_INITIALIZED)
+ {
+ gsl_driver.device[i].mmu.flags |= GSL_MMUFLAGS_TLBFLUSH;
+ }
+ }
+ }
+
+ // determine new last mapped superPTE
+ superpte = ptelast - (ptelast & (GSL_PT_SUPER_PTE-1));
+ if (superpte > pagetable->last_superpte)
+ {
+ pagetable->last_superpte = superpte;
+ }
+
+ GSL_MMU_STATS(mmu->stats.pt.maps++);
+ }
+ else
+ {
+        // the first pte is already mapped - this should never happen
+        kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_FATAL, "FATAL: kgsl_mmu_map called for an address range that is already mapped.\n" );
+ KOS_ASSERT(0);
+ status = GSL_FAILURE;
+ }
+
+    kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_map. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_mmu_unmap(gsl_mmu_t *mmu, gpuaddr_t gpubaseaddr, int range, unsigned int pid)
+{
+ //
+ // remove mappings in the specified address range from the gpu page table
+ //
+ int status = GSL_SUCCESS;
+ gsl_pagetable_t *pagetable;
+ unsigned int numpages;
+ unsigned int pte, ptefirst, ptelast, superpte;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_mmu_unmap(gsl_mmu_t *mmu=0x%08x, gpuaddr_t gpubaseaddr=0x%08x, int range=%d, unsigned int pid=%d)\n",
+ mmu, gpubaseaddr, range, pid );
+
+ if (range <= 0)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Range is too small.\n" );
+ KOS_ASSERT(0);
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_unmap. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ numpages = (range >> GSL_PAGESIZE_SHIFT);
+ if (range & (GSL_PAGESIZE-1))
+ {
+ numpages++;
+ }
+
+ pagetable = kgsl_mmu_getpagetableobject(mmu, pid);
+ if (!pagetable)
+ {
+ return (GSL_FAILURE);
+ }
+
+ // check consistency, debug only
+ KGSL_DEBUG(GSL_DBGFLAGS_MMU, kgsl_mmu_checkconsistency(pagetable));
+
+ ptefirst = GSL_PT_ENTRY_GET(gpubaseaddr);
+ ptelast = GSL_PT_ENTRY_GET(gpubaseaddr + (GSL_PAGESIZE * (numpages-1)));
+
+ if (GSL_PT_MAP_GETADDR(ptefirst))
+ {
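+        // mark the superPTE holding the first entry dirty so that a later map into it triggers a tlb flush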
+ superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
+ GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
+
+ // remove page table entries
+ for (pte = ptefirst; pte <= ptelast; pte++)
+ {
+ GSL_PT_MAP_RESET(pte);
+
+ superpte = pte - (pte & (GSL_PT_SUPER_PTE-1));
+ if (pte == superpte)
+ {
+ GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
+ }
+
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX, KGSL_DEBUG_DUMPX(BB_DUMP_SET_MMUTBL, pte, *(unsigned int*)(((char*)pagetable->base.hostptr) + (pte * GSL_PT_ENTRY_SIZEBYTES)), 0, "kgsl_mmu_unmap, reset superPTE"));
+ }
+
+ // determine new last mapped superPTE
+ superpte = ptelast - (ptelast & (GSL_PT_SUPER_PTE-1));
+ if (superpte == pagetable->last_superpte && pagetable->last_superpte >= GSL_PT_SUPER_PTE)
+ {
+ do
+ {
+ pagetable->last_superpte -= GSL_PT_SUPER_PTE;
+ } while (!GSL_PT_MAP_GETADDR(pagetable->last_superpte) && pagetable->last_superpte >= GSL_PT_SUPER_PTE);
+ }
+
+ GSL_MMU_STATS(mmu->stats.pt.unmaps++);
+ }
+ else
+ {
+        // the first pte is not mapped - this should never happen
+        kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_FATAL, "FATAL: kgsl_mmu_unmap called for an address range that is not mapped.\n" );
+ KOS_ASSERT(0);
+ status = GSL_FAILURE;
+ }
+
+ // invalidate tlb, debug only
+ KGSL_DEBUG(GSL_DBGFLAGS_MMU, mmu->device->ftbl.mmu_tlbinvalidate(mmu->device, gsl_cfg_mmu_reg[mmu->device->id-1].INVALIDATE, pagetable->pid));
+
+    kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_unmap. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_mmu_getmap(gsl_mmu_t *mmu, gpuaddr_t gpubaseaddr, int range, gsl_scatterlist_t *scatterlist, unsigned int pid)
+{
+ //
+ // obtain scatter list of physical pages for the given gpu address range.
+ // if all pages are physically contiguous they are coalesced into a single
+ // scatterlist entry.
+ //
+ gsl_pagetable_t *pagetable;
+ unsigned int numpages;
+ unsigned int pte, ptefirst, ptelast;
+ unsigned int contiguous = 1;
+
+ numpages = (range >> GSL_PAGESIZE_SHIFT);
+ if (range & (GSL_PAGESIZE-1))
+ {
+ numpages++;
+ }
+
+ if (range <= 0 || scatterlist->num != numpages)
+ {
+        kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Invalid range or mismatched scatter list size.\n" );
+ KOS_ASSERT(0);
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_getmap. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ pagetable = kgsl_mmu_getpagetableobject(mmu, pid);
+ if (!pagetable)
+ {
+ return (GSL_FAILURE);
+ }
+
+ ptefirst = GSL_PT_ENTRY_GET(gpubaseaddr);
+ ptelast = GSL_PT_ENTRY_GET(gpubaseaddr + (GSL_PAGESIZE * (numpages-1)));
+
+ // determine whether pages are physically contiguous
+ if (numpages > 1)
+ {
+ for (pte = ptefirst; pte <= ptelast-1; pte++)
+ {
+ if (GSL_PT_MAP_GETADDR(pte) + GSL_PAGESIZE != GSL_PT_MAP_GETADDR(pte+1))
+ {
+ contiguous = 0;
+ break;
+ }
+ }
+ }
+
+ if (!contiguous)
+ {
+ // populate scatter list
+ for (pte = ptefirst; pte <= ptelast; pte++)
+ {
+ scatterlist->pages[pte-ptefirst] = GSL_PT_MAP_GETADDR(pte);
+ }
+ }
+ else
+ {
+ // coalesce physically contiguous pages into a single scatter list entry
+ scatterlist->pages[0] = GSL_PT_MAP_GETADDR(ptefirst);
+ }
+
+ scatterlist->contiguous = contiguous;
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_mmu_close(gsl_device_t *device)
+{
+ //
+ // close device mmu
+ //
+ // call this with the global lock held
+ //
+ gsl_mmu_t *mmu = &device->mmu;
+ unsigned int devindex = mmu->device->id-1; // device_id is 1 based
+#ifdef _DEBUG
+ int i;
+#endif // _DEBUG
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "--> int kgsl_mmu_close(gsl_device_t *device=0x%08x)\n", device );
+
+ if (mmu->flags & GSL_FLAGS_INITIALIZED0)
+ {
+ if (mmu->flags & GSL_FLAGS_STARTED)
+ {
+ // terminate pagetable object
+ kgsl_mmu_destroypagetableobject(mmu, GSL_CALLER_PROCESSID_GET());
+ }
+
+ // no more processes attached to current device mmu
+ if (mmu->refcnt == 0)
+ {
+#ifdef _DEBUG
+ // check if there are any orphaned pagetable objects lingering around
+ for (i = 0; i < GSL_MMU_PAGETABLE_MAX; i++)
+ {
+ if (mmu->pagetable[i])
+ {
+ /* many dumpx test cases forcefully exit, and thus trigger this assert. */
+ /* Because it is an annoyance for HW guys, it is disabled for dumpx */
+                    if (!(gsl_driver.flags_debug & GSL_DBGFLAGS_DUMPX))
+ {
+ KOS_ASSERT(0);
+ return (GSL_FAILURE);
+ }
+ }
+ }
+#endif // _DEBUG
+
+ // disable mh interrupts
+ kgsl_intr_detach(&device->intr, gsl_cfg_mh_intr[devindex].AXI_READ_ERROR);
+ kgsl_intr_detach(&device->intr, gsl_cfg_mh_intr[devindex].AXI_WRITE_ERROR);
+ kgsl_intr_detach(&device->intr, gsl_cfg_mh_intr[devindex].MMU_PAGE_FAULT);
+
+ // disable MMU
+ device->ftbl.device_regwrite(device, gsl_cfg_mmu_reg[devindex].CONFIG, 0x00000000);
+
+ if (mmu->tlbflushfilter.base)
+ {
+ kos_free(mmu->tlbflushfilter.base);
+ }
+
+ if (mmu->dummyspace.gpuaddr)
+ {
+ kgsl_sharedmem_free0(&mmu->dummyspace, GSL_CALLER_PROCESSID_GET());
+ }
+
+ mmu->flags &= ~GSL_FLAGS_STARTED;
+ mmu->flags &= ~GSL_FLAGS_INITIALIZED;
+ mmu->flags &= ~GSL_FLAGS_INITIALIZED0;
+ }
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_close. Return value %B\n", GSL_SUCCESS );
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_mmu_attachcallback(gsl_mmu_t *mmu, unsigned int pid)
+{
+ //
+ // attach process
+ //
+ // call this with the global lock held
+ //
+ int status = GSL_SUCCESS;
+ gsl_pagetable_t *pagetable;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "--> int kgsl_mmu_attachcallback(gsl_mmu_t *mmu=0x%08x, unsigned int pid=0x%08x)\n", mmu, pid );
+
+ if (mmu->flags & GSL_FLAGS_INITIALIZED0)
+ {
+ // attach to current device mmu
+ mmu->refcnt++;
+
+ if (mmu->flags & GSL_FLAGS_STARTED)
+ {
+ // attach to pagetable object
+ pagetable = kgsl_mmu_createpagetableobject(mmu, pid);
+ if(pagetable)
+ {
+ pagetable->refcnt++;
+ }
+ else
+ {
+ status = GSL_FAILURE;
+ }
+ }
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_attachcallback. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_mmu_detachcallback(gsl_mmu_t *mmu, unsigned int pid)
+{
+ //
+ // detach process
+ //
+ int status = GSL_SUCCESS;
+ gsl_pagetable_t *pagetable;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "--> int kgsl_mmu_detachcallback(gsl_mmu_t *mmu=0x%08x, unsigned int pid=0x%08x)\n", mmu, pid );
+
+ if (mmu->flags & GSL_FLAGS_INITIALIZED0)
+ {
+ // detach from current device mmu
+ mmu->refcnt--;
+
+ if (mmu->flags & GSL_FLAGS_STARTED)
+ {
+ // detach from pagetable object
+ pagetable = kgsl_mmu_getpagetableobject(mmu, pid);
+ if(pagetable)
+ {
+ pagetable->refcnt--;
+ }
+ else
+ {
+ status = GSL_FAILURE;
+ }
+ }
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_mmu_detachcallback. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_mmu_querystats(gsl_mmu_t *mmu, gsl_mmustats_t *stats)
+{
+#ifdef GSL_STATS_MMU
+ int status = GSL_SUCCESS;
+
+ KOS_ASSERT(stats);
+
+ if (mmu->flags & GSL_FLAGS_STARTED)
+ {
+ kos_memcpy(stats, &mmu->stats, sizeof(gsl_mmustats_t));
+ }
+ else
+ {
+ kos_memset(stats, 0, sizeof(gsl_mmustats_t));
+ }
+
+ return (status);
+#else
+ // unreferenced formal parameters
+ (void) mmu;
+ (void) stats;
+
+ return (GSL_FAILURE_NOTSUPPORTED);
+#endif // GSL_STATS_MMU
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_mmu_bist(gsl_mmu_t *mmu)
+{
+ // unreferenced formal parameter
+ (void) mmu;
+
+ return (GSL_SUCCESS);
+}
diff --git a/drivers/mxc/amd-gpu/common/gsl_ringbuffer.c b/drivers/mxc/amd-gpu/common/gsl_ringbuffer.c
new file mode 100644
index 000000000000..c4b62b0174d2
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_ringbuffer.c
@@ -0,0 +1,1154 @@
+/* Copyright (c) 2002,2007-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_hal.h"
+#include "gsl_cmdstream.h"
+
+#ifdef GSL_BLD_YAMATO
+
+//////////////////////////////////////////////////////////////////////////////
+// ucode
+//////////////////////////////////////////////////////////////////////////////
+#define uint32 unsigned int
+
+#include "pm4_microcode.inl"
+#include "pfp_microcode_nrt.inl"
+
+#undef uint32
+
+
+//////////////////////////////////////////////////////////////////////////////
+// defines
+//////////////////////////////////////////////////////////////////////////////
+#define GSL_RB_NOP_SIZEDWORDS 2 // default is 2
+#define GSL_RB_PROTECTED_MODE_CONTROL 0x00000000 // protected mode error checking below register address 0x800
+ // note: if CP_INTERRUPT packet is used then checking needs
+ // to change to below register address 0x7C8
+
+
+//////////////////////////////////////////////////////////////////////////////
+// ringbuffer size log2 quadwords equivalent
+//////////////////////////////////////////////////////////////////////////////
+OSINLINE unsigned int
+gsl_ringbuffer_sizelog2quadwords(unsigned int sizedwords)
+{
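+    // sizedwords/2 is the ring size in quadwords (1 quadword = 2 dwords); return floor(log2) of that value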
+ unsigned int sizelog2quadwords = 0;
+ int i = sizedwords >> 1;
+ while (i >>= 1)
+ {
+ sizelog2quadwords++;
+ }
+ return (sizelog2quadwords);
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// private prototypes
+//////////////////////////////////////////////////////////////////////////////
+#ifdef _DEBUG
+static void kgsl_ringbuffer_debug(gsl_ringbuffer_t *rb, gsl_rb_debug_t *rb_debug);
+#endif
+
+
+//////////////////////////////////////////////////////////////////////////////
+// functions
+//////////////////////////////////////////////////////////////////////////////
+
+void
+kgsl_cp_intrcallback(gsl_intrid_t id, void *cookie)
+{
+ gsl_ringbuffer_t *rb = (gsl_ringbuffer_t *) cookie;
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+ "--> void kgsl_cp_intrcallback(gsl_intrid_t id=%I, void *cookie=0x%08x)\n", id, cookie );
+
+ switch(id)
+ {
+ // error condition interrupt
+ case GSL_INTR_YDX_CP_T0_PACKET_IN_IB:
+ case GSL_INTR_YDX_CP_OPCODE_ERROR:
+ case GSL_INTR_YDX_CP_PROTECTED_MODE_ERROR:
+ case GSL_INTR_YDX_CP_RESERVED_BIT_ERROR:
+ case GSL_INTR_YDX_CP_IB_ERROR:
+
+ rb->device->ftbl.device_destroy(rb->device);
+ break;
+
+ // non-error condition interrupt
+ case GSL_INTR_YDX_CP_SW_INT:
+ case GSL_INTR_YDX_CP_IB2_INT:
+ case GSL_INTR_YDX_CP_IB1_INT:
+ case GSL_INTR_YDX_CP_RING_BUFFER:
+
+ // signal intr completion event
+ kos_event_signal(rb->device->intr.evnt[id]);
+ break;
+
+ default:
+
+ break;
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_cp_intrcallback.\n" );
+}
+
+//----------------------------------------------------------------------------
+
+void
+kgsl_ringbuffer_watchdog()
+{
+ gsl_ringbuffer_t *rb = &(gsl_driver.device[GSL_DEVICE_YAMATO-1]).ringbuffer; // device_id is 1 based
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+ "--> void kgsl_ringbuffer_watchdog()\n" );
+
+ if (rb->flags & GSL_FLAGS_STARTED)
+ {
+ GSL_RB_GET_READPTR(rb, &rb->rptr);
+
+ // ringbuffer is currently not empty
+ if (rb->rptr != rb->wptr)
+ {
+ // and a rptr sample was taken during interval n-1
+ if (rb->watchdog.flags & GSL_FLAGS_ACTIVE)
+ {
+ // and the rptr did not advance between interval n-1 and n
+ if (rb->rptr == rb->watchdog.rptr_sample)
+ {
+ // then the core has hung
+                    kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_FATAL,
+                                    "FATAL: Watchdog detected a core hang.\n" );
+
+ rb->device->ftbl.device_destroy(rb->device);
+ return;
+ }
+ }
+
+ // save rptr sample for interval n
+ rb->watchdog.flags |= GSL_FLAGS_ACTIVE;
+ rb->watchdog.rptr_sample = rb->rptr;
+ }
+ else
+ {
+ // clear rptr sample for interval n
+ rb->watchdog.flags &= ~GSL_FLAGS_ACTIVE;
+ }
+
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_watchdog.\n" );
+}
+
+//----------------------------------------------------------------------------
+
+#ifdef _DEBUG
+
+OSINLINE void
+kgsl_ringbuffer_checkregister(unsigned int reg, int pmodecheck)
+{
+ if (pmodecheck)
+ {
+ // check for register protection mode violation
+ if (reg <= (GSL_RB_PROTECTED_MODE_CONTROL & 0x3FFF))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Register protection mode violation.\n" );
+ KOS_ASSERT(0);
+ }
+ }
+
+ // range check register offset
+ if (reg > (gsl_driver.device[GSL_DEVICE_YAMATO-1].regspace.sizebytes >> 2))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Register out of range.\n" );
+ KOS_ASSERT(0);
+ }
+}
+
+//----------------------------------------------------------------------------
+
+void
+kgsl_ringbuffer_checkpm4type0(unsigned int header, unsigned int** cmds, int pmodeoff)
+{
+ pm4_type0 pm4header = *((pm4_type0*) &header);
+ unsigned int reg;
+
+ if (pm4header.one_reg_wr)
+ {
+ reg = pm4header.base_index;
+ }
+ else
+ {
+ reg = pm4header.base_index + pm4header.count;
+ }
+
+ kgsl_ringbuffer_checkregister(reg, !pmodeoff);
+
+ *cmds += pm4header.count + 1;
+}
+
+//----------------------------------------------------------------------------
+
+void
+kgsl_ringbuffer_checkpm4type3(unsigned int header, unsigned int** cmds, int indirection, int pmodeoff)
+{
+ pm4_type3 pm4header = *((pm4_type3*) &header);
+ unsigned int *ordinal2 = *cmds;
+ unsigned int *ibcmds, *end;
+ unsigned int reg, length;
+
+ // check indirect buffer level
+ if (indirection > 2)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Only two levels of indirection supported.\n" );
+ KOS_ASSERT(0);
+ }
+
+ switch(pm4header.it_opcode)
+ {
+ case PM4_INDIRECT_BUFFER:
+ case PM4_INDIRECT_BUFFER_PFD:
+
+ // determine ib host base and end address
+ ibcmds = (unsigned int*) kgsl_sharedmem_convertaddr(*ordinal2, 0);
+ end = ibcmds + *(ordinal2 + 1);
+
+ // walk through the ib
+ while(ibcmds < end)
+ {
+ unsigned int tmpheader = *(ibcmds++);
+
+ switch(tmpheader & PM4_PKT_MASK)
+ {
+ case PM4_TYPE0_PKT:
+ kgsl_ringbuffer_checkpm4type0(tmpheader, &ibcmds, pmodeoff);
+ break;
+
+ case PM4_TYPE1_PKT:
+ case PM4_TYPE2_PKT:
+ break;
+
+ case PM4_TYPE3_PKT:
+ kgsl_ringbuffer_checkpm4type3(tmpheader, &ibcmds, (indirection + 1), pmodeoff);
+ break;
+ }
+ }
+ break;
+
+ case PM4_ME_INIT:
+
+ if(indirection != 0)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: ME INIT packet cannot reside in an ib.\n" );
+ KOS_ASSERT(0);
+ }
+ break;
+
+ case PM4_REG_RMW:
+
+ reg = (*ordinal2) & 0x1FFF;
+
+ kgsl_ringbuffer_checkregister(reg, !pmodeoff);
+
+ break;
+
+ case PM4_SET_CONSTANT:
+
+ if((((*ordinal2) >> 16) & 0xFF) == 0x4) // incremental register update
+ {
+ reg = 0x2000 + ((*ordinal2) & 0x3FF); // gfx decode space address starts at 0x2000
+ length = pm4header.count - 1;
+
+ kgsl_ringbuffer_checkregister(reg + length, 0);
+ }
+ break;
+
+ case PM4_LOAD_CONSTANT_CONTEXT:
+
+ if(((*(ordinal2 + 1) >> 16) & 0xFF) == 0x4) // incremental register update
+ {
+ reg = 0x2000 + (*(ordinal2 + 1) & 0x3FF); // gfx decode space address starts at 0x2000
+ length = *(ordinal2 + 2);
+
+ kgsl_ringbuffer_checkregister(reg + length, 0);
+ }
+ break;
+
+ case PM4_COND_WRITE:
+
+ if(((*ordinal2) & 0x00000100) == 0x0) // write to register
+ {
+ reg = *(ordinal2 + 4) & 0x3FFF;
+
+ kgsl_ringbuffer_checkregister(reg, !pmodeoff);
+ }
+ break;
+ }
+
+ *cmds += pm4header.count + 1;
+}
+
+//----------------------------------------------------------------------------
+
+void
+kgsl_ringbuffer_checkpm4(unsigned int* cmds, unsigned int sizedwords, int pmodeoff)
+{
+ unsigned int *ringcmds = cmds;
+ unsigned int *end = cmds + sizedwords;
+
+ while(ringcmds < end)
+ {
+ unsigned int header = *(ringcmds++);
+
+ switch(header & PM4_PKT_MASK)
+ {
+ case PM4_TYPE0_PKT:
+ kgsl_ringbuffer_checkpm4type0(header, &ringcmds, pmodeoff);
+ break;
+
+ case PM4_TYPE1_PKT:
+ case PM4_TYPE2_PKT:
+ break;
+
+ case PM4_TYPE3_PKT:
+ kgsl_ringbuffer_checkpm4type3(header, &ringcmds, 0, pmodeoff);
+ break;
+ }
+ }
+}
+
+#endif // _DEBUG
+
+//----------------------------------------------------------------------------
+
+static void
+kgsl_ringbuffer_submit(gsl_ringbuffer_t *rb)
+{
+ unsigned int value;
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+ "--> static void kgsl_ringbuffer_submit(gsl_ringbuffer_t *rb=0x%08x)\n", rb );
+
+ KOS_ASSERT(rb->wptr != 0);
+
+ kgsl_device_active(rb->device);
+
+ GSL_RB_UPDATE_WPTR_POLLING(rb);
+
+ // send the wptr to the hw
+ rb->device->ftbl.device_regwrite(rb->device, mmCP_RB_WPTR, rb->wptr);
+
+ // force wptr register to be updated
+ do
+ {
+ rb->device->ftbl.device_regread(rb->device, mmCP_RB_WPTR, &value);
+ } while (value != rb->wptr);
+
+ rb->flags |= GSL_FLAGS_ACTIVE;
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_submit.\n" );
+}
+
+//----------------------------------------------------------------------------
+
+static int
+kgsl_ringbuffer_waitspace(gsl_ringbuffer_t *rb, unsigned int numcmds, int wptr_ahead)
+{
+ int nopcount;
+ unsigned int freecmds;
+ unsigned int *cmds;
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+ "--> static int kgsl_ringbuffer_waitspace(gsl_ringbuffer_t *rb=0x%08x, unsigned int numcmds=%d, int wptr_ahead=%d)\n",
+ rb, numcmds, wptr_ahead );
+
+
+ // if wptr ahead, fill the remaining with NOPs
+ if (wptr_ahead)
+ {
+ nopcount = rb->sizedwords - rb->wptr - 1; // -1 for header
+
+ cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+ GSL_RB_WRITE(cmds, pm4_nop_packet(nopcount));
+ rb->wptr++;
+
+ kgsl_ringbuffer_submit(rb);
+
+ rb->wptr = 0;
+
+ GSL_RB_STATS(rb->stats.wraps++);
+ }
+
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX, KGSL_DEBUG_DUMPX(BB_DUMP_RBWAIT, GSL_DEVICE_YAMATO, rb->wptr, numcmds, "kgsl_ringbuffer_waitspace"));
+
+ // wait for space in ringbuffer
+ for( ; ; )
+ {
+ GSL_RB_GET_READPTR(rb, &rb->rptr);
+
+ freecmds = rb->rptr - rb->wptr;
+
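+        // freecmds == 0 means rptr has caught up with wptr (ring fully consumed); otherwise wait until the gap to rptr exceeds numcmds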
+ if ((freecmds == 0) || (freecmds > numcmds))
+ {
+ break;
+ }
+
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_waitspace. Return value %B\n", GSL_SUCCESS );
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+static unsigned int *
+kgsl_ringbuffer_addcmds(gsl_ringbuffer_t *rb, unsigned int numcmds)
+{
+ unsigned int *ptr;
+ int status = GSL_SUCCESS;
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+ "--> static unsigned int* kgsl_ringbuffer_addcmds(gsl_ringbuffer_t *rb=0x%08x, unsigned int numcmds=%d)\n",
+ rb, numcmds );
+
+ KOS_ASSERT(numcmds < rb->sizedwords);
+
+ // update host copy of read pointer when running in safe mode
+ if (rb->device->flags & GSL_FLAGS_SAFEMODE)
+ {
+ GSL_RB_GET_READPTR(rb, &rb->rptr);
+ }
+
+ // check for available space
+ if (rb->wptr >= rb->rptr)
+ {
+ // wptr ahead or equal to rptr
+ if ((rb->wptr + numcmds) > (rb->sizedwords - GSL_RB_NOP_SIZEDWORDS)) // reserve dwords for nop packet
+ {
+ status = kgsl_ringbuffer_waitspace(rb, numcmds, 1);
+ }
+ }
+ else
+ {
+ // wptr behind rptr
+ if ((rb->wptr + numcmds) >= rb->rptr)
+ {
+ status = kgsl_ringbuffer_waitspace(rb, numcmds, 0);
+ }
+
+ // check for remaining space
+ if ((rb->wptr + numcmds) > (rb->sizedwords - GSL_RB_NOP_SIZEDWORDS)) // reserve dwords for nop packet
+ {
+ status = kgsl_ringbuffer_waitspace(rb, numcmds, 1);
+ }
+ }
+
+ ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+ rb->wptr += numcmds;
+
+ if (status == GSL_SUCCESS)
+ {
+        kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_addcmds. Return value 0x%08x\n", ptr );
+ return (ptr);
+ }
+ else
+ {
+        kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_addcmds. Return value 0x%08x\n", NULL );
+ return (NULL);
+ }
+}
+
+//----------------------------------------------------------------------------
+int
+kgsl_ringbuffer_start(gsl_ringbuffer_t *rb)
+{
+ int status;
+ cp_rb_cntl_u cp_rb_cntl;
+ int i;
+ unsigned int *cmds;
+ gsl_device_t *device = rb->device;
+
+    kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+                    "--> int kgsl_ringbuffer_start(gsl_ringbuffer_t *rb=0x%08x)\n", rb );
+
+ if (rb->flags & GSL_FLAGS_STARTED)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_start. Return value %B\n", GSL_SUCCESS );
+ return (GSL_SUCCESS);
+ }
+
+ // clear memptrs values
+ kgsl_sharedmem_set0(&rb->memptrs_desc, 0, 0, sizeof(gsl_rbmemptrs_t));
+
+ // clear ringbuffer
+ kgsl_sharedmem_set0(&rb->buffer_desc, 0, 0x12341234, (rb->sizedwords << 2));
+
+ // setup WPTR polling address
+ device->ftbl.device_regwrite(device, mmCP_RB_WPTR_BASE, (rb->memptrs_desc.gpuaddr + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));
+
+ // setup WPTR delay
+ device->ftbl.device_regwrite(device, mmCP_RB_WPTR_DELAY, 0/*0x70000010*/);
+
+ // setup RB_CNTL
+ device->ftbl.device_regread(device, mmCP_RB_CNTL, (unsigned int *)&cp_rb_cntl);
+
+ cp_rb_cntl.f.rb_bufsz = gsl_ringbuffer_sizelog2quadwords(rb->sizedwords); // size of ringbuffer
+ cp_rb_cntl.f.rb_blksz = rb->blksizequadwords; // quadwords to read before updating mem RPTR
+ cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; // WPTR polling
+ cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE; // mem RPTR writebacks
+
+ device->ftbl.device_regwrite(device, mmCP_RB_CNTL, cp_rb_cntl.val);
+
+ // setup RB_BASE
+ device->ftbl.device_regwrite(device, mmCP_RB_BASE, rb->buffer_desc.gpuaddr);
+
+ // setup RPTR_ADDR
+ device->ftbl.device_regwrite(device, mmCP_RB_RPTR_ADDR, rb->memptrs_desc.gpuaddr + GSL_RB_MEMPTRS_RPTR_OFFSET);
+
+ // explicitly clear all cp interrupts when running in safe mode
+ if (rb->device->flags & GSL_FLAGS_SAFEMODE)
+ {
+ device->ftbl.device_regwrite(device, mmCP_INT_ACK, 0xFFFFFFFF);
+ }
+
+ // setup scratch/timestamp addr
+ device->ftbl.device_regwrite(device, mmSCRATCH_ADDR, device->memstore.gpuaddr + GSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));
+
+ // setup scratch/timestamp mask
+ device->ftbl.device_regwrite(device, mmSCRATCH_UMSK, GSL_RB_MEMPTRS_SCRATCH_MASK);
+
+ // load the CP ucode
+ device->ftbl.device_regwrite(device, mmCP_DEBUG, 0x02000000);
+ device->ftbl.device_regwrite(device, mmCP_ME_RAM_WADDR, 0);
+
+ for (i = 0; i < PM4_MICROCODE_SIZE; i++ )
+ {
+ device->ftbl.device_regwrite(device, mmCP_ME_RAM_DATA, aPM4_Microcode[i][0]);
+ device->ftbl.device_regwrite(device, mmCP_ME_RAM_DATA, aPM4_Microcode[i][1]);
+ device->ftbl.device_regwrite(device, mmCP_ME_RAM_DATA, aPM4_Microcode[i][2]);
+ }
+
+ // load the prefetch parser ucode
+ device->ftbl.device_regwrite(device, mmCP_PFP_UCODE_ADDR, 0);
+
+ for ( i = 0; i < PFP_MICROCODE_SIZE_NRT; i++ )
+ {
+ device->ftbl.device_regwrite(device, mmCP_PFP_UCODE_DATA, aPFP_Microcode_nrt[i]);
+ }
+
+ // queue thresholds ???
+ device->ftbl.device_regwrite(device, mmCP_QUEUE_THRESHOLDS, 0x000C0804);
+
+ // reset pointers
+ rb->rptr = 0;
+ rb->wptr = 0;
+
+ // init timestamp
+ rb->timestamp = 0;
+ GSL_RB_INIT_TIMESTAMP(rb);
+
+ // clear ME_HALT to start micro engine
+ device->ftbl.device_regwrite(device, mmCP_ME_CNTL, 0);
+
+ // ME_INIT
+ cmds = kgsl_ringbuffer_addcmds(rb, 19);
+
+ GSL_RB_WRITE(cmds, PM4_HDR_ME_INIT);
+ GSL_RB_WRITE(cmds, 0x000003ff); // All fields present (bits 9:0)
+ GSL_RB_WRITE(cmds, 0x00000000); // Disable/Enable Real-Time Stream processing (present but ignored)
+ GSL_RB_WRITE(cmds, 0x00000000); // Enable (2D to 3D) and (3D to 2D) implicit synchronization (present but ignored)
+ GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(mmRB_SURFACE_INFO));
+ GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(mmPA_SC_WINDOW_OFFSET));
+ GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(mmVGT_MAX_VTX_INDX));
+ GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(mmSQ_PROGRAM_CNTL));
+ GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(mmRB_DEPTHCONTROL));
+ GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(mmPA_SU_POINT_SIZE));
+ GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(mmPA_SC_LINE_CNTL));
+ GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(mmPA_SU_POLY_OFFSET_FRONT_SCALE));
+ GSL_RB_WRITE(cmds, 0x80000180); // Vertex and Pixel Shader Start Addresses in instructions (3 DWORDS per instruction)
+ GSL_RB_WRITE(cmds, 0x00000001); // Maximum Contexts
+    GSL_RB_WRITE(cmds, 0x00000000);          // Write confirm interval; the CP waits wait_interval * 16 clocks between polls
+ GSL_RB_WRITE(cmds, 0x00000000); // NQ and External Memory Swap
+ GSL_RB_WRITE(cmds, GSL_RB_PROTECTED_MODE_CONTROL); // Protected mode error checking
+ GSL_RB_WRITE(cmds, 0x00000000); // Disable header dumping and Header dump address
+ GSL_RB_WRITE(cmds, 0x00000000); // Header dump size
+
+ KGSL_DEBUG(GSL_DBGFLAGS_PM4CHECK, kgsl_ringbuffer_checkpm4((unsigned int *)rb->buffer_desc.hostptr, 19, 1));
+ KGSL_DEBUG(GSL_DBGFLAGS_PM4, KGSL_DEBUG_DUMPPM4((unsigned int *)rb->buffer_desc.hostptr, 19));
+
+ kgsl_ringbuffer_submit(rb);
+
+ // idle device to validate ME INIT
+ status = device->ftbl.device_idle(device, GSL_TIMEOUT_DEFAULT);
+
+ if (status == GSL_SUCCESS)
+ {
+ rb->flags |= GSL_FLAGS_STARTED;
+ }
+
+ // enable cp interrupts
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_CP_SW_INT, kgsl_cp_intrcallback, (void *) rb);
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_CP_T0_PACKET_IN_IB, kgsl_cp_intrcallback, (void *) rb);
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_CP_OPCODE_ERROR, kgsl_cp_intrcallback, (void *) rb);
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_CP_PROTECTED_MODE_ERROR, kgsl_cp_intrcallback, (void *) rb);
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_CP_RESERVED_BIT_ERROR, kgsl_cp_intrcallback, (void *) rb);
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_CP_IB_ERROR, kgsl_cp_intrcallback, (void *) rb);
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_CP_IB2_INT, kgsl_cp_intrcallback, (void *) rb);
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_CP_IB1_INT, kgsl_cp_intrcallback, (void *) rb);
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_CP_RING_BUFFER, kgsl_cp_intrcallback, (void *) rb);
+ kgsl_intr_enable(&device->intr, GSL_INTR_YDX_CP_SW_INT);
+ kgsl_intr_enable(&device->intr, GSL_INTR_YDX_CP_T0_PACKET_IN_IB);
+ kgsl_intr_enable(&device->intr, GSL_INTR_YDX_CP_OPCODE_ERROR);
+ kgsl_intr_enable(&device->intr, GSL_INTR_YDX_CP_PROTECTED_MODE_ERROR);
+ kgsl_intr_enable(&device->intr, GSL_INTR_YDX_CP_RESERVED_BIT_ERROR);
+ kgsl_intr_enable(&device->intr, GSL_INTR_YDX_CP_IB_ERROR);
+ kgsl_intr_enable(&device->intr, GSL_INTR_YDX_CP_IB2_INT);
+ kgsl_intr_enable(&device->intr, GSL_INTR_YDX_CP_IB1_INT);
+ kgsl_intr_enable(&device->intr, GSL_INTR_YDX_CP_RING_BUFFER);
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_start. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_ringbuffer_stop(gsl_ringbuffer_t *rb)
+{
+    kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+                    "--> int kgsl_ringbuffer_stop(gsl_ringbuffer_t *rb=0x%08x)\n", rb );
+
+ if (rb->flags & GSL_FLAGS_STARTED)
+ {
+ // disable cp interrupts
+ kgsl_intr_detach(&rb->device->intr, GSL_INTR_YDX_CP_SW_INT);
+ kgsl_intr_detach(&rb->device->intr, GSL_INTR_YDX_CP_T0_PACKET_IN_IB);
+ kgsl_intr_detach(&rb->device->intr, GSL_INTR_YDX_CP_OPCODE_ERROR);
+ kgsl_intr_detach(&rb->device->intr, GSL_INTR_YDX_CP_PROTECTED_MODE_ERROR);
+ kgsl_intr_detach(&rb->device->intr, GSL_INTR_YDX_CP_RESERVED_BIT_ERROR);
+ kgsl_intr_detach(&rb->device->intr, GSL_INTR_YDX_CP_IB_ERROR);
+ kgsl_intr_detach(&rb->device->intr, GSL_INTR_YDX_CP_IB2_INT);
+ kgsl_intr_detach(&rb->device->intr, GSL_INTR_YDX_CP_IB1_INT);
+ kgsl_intr_detach(&rb->device->intr, GSL_INTR_YDX_CP_RING_BUFFER);
+
+ // ME_HALT
+ rb->device->ftbl.device_regwrite(rb->device, mmCP_ME_CNTL, 0x10000000);
+
+ rb->flags &= ~GSL_FLAGS_STARTED;
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_stop. Return value %B\n", GSL_SUCCESS );
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_ringbuffer_init(gsl_device_t *device)
+{
+ int status;
+ gsl_flags_t flags;
+ gsl_ringbuffer_t *rb = &device->ringbuffer;
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_ringbuffer_init(gsl_device_t *device=0x%08x)\n", device );
+
+ rb->device = device;
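+    // ring size in dwords: (1 << sizelog2quadwords) quadwords, 2 dwords per quadword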
+ rb->sizedwords = (2 << gsl_cfg_rb_sizelog2quadwords);
+ rb->blksizequadwords = gsl_cfg_rb_blksizequadwords;
+
+ // allocate memory for ringbuffer, needs to be double octword aligned
+ // align on page from contiguous physical memory
+ flags = (GSL_MEMFLAGS_ALIGNPAGE | GSL_MEMFLAGS_CONPHYS | GSL_MEMFLAGS_STRICTREQUEST);
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX, flags = (GSL_MEMFLAGS_ALIGNPAGE | GSL_MEMFLAGS_STRICTREQUEST)); /* set MMU table for ringbuffer */
+
+ status = kgsl_sharedmem_alloc0(device->id, flags, (rb->sizedwords << 2), &rb->buffer_desc);
+
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX, KGSL_DEBUG_DUMPX(BB_DUMP_RINGBUF_SET, (unsigned int)rb->buffer_desc.gpuaddr, (unsigned int)rb->buffer_desc.hostptr, 0, "kgsl_ringbuffer_init"));
+
+ if (status != GSL_SUCCESS)
+ {
+ kgsl_ringbuffer_close(rb);
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_init. Return value %B\n", status );
+ return (status);
+ }
+
+ // allocate memory for polling and timestamps
+ flags = (GSL_MEMFLAGS_ALIGN32 | GSL_MEMFLAGS_CONPHYS);
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX, flags = GSL_MEMFLAGS_ALIGN32);
+
+ status = kgsl_sharedmem_alloc0(device->id, flags, sizeof(gsl_rbmemptrs_t), &rb->memptrs_desc);
+
+ if (status != GSL_SUCCESS)
+ {
+ kgsl_ringbuffer_close(rb);
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_init. Return value %B\n", status );
+ return (status);
+ }
+
+ // overlay structure on memptrs memory
+ rb->memptrs = (gsl_rbmemptrs_t *)rb->memptrs_desc.hostptr;
+
+ rb->flags |= GSL_FLAGS_INITIALIZED;
+
+ // validate command stream data when running in safe mode
+ if (device->flags & GSL_FLAGS_SAFEMODE)
+ {
+ gsl_driver.flags_debug |= GSL_DBGFLAGS_PM4CHECK;
+ }
+
+ // start ringbuffer
+ status = kgsl_ringbuffer_start(rb);
+
+ if (status != GSL_SUCCESS)
+ {
+ kgsl_ringbuffer_close(rb);
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_init. Return value %B\n", status );
+ return (status);
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_init. Return value %B\n", GSL_SUCCESS );
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_ringbuffer_close(gsl_ringbuffer_t *rb)
+{
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_ringbuffer_close(gsl_ringbuffer_t *rb=0x%08x)\n", rb );
+
+ // stop ringbuffer
+ kgsl_ringbuffer_stop(rb);
+
+ // free buffer
+ if (rb->buffer_desc.hostptr)
+ {
+ kgsl_sharedmem_free0(&rb->buffer_desc, GSL_CALLER_PROCESSID_GET());
+ }
+
+ // free memory pointers
+ if (rb->memptrs_desc.hostptr)
+ {
+ kgsl_sharedmem_free0(&rb->memptrs_desc, GSL_CALLER_PROCESSID_GET());
+ }
+
+ rb->flags &= ~GSL_FLAGS_INITIALIZED;
+
+ kos_memset(rb, 0, sizeof(gsl_ringbuffer_t));
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_close. Return value %B\n", GSL_SUCCESS );
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+gsl_timestamp_t
+kgsl_ringbuffer_issuecmds(gsl_device_t *device, int pmodeoff, unsigned int *cmds, int sizedwords, unsigned int pid)
+{
+ gsl_ringbuffer_t *rb = &device->ringbuffer;
+ unsigned int pmodesizedwords;
+ unsigned int *ringcmds;
+ unsigned int timestamp;
+
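+    // the pmodeoff argument is forced off here, so the protected mode disable/re-enable packets below are never emitted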
+ pmodeoff = 0;
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+ "--> gsl_timestamp_t kgsl_ringbuffer_issuecmds(gsl_device_t *device=0x%08x, int pmodeoff=%d, unsigned int *cmds=0x%08x, int sizedwords=%d, unsigned int pid=0x%08x)\n",
+ device, pmodeoff, cmds, sizedwords, pid );
+
+ if (!(device->ringbuffer.flags & GSL_FLAGS_STARTED))
+ {
+ return (0);
+ }
+
+ // set mmu pagetable
+ kgsl_mmu_setpagetable(device, pid);
+
+ KGSL_DEBUG(GSL_DBGFLAGS_PM4CHECK, kgsl_ringbuffer_checkpm4(cmds, sizedwords, pmodeoff));
+ KGSL_DEBUG(GSL_DBGFLAGS_PM4, KGSL_DEBUG_DUMPPM4(cmds, sizedwords));
+
+ // reserve space to temporarily turn off protected mode error checking if needed
+ pmodesizedwords = pmodeoff ? 8 : 0;
+
+#if defined GSL_RB_TIMESTAMP_INTERUPT
+ pmodesizedwords += 2;
+#endif
+ // allocate space in ringbuffer
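+    // the extra 6 dwords hold the start-of-pipeline and end-of-pipeline timestamp packets appended below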
+ ringcmds = kgsl_ringbuffer_addcmds(rb, pmodesizedwords + sizedwords + 6);
+
+ if (pmodeoff)
+ {
+ // disable protected mode error checking
+ *ringcmds++ = pm4_type3_packet(PM4_ME_INIT, 2);
+ *ringcmds++ = 0x00000080;
+ *ringcmds++ = 0x00000000;
+ }
+
+ // copy the cmds to the ringbuffer
+ kos_memcpy(ringcmds, cmds, (sizedwords << 2));
+
+ ringcmds += sizedwords;
+
+ if (pmodeoff)
+ {
+ *ringcmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *ringcmds++ = 0;
+
+ // re-enable protected mode error checking
+ *ringcmds++ = pm4_type3_packet(PM4_ME_INIT, 2);
+ *ringcmds++ = 0x00000080;
+ *ringcmds++ = GSL_RB_PROTECTED_MODE_CONTROL;
+ }
+
+ // increment timestamp
+ rb->timestamp++;
+ timestamp = rb->timestamp;
+
+ // start-of-pipeline and end-of-pipeline timestamps
+ *ringcmds++ = pm4_type0_packet(mmCP_TIMESTAMP, 1);
+ *ringcmds++ = rb->timestamp;
+ *ringcmds++ = pm4_type3_packet(PM4_EVENT_WRITE, 3);
+ *ringcmds++ = CACHE_FLUSH_TS;
+ *ringcmds++ = device->memstore.gpuaddr + GSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp);
+ *ringcmds++ = rb->timestamp;
+
+#if defined GSL_RB_TIMESTAMP_INTERUPT
+ *ringcmds++ = pm4_type3_packet(PM4_INTERRUPT, 1);
+ *ringcmds++ = 0x80000000;
+#endif
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX, KGSL_DEBUG_DUMPX(BB_DUMP_MEMWRITE, (unsigned int)((char*)ringcmds - ((pmodesizedwords + sizedwords + 6) << 2)), (unsigned int)((char*)ringcmds - ((pmodesizedwords + sizedwords + 6) << 2)), (pmodesizedwords + sizedwords + 6) << 2, "kgsl_ringbuffer_issuecmds"));
+
+ // issue the commands
+ kgsl_ringbuffer_submit(rb);
+
+ // stats
+ GSL_RB_STATS(rb->stats.wordstotal += sizedwords);
+ GSL_RB_STATS(rb->stats.issues++);
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_issuecmds. Return value %d\n", timestamp );
+
+ // return timestamp of issued commands
+ return (timestamp);
+}
+
+//----------------------------------------------------------------------------
+int
+kgsl_ringbuffer_issueibcmds(gsl_device_t *device, int drawctxt_index, gpuaddr_t ibaddr, int sizedwords, gsl_timestamp_t *timestamp, gsl_flags_t flags)
+{
+ unsigned int link[3];
+ int dumpx_swap;
+    (void)dumpx_swap;  // used only when dumpx debugging is enabled
+
+    kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+                     "--> int kgsl_ringbuffer_issueibcmds(gsl_device_t *device=0x%08x, int drawctxt_index=%d, gpuaddr_t ibaddr=0x%08x, int sizedwords=%d, gsl_timestamp_t *timestamp=0x%08x)\n",
+ device, drawctxt_index, ibaddr, sizedwords, timestamp );
+
+ if (!(device->ringbuffer.flags & GSL_FLAGS_STARTED))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_issueibcmds. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ KOS_ASSERT(ibaddr);
+ KOS_ASSERT(sizedwords);
+
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX, dumpx_swap = kgsl_dumpx_parse_ibs(ibaddr, sizedwords));
+
+ // context switch if needed
+ kgsl_drawctxt_switch(device, &device->drawctxt[drawctxt_index], flags);
+
+ link[0] = PM4_HDR_INDIRECT_BUFFER_PFD;
+ link[1] = ibaddr;
+ link[2] = sizedwords;
+
+ *timestamp = kgsl_ringbuffer_issuecmds(device, 0, &link[0], 3, GSL_CALLER_PROCESSID_GET());
+
+ // idle device when running in safe mode
+ if (device->flags & GSL_FLAGS_SAFEMODE)
+ {
+ device->ftbl.device_idle(device, GSL_TIMEOUT_DEFAULT);
+ }
+ else
+ {
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX,
+ {
+ // insert wait for idle after every IB1
+ // this is conservative but works reliably and is ok even for performance simulations
+ device->ftbl.device_idle(device, GSL_TIMEOUT_DEFAULT);
+ });
+ }
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX,
+ {
+ if(dumpx_swap)
+ {
+ KGSL_DEBUG_DUMPX( BB_DUMP_EXPORT_CBUF, 0, 0, 0, "resolve");
+ KGSL_DEBUG_DUMPX( BB_DUMP_FLUSH,0,0,0," ");
+ }
+ });
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_issueibcmds. Return value %B\n", GSL_SUCCESS );
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+#ifdef _DEBUG
+static void
+kgsl_ringbuffer_debug(gsl_ringbuffer_t *rb, gsl_rb_debug_t *rb_debug)
+{
+ kos_memset(rb_debug, 0, sizeof(gsl_rb_debug_t));
+
+ rb_debug->pm4_ucode_rel = PM4_MICROCODE_VERSION;
+ rb_debug->pfp_ucode_rel = PFP_MICROCODE_VERSION;
+
+ rb->device->ftbl.device_regread(rb->device, mmCP_RB_BASE, (unsigned int *)&rb_debug->cp_rb_base);
+ rb->device->ftbl.device_regread(rb->device, mmCP_RB_CNTL, (unsigned int *)&rb_debug->cp_rb_cntl);
+ rb->device->ftbl.device_regread(rb->device, mmCP_RB_RPTR_ADDR, (unsigned int *)&rb_debug->cp_rb_rptr_addr);
+ rb->device->ftbl.device_regread(rb->device, mmCP_RB_RPTR, (unsigned int *)&rb_debug->cp_rb_rptr);
+ rb->device->ftbl.device_regread(rb->device, mmCP_RB_WPTR, (unsigned int *)&rb_debug->cp_rb_wptr);
+ rb->device->ftbl.device_regread(rb->device, mmCP_RB_WPTR_BASE, (unsigned int *)&rb_debug->cp_rb_wptr_base);
+ rb->device->ftbl.device_regread(rb->device, mmSCRATCH_UMSK, (unsigned int *)&rb_debug->scratch_umsk);
+ rb->device->ftbl.device_regread(rb->device, mmSCRATCH_ADDR, (unsigned int *)&rb_debug->scratch_addr);
+ rb->device->ftbl.device_regread(rb->device, mmCP_ME_CNTL, (unsigned int *)&rb_debug->cp_me_cntl);
+ rb->device->ftbl.device_regread(rb->device, mmCP_ME_STATUS, (unsigned int *)&rb_debug->cp_me_status);
+ rb->device->ftbl.device_regread(rb->device, mmCP_DEBUG, (unsigned int *)&rb_debug->cp_debug);
+ rb->device->ftbl.device_regread(rb->device, mmCP_STAT, (unsigned int *)&rb_debug->cp_stat);
+ rb->device->ftbl.device_regread(rb->device, mmRBBM_STATUS, (unsigned int *)&rb_debug->rbbm_status);
+ rb_debug->sop_timestamp = kgsl_cmdstream_readtimestamp(rb->device->id, GSL_TIMESTAMP_CONSUMED);
+ rb_debug->eop_timestamp = kgsl_cmdstream_readtimestamp(rb->device->id, GSL_TIMESTAMP_RETIRED);
+}
+#endif
+
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_ringbuffer_querystats(gsl_ringbuffer_t *rb, gsl_rbstats_t *stats)
+{
+#ifdef GSL_STATS_RINGBUFFER
+ KOS_ASSERT(stats);
+
+ if (!(rb->flags & GSL_FLAGS_STARTED))
+ {
+ return (GSL_FAILURE);
+ }
+
+ kos_memcpy(stats, &rb->stats, sizeof(gsl_rbstats_t));
+
+ return (GSL_SUCCESS);
+#else
+ // unreferenced formal parameters
+ (void) rb;
+ (void) stats;
+
+ return (GSL_FAILURE_NOTSUPPORTED);
+#endif // GSL_STATS_RINGBUFFER
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_ringbuffer_bist(gsl_ringbuffer_t *rb)
+{
+ unsigned int *cmds;
+ unsigned int temp, k, j;
+ int status;
+ int i;
+#ifdef _DEBUG
+ gsl_rb_debug_t rb_debug;
+#endif
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_ringbuffer_bist(gsl_ringbuffer_t *rb=0x%08x)\n", rb );
+
+ if (!(rb->flags & GSL_FLAGS_STARTED))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_bist. Return value %d\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ // simple nop submit
+ cmds = kgsl_ringbuffer_addcmds(rb, 2);
+ if (!cmds)
+ {
+#ifdef _DEBUG
+ kgsl_ringbuffer_debug(rb, &rb_debug);
+#endif
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_bist. Return value %d\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ GSL_RB_WRITE(cmds, pm4_nop_packet(1));
+ GSL_RB_WRITE(cmds, 0xDEADBEEF);
+
+ kgsl_ringbuffer_submit(rb);
+
+ status = rb->device->ftbl.device_idle(rb->device, GSL_TIMEOUT_DEFAULT);
+
+ if (status != GSL_SUCCESS)
+ {
+#ifdef _DEBUG
+ kgsl_ringbuffer_debug(rb, &rb_debug);
+#endif
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_bist. Return value %d\n", status );
+ return (status);
+ }
+
+ // simple scratch submit
+ cmds = kgsl_ringbuffer_addcmds(rb, 2);
+ if (!cmds)
+ {
+#ifdef _DEBUG
+ kgsl_ringbuffer_debug(rb, &rb_debug);
+#endif
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_bist. Return value %d\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ GSL_RB_WRITE(cmds, pm4_type0_packet(mmSCRATCH_REG7, 1));
+ GSL_RB_WRITE(cmds, 0xFEEDF00D);
+
+ kgsl_ringbuffer_submit(rb);
+
+ status = rb->device->ftbl.device_idle(rb->device, GSL_TIMEOUT_DEFAULT);
+
+ if (status != GSL_SUCCESS)
+ {
+#ifdef _DEBUG
+ kgsl_ringbuffer_debug(rb, &rb_debug);
+#endif
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_bist. Return value %d\n", status );
+ return (status);
+ }
+
+ rb->device->ftbl.device_regread(rb->device, mmSCRATCH_REG7, &temp);
+
+ if (temp != 0xFEEDF00D)
+ {
+#ifdef _DEBUG
+ kgsl_ringbuffer_debug(rb, &rb_debug);
+#endif
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_bist. Return value %d\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ // simple wraps
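+	// each pass fills a large batch with scratch register writes so that
+	// successive submissions wrap around the end of the ring buffer; after
+	// idling, SCRATCH_REG7 must hold the last value written by the batch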
+ for (i = 0; i < 256; i+=2)
+ {
+ j = ((rb->sizedwords >> 2) - 256) + i;
+
+ cmds = kgsl_ringbuffer_addcmds(rb, j);
+ if (!cmds)
+ {
+#ifdef _DEBUG
+ kgsl_ringbuffer_debug(rb, &rb_debug);
+#endif
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_bist. Return value %d\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ k = 0;
+
+ while (k < j)
+ {
+ k+=2;
+ GSL_RB_WRITE(cmds, pm4_type0_packet(mmSCRATCH_REG7, 1));
+ GSL_RB_WRITE(cmds, k);
+ }
+
+ kgsl_ringbuffer_submit(rb);
+
+ status = rb->device->ftbl.device_idle(rb->device, GSL_TIMEOUT_DEFAULT);
+
+ if (status != GSL_SUCCESS)
+ {
+#ifdef _DEBUG
+ kgsl_ringbuffer_debug(rb, &rb_debug);
+#endif
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_bist. Return value %d\n", status );
+ return (status);
+ }
+
+ rb->device->ftbl.device_regread(rb->device, mmSCRATCH_REG7, &temp);
+
+ if (temp != k)
+ {
+#ifdef _DEBUG
+ kgsl_ringbuffer_debug(rb, &rb_debug);
+#endif
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_bist. Return value %d\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+ }
+
+ // max size submits, TODO do this at least with regreads
+ for (i = 0; i < 256; i++)
+ {
+ cmds = kgsl_ringbuffer_addcmds(rb, (rb->sizedwords >> 2));
+ if (!cmds)
+ {
+#ifdef _DEBUG
+ kgsl_ringbuffer_debug(rb, &rb_debug);
+#endif
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_bist. Return value %d\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ GSL_RB_WRITE(cmds, pm4_nop_packet((rb->sizedwords >> 2) - 1));
+
+ kgsl_ringbuffer_submit(rb);
+
+ status = rb->device->ftbl.device_idle(rb->device, GSL_TIMEOUT_DEFAULT);
+
+ if (status != GSL_SUCCESS)
+ {
+#ifdef _DEBUG
+ kgsl_ringbuffer_debug(rb, &rb_debug);
+#endif
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_bist. Return value %d\n", status );
+ return (status);
+ }
+ }
+
+ // submit load with randomness
+
+#ifdef GSL_RB_USE_MEM_TIMESTAMP
+ // scratch memptr validate
+#endif // GSL_RB_USE_MEM_TIMESTAMP
+
+#ifdef GSL_RB_USE_MEM_RPTR
+ // rptr memptr validate
+#endif // GSL_RB_USE_MEM_RPTR
+
+#ifdef GSL_RB_USE_WPTR_POLLING
+ // wptr memptr validate
+#endif // GSL_RB_USE_WPTR_POLLING
+
+ kgsl_log_write( KGSL_LOG_GROUP_COMMAND | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_ringbuffer_bist. Return value %d\n", GSL_SUCCESS );
+
+ return (GSL_SUCCESS);
+}
+
+#endif
+
diff --git a/drivers/mxc/amd-gpu/common/gsl_sharedmem.c b/drivers/mxc/amd-gpu/common/gsl_sharedmem.c
new file mode 100644
index 000000000000..51e66f97c52e
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_sharedmem.c
@@ -0,0 +1,937 @@
+/* Copyright (c) 2002,2007-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_hal.h"
+
+/////////////////////////////////////////////////////////////////////////////
+// macros
+//////////////////////////////////////////////////////////////////////////////
+#define GSL_SHMEM_APERTURE_MARK(aperture_id) \
+ (shmem->priv |= (((aperture_id + 1) << GSL_APERTURE_SHIFT) & GSL_APERTURE_MASK))
+
+#define GSL_SHMEM_APERTURE_ISMARKED(aperture_id) \
+ (((shmem->priv & GSL_APERTURE_MASK) >> GSL_APERTURE_SHIFT) & (aperture_id + 1))
+
+#define GSL_MEMFLAGS_APERTURE_GET(flags, aperture_id) \
+ aperture_id = (gsl_apertureid_t)((flags & GSL_MEMFLAGS_APERTURE_MASK) >> GSL_MEMFLAGS_APERTURE_SHIFT); \
+ KOS_ASSERT(aperture_id < GSL_APERTURE_MAX);
+
+#define GSL_MEMFLAGS_CHANNEL_GET(flags, channel_id) \
+ channel_id = (gsl_channelid_t)((flags & GSL_MEMFLAGS_CHANNEL_MASK) >> GSL_MEMFLAGS_CHANNEL_SHIFT); \
+ KOS_ASSERT(channel_id < GSL_CHANNEL_MAX);
+
+#define GSL_MEMDESC_APERTURE_SET(memdesc, aperture_index) \
+ memdesc->priv = (memdesc->priv & ~GSL_APERTURE_MASK) | ((aperture_index << GSL_APERTURE_SHIFT) & GSL_APERTURE_MASK);
+
+#define GSL_MEMDESC_DEVICE_SET(memdesc, device_id) \
+ memdesc->priv = (memdesc->priv & ~GSL_DEVICEID_MASK) | ((device_id << GSL_DEVICEID_SHIFT) & GSL_DEVICEID_MASK);
+
+#define GSL_MEMDESC_EXTALLOC_SET(memdesc, flag) \
+ memdesc->priv = (memdesc->priv & ~GSL_EXTALLOC_MASK) | ((flag << GSL_EXTALLOC_SHIFT) & GSL_EXTALLOC_MASK);
+
+#define GSL_MEMDESC_APERTURE_GET(memdesc, aperture_index) \
+ KOS_ASSERT(memdesc); \
+ aperture_index = ((memdesc->priv & GSL_APERTURE_MASK) >> GSL_APERTURE_SHIFT); \
+ KOS_ASSERT(aperture_index < GSL_SHMEM_MAX_APERTURES);
+
+#define GSL_MEMDESC_DEVICE_GET(memdesc, device_id) \
+ KOS_ASSERT(memdesc); \
+ device_id = (gsl_deviceid_t)((memdesc->priv & GSL_DEVICEID_MASK) >> GSL_DEVICEID_SHIFT); \
+ KOS_ASSERT(device_id <= GSL_DEVICE_MAX);
+
+#define GSL_MEMDESC_EXTALLOC_ISMARKED(memdesc) \
+ ((memdesc->priv & GSL_EXTALLOC_MASK) >> GSL_EXTALLOC_SHIFT)
+
+
+//////////////////////////////////////////////////////////////////////////////
+// aperture index in shared memory object
+//////////////////////////////////////////////////////////////////////////////
+OSINLINE int
+kgsl_sharedmem_getapertureindex(gsl_sharedmem_t *shmem, gsl_apertureid_t aperture_id, gsl_channelid_t channel_id)
+{
+ KOS_ASSERT(shmem->aperturelookup[aperture_id][channel_id] < shmem->numapertures);
+
+ return (shmem->aperturelookup[aperture_id][channel_id]);
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// functions
+//////////////////////////////////////////////////////////////////////////////
+
+int
+kgsl_sharedmem_init(gsl_sharedmem_t *shmem)
+{
+ int i;
+ int status;
+ gsl_shmemconfig_t config;
+ int mmu_virtualized;
+ gsl_apertureid_t aperture_id;
+ gsl_channelid_t channel_id;
+ unsigned int hostbaseaddr;
+ gpuaddr_t gpubaseaddr;
+ int sizebytes;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "--> int kgsl_sharedmem_init(gsl_sharedmem_t *shmem=0x%08x)\n", shmem );
+
+ if (shmem->flags & GSL_FLAGS_INITIALIZED)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_init. Return value %B\n", GSL_SUCCESS );
+ return (GSL_SUCCESS);
+ }
+
+ status = kgsl_hal_getshmemconfig(&config);
+ if (status != GSL_SUCCESS)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Unable to get sharedmem config.\n" );
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_init. Return value %B\n", status );
+ return (status);
+ }
+
+ shmem->numapertures = config.numapertures;
+
+ for (i = 0; i < shmem->numapertures; i++)
+ {
+ aperture_id = config.apertures[i].id;
+ channel_id = config.apertures[i].channel;
+ hostbaseaddr = config.apertures[i].hostbase;
+ gpubaseaddr = config.apertures[i].gpubase;
+ sizebytes = config.apertures[i].sizebytes;
+ mmu_virtualized = 0;
+
+        // an MMU aperture is handled as an EMEM aperture with virtualization enabled
+ if (aperture_id == GSL_APERTURE_MMU)
+ {
+ mmu_virtualized = 1;
+ aperture_id = GSL_APERTURE_EMEM;
+ }
+
+ // make sure aligned to page size
+ KOS_ASSERT((gpubaseaddr & ((1 << GSL_PAGESIZE_SHIFT) - 1)) == 0);
+
+ // make a multiple of page size
+ sizebytes = (sizebytes & ~((1 << GSL_PAGESIZE_SHIFT) - 1));
+
+ if (sizebytes > 0)
+ {
+ shmem->apertures[i].memarena = kgsl_memarena_create(aperture_id, mmu_virtualized, hostbaseaddr, gpubaseaddr, sizebytes);
+
+ if (!shmem->apertures[i].memarena)
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Unable to allocate memarena.\n" );
+ kgsl_sharedmem_close(shmem);
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_init. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ shmem->apertures[i].id = aperture_id;
+ shmem->apertures[i].channel = channel_id;
+ shmem->apertures[i].numbanks = 1;
+
+ // create aperture lookup table
+ if (GSL_SHMEM_APERTURE_ISMARKED(aperture_id))
+ {
+ // update "current aperture_id"/"current channel_id" index
+ shmem->aperturelookup[aperture_id][channel_id] = i;
+ }
+ else
+ {
+ // initialize "current aperture_id"/"channel_id" indexes
+ for (channel_id = GSL_CHANNEL_1; channel_id < GSL_CHANNEL_MAX; channel_id++)
+ {
+ shmem->aperturelookup[aperture_id][channel_id] = i;
+ }
+
+ GSL_SHMEM_APERTURE_MARK(aperture_id);
+ }
+ }
+ }
+
+ shmem->flags |= GSL_FLAGS_INITIALIZED;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_init. Return value %B\n", GSL_SUCCESS );
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_sharedmem_close(gsl_sharedmem_t *shmem)
+{
+ int i;
+ int result = GSL_SUCCESS;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "--> int kgsl_sharedmem_close(gsl_sharedmem_t *shmem=0x%08x)\n", shmem );
+
+ if (shmem->flags & GSL_FLAGS_INITIALIZED)
+ {
+ for (i = 0; i < shmem->numapertures; i++)
+ {
+ if (shmem->apertures[i].memarena)
+ {
+ result = kgsl_memarena_destroy(shmem->apertures[i].memarena);
+ }
+ }
+
+ kos_memset(shmem, 0, sizeof(gsl_sharedmem_t));
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_close. Return value %B\n", result );
+
+ return (result);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_sharedmem_alloc0(gsl_deviceid_t device_id, gsl_flags_t flags, int sizebytes, gsl_memdesc_t *memdesc)
+{
+ gsl_apertureid_t aperture_id;
+ gsl_channelid_t channel_id;
+ gsl_deviceid_t tmp_id;
+ int aperture_index, org_index;
+ int result = GSL_FAILURE;
+ gsl_mmu_t *mmu = NULL;
+ gsl_sharedmem_t *shmem = &gsl_driver.shmem;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_sharedmem_alloc(gsl_deviceid_t device_id=%D, gsl_flags_t flags=0x%08x, int sizebytes=%d, gsl_memdesc_t *memdesc=%M)\n",
+ device_id, flags, sizebytes, memdesc );
+
+ KOS_ASSERT(sizebytes);
+ KOS_ASSERT(memdesc);
+
+ GSL_MEMFLAGS_APERTURE_GET(flags, aperture_id);
+ GSL_MEMFLAGS_CHANNEL_GET(flags, channel_id);
+
+ kos_memset(memdesc, 0, sizeof(gsl_memdesc_t));
+
+ if (!(shmem->flags & GSL_FLAGS_INITIALIZED))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Shared memory not initialized.\n" );
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_alloc. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ // execute pending device action
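+    // (kgsl_device_runpending is called for each initialized device; the loop
+    //  stops early once the specifically requested device has been handled)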
+ tmp_id = (device_id != GSL_DEVICE_ANY) ? device_id : device_id+1;
+ for ( ; tmp_id <= GSL_DEVICE_MAX; tmp_id++)
+ {
+ if (gsl_driver.device[tmp_id-1].flags & GSL_FLAGS_INITIALIZED)
+ {
+ kgsl_device_runpending(&gsl_driver.device[tmp_id-1]);
+
+ if (tmp_id == device_id)
+ {
+ break;
+ }
+ }
+ }
+
+ // convert any device to an actual existing device
+ if (device_id == GSL_DEVICE_ANY)
+ {
+ for ( ; ; )
+ {
+ device_id++;
+
+ if (device_id <= GSL_DEVICE_MAX)
+ {
+ if (gsl_driver.device[device_id-1].flags & GSL_FLAGS_INITIALIZED)
+ {
+ break;
+ }
+ }
+ else
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Invalid device.\n" );
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_alloc. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+ }
+ }
+
+ KOS_ASSERT(device_id > GSL_DEVICE_ANY && device_id <= GSL_DEVICE_MAX);
+
+ // get mmu reference
+ mmu = &gsl_driver.device[device_id-1].mmu;
+
+ aperture_index = kgsl_sharedmem_getapertureindex(shmem, aperture_id, channel_id);
+
+    // do not proceed if it is a strict request, the aperture requested is not present, and the MMU is enabled
+ if (!((flags & GSL_MEMFLAGS_STRICTREQUEST) && aperture_id != shmem->apertures[aperture_index].id && kgsl_mmu_isenabled(mmu)))
+ {
+ // do allocation
+ result = kgsl_memarena_alloc(shmem->apertures[aperture_index].memarena, flags, sizebytes, memdesc);
+
+ // if allocation failed
+ if (result != GSL_SUCCESS)
+ {
+ org_index = aperture_index;
+
+ // then failover to other channels within the current aperture
+ for (channel_id = GSL_CHANNEL_1; channel_id < GSL_CHANNEL_MAX; channel_id++)
+ {
+ aperture_index = kgsl_sharedmem_getapertureindex(shmem, aperture_id, channel_id);
+
+ if (aperture_index != org_index)
+ {
+ // do allocation
+ result = kgsl_memarena_alloc(shmem->apertures[aperture_index].memarena, flags, sizebytes, memdesc);
+
+ if (result == GSL_SUCCESS)
+ {
+ break;
+ }
+ }
+ }
+
+ // if allocation still has not succeeded, then failover to EMEM/MMU aperture, but
+ // not if it's a strict request and the MMU is enabled
+ if (result != GSL_SUCCESS && aperture_id != GSL_APERTURE_EMEM
+ && !((flags & GSL_MEMFLAGS_STRICTREQUEST) && kgsl_mmu_isenabled(mmu)))
+ {
+ aperture_id = GSL_APERTURE_EMEM;
+
+ // try every channel
+ for (channel_id = GSL_CHANNEL_1; channel_id < GSL_CHANNEL_MAX; channel_id++)
+ {
+ aperture_index = kgsl_sharedmem_getapertureindex(shmem, aperture_id, channel_id);
+
+ if (aperture_index != org_index)
+ {
+ // do allocation
+ result = kgsl_memarena_alloc(shmem->apertures[aperture_index].memarena, flags, sizebytes, memdesc);
+
+ if (result == GSL_SUCCESS)
+ {
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (result == GSL_SUCCESS)
+ {
+ GSL_MEMDESC_APERTURE_SET(memdesc, aperture_index);
+ GSL_MEMDESC_DEVICE_SET(memdesc, device_id);
+
+ if (kgsl_memarena_isvirtualized(shmem->apertures[aperture_index].memarena))
+ {
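+            // the memarena handed out a gpu-virtual range: back it with
+            // physical pages from the HAL and map them through the MMU,
+            // rolling back the arena allocation if either step fails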
+ gsl_scatterlist_t scatterlist;
+
+ scatterlist.contiguous = 0;
+ scatterlist.num = memdesc->size / GSL_PAGESIZE;
+
+ if (memdesc->size & (GSL_PAGESIZE-1))
+ {
+ scatterlist.num++;
+ }
+
+ scatterlist.pages = kos_malloc(sizeof(unsigned int) * scatterlist.num);
+ if (scatterlist.pages)
+ {
+ // allocate physical pages
+ result = kgsl_hal_allocphysical(memdesc->gpuaddr, scatterlist.num, scatterlist.pages);
+ if (result == GSL_SUCCESS)
+ {
+ result = kgsl_mmu_map(mmu, memdesc->gpuaddr, &scatterlist, flags, GSL_CALLER_PROCESSID_GET());
+ if (result != GSL_SUCCESS)
+ {
+ kgsl_hal_freephysical(memdesc->gpuaddr, scatterlist.num, scatterlist.pages);
+ }
+ }
+
+ kos_free(scatterlist.pages);
+ }
+ else
+ {
+ result = GSL_FAILURE;
+ }
+
+ if (result != GSL_SUCCESS)
+ {
+ kgsl_memarena_free(shmem->apertures[aperture_index].memarena, memdesc);
+ }
+ }
+ }
+
+ KGSL_DEBUG_TBDUMP_SETMEM( memdesc->gpuaddr, 0, memdesc->size );
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_alloc. Return value %B\n", result );
+
+ return (result);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_sharedmem_alloc(gsl_deviceid_t device_id, gsl_flags_t flags, int sizebytes, gsl_memdesc_t *memdesc)
+{
+ int status = GSL_SUCCESS;
+ GSL_API_MUTEX_LOCK();
+ status = kgsl_sharedmem_alloc0(device_id, flags, sizebytes, memdesc);
+ GSL_API_MUTEX_UNLOCK();
+ return status;
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_sharedmem_free0(gsl_memdesc_t *memdesc, unsigned int pid)
+{
+ int status = GSL_SUCCESS;
+ int aperture_index;
+ gsl_deviceid_t device_id;
+ gsl_sharedmem_t *shmem;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "--> int kgsl_sharedmem_free(gsl_memdesc_t *memdesc=%M)\n", memdesc );
+
+ GSL_MEMDESC_APERTURE_GET(memdesc, aperture_index);
+ GSL_MEMDESC_DEVICE_GET(memdesc, device_id);
+
+ shmem = &gsl_driver.shmem;
+
+ if (shmem->flags & GSL_FLAGS_INITIALIZED)
+ {
+ if (kgsl_memarena_isvirtualized(shmem->apertures[aperture_index].memarena))
+ {
+ status |= kgsl_mmu_unmap(&gsl_driver.device[device_id-1].mmu, memdesc->gpuaddr, memdesc->size, pid);
+
+ if (!GSL_MEMDESC_EXTALLOC_ISMARKED(memdesc))
+ {
+ status |= kgsl_hal_freephysical(memdesc->gpuaddr, memdesc->size / GSL_PAGESIZE, NULL);
+ }
+ }
+
+ kgsl_memarena_free(shmem->apertures[aperture_index].memarena, memdesc);
+
+ // clear descriptor
+ kos_memset(memdesc, 0, sizeof(gsl_memdesc_t));
+ }
+ else
+ {
+ status = GSL_FAILURE;
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_free. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_sharedmem_free(gsl_memdesc_t *memdesc)
+{
+ int status = GSL_SUCCESS;
+ GSL_API_MUTEX_LOCK();
+ status = kgsl_sharedmem_free0(memdesc, GSL_CALLER_PROCESSID_GET());
+ GSL_API_MUTEX_UNLOCK();
+ return status;
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_sharedmem_read0(const gsl_memdesc_t *memdesc, void *dst, unsigned int offsetbytes, unsigned int sizebytes, unsigned int touserspace)
+{
+ int aperture_index;
+ gsl_sharedmem_t *shmem;
+ unsigned int gpuoffsetbytes;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_sharedmem_read(gsl_memdesc_t *memdesc=%M, void *dst=0x%08x, unsigned int offsetbytes=%d, unsigned int sizebytes=%d)\n",
+ memdesc, dst, offsetbytes, sizebytes );
+
+ GSL_MEMDESC_APERTURE_GET(memdesc, aperture_index);
+
+ if (GSL_MEMDESC_EXTALLOC_ISMARKED(memdesc))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_read. Return value %B\n", GSL_FAILURE_BADPARAM );
+ return (GSL_FAILURE_BADPARAM);
+ }
+
+ shmem = &gsl_driver.shmem;
+
+ if (!(shmem->flags & GSL_FLAGS_INITIALIZED))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Shared memory not initialized.\n" );
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_read. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ KOS_ASSERT(dst);
+ KOS_ASSERT(sizebytes);
+
+ if (memdesc->gpuaddr < shmem->apertures[aperture_index].memarena->gpubaseaddr)
+ {
+ return (GSL_FAILURE_BADPARAM);
+ }
+
+ if (memdesc->gpuaddr + sizebytes > shmem->apertures[aperture_index].memarena->gpubaseaddr + shmem->apertures[aperture_index].memarena->sizebytes)
+ {
+ return (GSL_FAILURE_BADPARAM);
+ }
+
+ gpuoffsetbytes = (memdesc->gpuaddr - shmem->apertures[aperture_index].memarena->gpubaseaddr) + offsetbytes;
+
+ GSL_HAL_MEM_READ(dst, shmem->apertures[aperture_index].memarena->hostbaseaddr, gpuoffsetbytes, sizebytes, touserspace);
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_read. Return value %B\n", GSL_SUCCESS );
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_sharedmem_read(const gsl_memdesc_t *memdesc, void *dst, unsigned int offsetbytes, unsigned int sizebytes, unsigned int touserspace)
+{
+ int status = GSL_SUCCESS;
+ GSL_API_MUTEX_LOCK();
+ status = kgsl_sharedmem_read0(memdesc, dst, offsetbytes, sizebytes, touserspace);
+ GSL_API_MUTEX_UNLOCK();
+ return status;
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_sharedmem_write0(const gsl_memdesc_t *memdesc, unsigned int offsetbytes, void *src, unsigned int sizebytes, unsigned int fromuserspace)
+{
+ int aperture_index;
+ gsl_sharedmem_t *shmem;
+ unsigned int gpuoffsetbytes;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_sharedmem_write(gsl_memdesc_t *memdesc=%M, unsigned int offsetbytes=%d, void *src=0x%08x, unsigned int sizebytes=%d)\n",
+ memdesc, offsetbytes, src, sizebytes );
+
+ GSL_MEMDESC_APERTURE_GET(memdesc, aperture_index);
+
+ if (GSL_MEMDESC_EXTALLOC_ISMARKED(memdesc))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_write. Return value %B\n", GSL_FAILURE_BADPARAM );
+ return (GSL_FAILURE_BADPARAM);
+ }
+
+ shmem = &gsl_driver.shmem;
+
+ if (!(shmem->flags & GSL_FLAGS_INITIALIZED))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Shared memory not initialized.\n" );
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_write. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ KOS_ASSERT(src);
+ KOS_ASSERT(sizebytes);
+ KOS_ASSERT(memdesc->gpuaddr >= shmem->apertures[aperture_index].memarena->gpubaseaddr);
+ KOS_ASSERT((memdesc->gpuaddr + sizebytes) <= (shmem->apertures[aperture_index].memarena->gpubaseaddr + shmem->apertures[aperture_index].memarena->sizebytes));
+
+ gpuoffsetbytes = (memdesc->gpuaddr - shmem->apertures[aperture_index].memarena->gpubaseaddr) + offsetbytes;
+
+ GSL_HAL_MEM_WRITE(shmem->apertures[aperture_index].memarena->hostbaseaddr, gpuoffsetbytes, src, sizebytes, fromuserspace);
+
+ KGSL_DEBUG(GSL_DBGFLAGS_PM4MEM, KGSL_DEBUG_DUMPMEMWRITE((memdesc->gpuaddr + offsetbytes), sizebytes, src));
+
+ KGSL_DEBUG_TBDUMP_SYNCMEM( (memdesc->gpuaddr + offsetbytes), src, sizebytes );
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_write. Return value %B\n", GSL_SUCCESS );
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_sharedmem_write(const gsl_memdesc_t *memdesc, unsigned int offsetbytes, void *src, unsigned int sizebytes, unsigned int fromuserspace)
+{
+ int status = GSL_SUCCESS;
+ GSL_API_MUTEX_LOCK();
+ status = kgsl_sharedmem_write0(memdesc, offsetbytes, src, sizebytes, fromuserspace);
+ GSL_API_MUTEX_UNLOCK();
+ return status;
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_sharedmem_set0(const gsl_memdesc_t *memdesc, unsigned int offsetbytes, unsigned int value, unsigned int sizebytes)
+{
+ int aperture_index;
+ gsl_sharedmem_t *shmem;
+ unsigned int gpuoffsetbytes;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_sharedmem_set(gsl_memdesc_t *memdesc=%M, unsigned int offsetbytes=%d, unsigned int value=0x%08x, unsigned int sizebytes=%d)\n",
+ memdesc, offsetbytes, value, sizebytes );
+
+ GSL_MEMDESC_APERTURE_GET(memdesc, aperture_index);
+
+ if (GSL_MEMDESC_EXTALLOC_ISMARKED(memdesc))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_set. Return value %B\n", GSL_FAILURE_BADPARAM );
+ return (GSL_FAILURE_BADPARAM);
+ }
+
+ shmem = &gsl_driver.shmem;
+
+ if (!(shmem->flags & GSL_FLAGS_INITIALIZED))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Shared memory not initialized.\n" );
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_set. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+
+ KOS_ASSERT(sizebytes);
+ KOS_ASSERT(memdesc->gpuaddr >= shmem->apertures[aperture_index].memarena->gpubaseaddr);
+ KOS_ASSERT((memdesc->gpuaddr + sizebytes) <= (shmem->apertures[aperture_index].memarena->gpubaseaddr + shmem->apertures[aperture_index].memarena->sizebytes));
+
+ gpuoffsetbytes = (memdesc->gpuaddr - shmem->apertures[aperture_index].memarena->gpubaseaddr) + offsetbytes;
+
+ GSL_HAL_MEM_SET(shmem->apertures[aperture_index].memarena->hostbaseaddr, gpuoffsetbytes, value, sizebytes);
+
+ KGSL_DEBUG(GSL_DBGFLAGS_PM4MEM, KGSL_DEBUG_DUMPMEMSET((memdesc->gpuaddr + offsetbytes), sizebytes, value));
+
+ KGSL_DEBUG_TBDUMP_SETMEM( (memdesc->gpuaddr + offsetbytes), value, sizebytes );
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_set. Return value %B\n", GSL_SUCCESS );
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_sharedmem_set(const gsl_memdesc_t *memdesc, unsigned int offsetbytes, unsigned int value, unsigned int sizebytes)
+{
+ int status = GSL_SUCCESS;
+ GSL_API_MUTEX_LOCK();
+ status = kgsl_sharedmem_set0(memdesc, offsetbytes, value, sizebytes);
+ GSL_API_MUTEX_UNLOCK();
+ return status;
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API unsigned int
+kgsl_sharedmem_largestfreeblock(gsl_deviceid_t device_id, gsl_flags_t flags)
+{
+ gsl_apertureid_t aperture_id;
+ gsl_channelid_t channel_id;
+ int aperture_index;
+ unsigned int result = 0;
+ gsl_sharedmem_t *shmem;
+
+    // device_id is ignored at this level; it would be used with per-device memarenas
+
+ // unreferenced formal parameter
+ (void) device_id;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_sharedmem_largestfreeblock(gsl_deviceid_t device_id=%D, gsl_flags_t flags=0x%08x)\n",
+ device_id, flags );
+
+ GSL_MEMFLAGS_APERTURE_GET(flags, aperture_id);
+ GSL_MEMFLAGS_CHANNEL_GET(flags, channel_id);
+
+ GSL_API_MUTEX_LOCK();
+
+ shmem = &gsl_driver.shmem;
+
+ if (!(shmem->flags & GSL_FLAGS_INITIALIZED))
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Shared memory not initialized.\n" );
+ GSL_API_MUTEX_UNLOCK();
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_largestfreeblock. Return value %d\n", 0 );
+ return (0);
+ }
+
+ aperture_index = kgsl_sharedmem_getapertureindex(shmem, aperture_id, channel_id);
+
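+    // only report a size when the lookup resolved to the requested aperture;
+    // the index may refer to a different aperture if the requested one is absent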
+ if (aperture_id == shmem->apertures[aperture_index].id)
+ {
+ result = kgsl_memarena_getlargestfreeblock(shmem->apertures[aperture_index].memarena, flags);
+ }
+
+ GSL_API_MUTEX_UNLOCK();
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_largestfreeblock. Return value %d\n", result );
+
+ return (result);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_sharedmem_map(gsl_deviceid_t device_id, gsl_flags_t flags, const gsl_scatterlist_t *scatterlist, gsl_memdesc_t *memdesc)
+{
+ int status = GSL_FAILURE;
+ gsl_sharedmem_t *shmem = &gsl_driver.shmem;
+ int aperture_index;
+ gsl_deviceid_t tmp_id;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_sharedmem_map(gsl_deviceid_t device_id=%D, gsl_flags_t flags=0x%08x, gsl_scatterlist_t scatterlist=%M, gsl_memdesc_t *memdesc=%M)\n",
+ device_id, flags, memdesc, scatterlist );
+
+ // execute pending device action
+ tmp_id = (device_id != GSL_DEVICE_ANY) ? device_id : device_id+1;
+ for ( ; tmp_id <= GSL_DEVICE_MAX; tmp_id++)
+ {
+ if (gsl_driver.device[tmp_id-1].flags & GSL_FLAGS_INITIALIZED)
+ {
+ kgsl_device_runpending(&gsl_driver.device[tmp_id-1]);
+
+ if (tmp_id == device_id)
+ {
+ break;
+ }
+ }
+ }
+
+ // convert any device to an actual existing device
+ if (device_id == GSL_DEVICE_ANY)
+ {
+ for ( ; ; )
+ {
+ device_id++;
+
+ if (device_id <= GSL_DEVICE_MAX)
+ {
+ if (gsl_driver.device[device_id-1].flags & GSL_FLAGS_INITIALIZED)
+ {
+ break;
+ }
+ }
+ else
+ {
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Invalid device.\n" );
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_map. Return value %B\n", GSL_FAILURE );
+ return (GSL_FAILURE);
+ }
+ }
+ }
+
+ KOS_ASSERT(device_id > GSL_DEVICE_ANY && device_id <= GSL_DEVICE_MAX);
+
+ if (shmem->flags & GSL_FLAGS_INITIALIZED)
+ {
+ aperture_index = kgsl_sharedmem_getapertureindex(shmem, GSL_APERTURE_EMEM, GSL_CHANNEL_1);
+
+ if (kgsl_memarena_isvirtualized(shmem->apertures[aperture_index].memarena))
+ {
+ KOS_ASSERT(scatterlist->num);
+ KOS_ASSERT(scatterlist->pages);
+
+            status = kgsl_memarena_alloc(shmem->apertures[aperture_index].memarena, flags, scatterlist->num * GSL_PAGESIZE, memdesc);
+ if (status == GSL_SUCCESS)
+ {
+ GSL_MEMDESC_APERTURE_SET(memdesc, aperture_index);
+ GSL_MEMDESC_DEVICE_SET(memdesc, device_id);
+
+ // mark descriptor's memory as externally allocated -- i.e. outside GSL
+ GSL_MEMDESC_EXTALLOC_SET(memdesc, 1);
+
+ status = kgsl_mmu_map(&gsl_driver.device[device_id-1].mmu, memdesc->gpuaddr, scatterlist, flags, GSL_CALLER_PROCESSID_GET());
+ if (status != GSL_SUCCESS)
+ {
+ kgsl_memarena_free(shmem->apertures[aperture_index].memarena, memdesc);
+ }
+ }
+ }
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_map. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_sharedmem_unmap(gsl_memdesc_t *memdesc)
+{
+ return (kgsl_sharedmem_free0(memdesc, GSL_CALLER_PROCESSID_GET()));
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_sharedmem_getmap(const gsl_memdesc_t *memdesc, gsl_scatterlist_t *scatterlist)
+{
+ int status = GSL_SUCCESS;
+ int aperture_index;
+ gsl_deviceid_t device_id;
+ gsl_sharedmem_t *shmem;
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
+ "--> int kgsl_sharedmem_getmap(gsl_memdesc_t *memdesc=%M, gsl_scatterlist_t scatterlist=%M)\n",
+ memdesc, scatterlist );
+
+ GSL_MEMDESC_APERTURE_GET(memdesc, aperture_index);
+ GSL_MEMDESC_DEVICE_GET(memdesc, device_id);
+
+ shmem = &gsl_driver.shmem;
+
+ if (shmem->flags & GSL_FLAGS_INITIALIZED)
+ {
+ KOS_ASSERT(scatterlist->num);
+ KOS_ASSERT(scatterlist->pages);
+ KOS_ASSERT(memdesc->gpuaddr >= shmem->apertures[aperture_index].memarena->gpubaseaddr);
+ KOS_ASSERT((memdesc->gpuaddr + memdesc->size) <= (shmem->apertures[aperture_index].memarena->gpubaseaddr + shmem->apertures[aperture_index].memarena->sizebytes));
+
+ kos_memset(scatterlist->pages, 0, sizeof(unsigned int) * scatterlist->num);
+
+ if (kgsl_memarena_isvirtualized(shmem->apertures[aperture_index].memarena))
+ {
+ status = kgsl_mmu_getmap(&gsl_driver.device[device_id-1].mmu, memdesc->gpuaddr, memdesc->size, scatterlist, GSL_CALLER_PROCESSID_GET());
+ }
+ else
+ {
+ // coalesce physically contiguous pages into a single scatter list entry
+ scatterlist->pages[0] = memdesc->gpuaddr;
+ scatterlist->contiguous = 1;
+ }
+ }
+
+ kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_getmap. Return value %B\n", status );
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_sharedmem_querystats(gsl_sharedmem_t *shmem, gsl_sharedmem_stats_t *stats)
+{
+#ifdef GSL_STATS_MEM
+ int status = GSL_SUCCESS;
+ int i;
+
+ KOS_ASSERT(stats);
+
+ if (shmem->flags & GSL_FLAGS_INITIALIZED)
+ {
+ for (i = 0; i < shmem->numapertures; i++)
+ {
+ if (shmem->apertures[i].memarena)
+ {
+ stats->apertures[i].id = shmem->apertures[i].id;
+ stats->apertures[i].channel = shmem->apertures[i].channel;
+
+ status |= kgsl_memarena_querystats(shmem->apertures[i].memarena, &stats->apertures[i].memarena);
+ }
+ }
+ }
+ else
+ {
+ kos_memset(stats, 0, sizeof(gsl_sharedmem_stats_t));
+ }
+
+ return (status);
+#else
+ // unreferenced formal parameters
+ (void) shmem;
+ (void) stats;
+
+ return (GSL_FAILURE_NOTSUPPORTED);
+#endif // GSL_STATS_MEM
+}
+
+//----------------------------------------------------------------------------
+
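+// translate an address between the gpu and host views of shared memory by
+// searching the apertures: type 0 converts gpu to host, type 1 converts host
+// to gpu; 0 is returned when the address does not fall within any aperture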
+unsigned int
+kgsl_sharedmem_convertaddr(unsigned int addr, int type)
+{
+ gsl_sharedmem_t *shmem = &gsl_driver.shmem;
+ unsigned int cvtaddr = 0;
+ unsigned int gpubaseaddr, hostbaseaddr, sizebytes;
+ int i;
+
+ if ((shmem->flags & GSL_FLAGS_INITIALIZED))
+ {
+ for (i = 0; i < shmem->numapertures; i++)
+ {
+ hostbaseaddr = shmem->apertures[i].memarena->hostbaseaddr;
+ gpubaseaddr = shmem->apertures[i].memarena->gpubaseaddr;
+ sizebytes = shmem->apertures[i].memarena->sizebytes;
+
+ // convert from gpu to host
+ if (type == 0)
+ {
+ if (addr >= gpubaseaddr && addr < (gpubaseaddr + sizebytes))
+ {
+ cvtaddr = hostbaseaddr + (addr - gpubaseaddr);
+ break;
+ }
+ }
+ // convert from host to gpu
+ else if (type == 1)
+ {
+ if (addr >= hostbaseaddr && addr < (hostbaseaddr + sizebytes))
+ {
+ cvtaddr = gpubaseaddr + (addr - hostbaseaddr);
+ break;
+ }
+ }
+ }
+ }
+
+ return (cvtaddr);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_sharedmem_cacheoperation(const gsl_memdesc_t *memdesc, unsigned int offsetbytes, unsigned int sizebytes, unsigned int operation)
+{
+ int status = GSL_FAILURE;
+
+    /* unreferenced formal parameters */
+ (void)memdesc;
+ (void)offsetbytes;
+ (void)sizebytes;
+ (void)operation;
+
+ /* do cache operation */
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+KGSL_API int
+kgsl_sharedmem_fromhostpointer(gsl_deviceid_t device_id, gsl_memdesc_t *memdesc, void* hostptr)
+{
+ int status = GSL_FAILURE;
+
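+    /* note: no translation or mapping is performed here; the host pointer is
+       stored directly in the descriptor and the default failure status is
+       returned unchanged */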
+ memdesc->gpuaddr = (gpuaddr_t)hostptr; /* map physical address with hostptr */
+ memdesc->hostptr = hostptr; /* set virtual address also in memdesc */
+
+ /* unreferenced formal parameter */
+ (void)device_id;
+
+ return (status);
+}
diff --git a/drivers/mxc/amd-gpu/common/gsl_tbdump.c b/drivers/mxc/amd-gpu/common/gsl_tbdump.c
new file mode 100644
index 000000000000..e22cf894f7b2
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_tbdump.c
@@ -0,0 +1,228 @@
+/* Copyright (c) 2008-2010, Advanced Micro Devices. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <stdio.h>
+#ifdef WIN32
+#include <windows.h>
+#endif
+#include "gsl.h"
+#include "gsl_tbdump.h"
+#include "kos_libapi.h"
+
+#ifdef TBDUMP
+
+typedef struct TBDump_
+{
+ void* file;
+} TBDump;
+
+
+static TBDump g_tb;
+static oshandle_t tbdump_mutex = 0;
+#define TBDUMP_MUTEX_LOCK() if( tbdump_mutex ) kos_mutex_lock( tbdump_mutex )
+#define TBDUMP_MUTEX_UNLOCK() if( tbdump_mutex ) kos_mutex_unlock( tbdump_mutex )
+
+/* ------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------ */
+
+static void tbdump_printline(const char* format, ...)
+{
+ if(g_tb.file)
+ {
+ va_list va;
+ va_start(va, format);
+ vfprintf((FILE*)g_tb.file, format, va);
+ va_end(va);
+ fprintf((FILE*)g_tb.file, "\n");
+ }
+}
+
+static void tbdump_printinfo(const char* message )
+{
+ tbdump_printline("15 %s", message);
+}
+
+static void tbdump_getmemhex(char* buffer, unsigned int addr, unsigned int sizewords)
+{
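+    /* bytes are emitted last-to-first so the resulting hex string reads as a
+       little-endian value; sizewords is effectively a byte count here */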
+ unsigned int i = 0;
+ static const char* hexChars = "0123456789abcdef";
+ unsigned char* ptr = (unsigned char*)addr;
+
+ for (i = 0; i < sizewords; i++)
+ {
+ buffer[(sizewords - i) * 2 - 1] = hexChars[ptr[i] & 0x0f];
+ buffer[(sizewords - i) * 2 - 2] = hexChars[ptr[i] >> 4];
+ }
+ buffer[sizewords * 2] = '\0';
+}
+
+/* ------------------------------------------------------------------------ */
+
+void tbdump_open(char* filename)
+{
+ if( !tbdump_mutex ) tbdump_mutex = kos_mutex_create( "TBDUMP_MUTEX" );
+
+ kos_memset( &g_tb, 0, sizeof( g_tb ) );
+
+ g_tb.file = kos_fopen( filename, "wt" );
+
+ tbdump_printinfo("reset");
+ tbdump_printline("0");
+ tbdump_printline("1 00000000 00000eff");
+
+ /* Enable interrupts */
+ tbdump_printline("1 00000000 00000003");
+}
+
+void tbdump_close()
+{
+ TBDUMP_MUTEX_LOCK();
+
+ kos_fclose( g_tb.file );
+ g_tb.file = 0;
+
+ TBDUMP_MUTEX_UNLOCK();
+
+ if( tbdump_mutex ) kos_mutex_free( tbdump_mutex );
+}
+
+/* ------------------------------------------------------------------------ */
+
+void tbdump_syncmem(unsigned int addr, unsigned int src, unsigned int sizebytes)
+{
+ /* Align starting address and size */
+ unsigned int beg = addr;
+ unsigned int end = addr+sizebytes;
+ char buffer[65];
+
+ TBDUMP_MUTEX_LOCK();
+
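+    /* round the start up and the end down to 16-byte boundaries; the unaligned
+       head and tail are emitted as partial writes below */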
+ beg = (beg+15) & ~15;
+ end &= ~15;
+
+ if( sizebytes <= 16 )
+ {
+ tbdump_getmemhex(buffer, src, 16);
+
+ tbdump_printline("19 %08x %i 1 %s", addr, sizebytes, buffer);
+
+ TBDUMP_MUTEX_UNLOCK();
+ return;
+ }
+
+ /* Handle unaligned start */
+ if( beg != addr )
+ {
+ tbdump_getmemhex(buffer, src, 16);
+
+ tbdump_printline("19 %08x %i 1 %s", addr, beg-addr, buffer);
+
+ src += beg-addr;
+ }
+
+ /* Dump the memory writes */
+ while( beg < end )
+ {
+ tbdump_getmemhex(buffer, src, 16);
+
+ tbdump_printline("2 %08x %s", beg, buffer);
+
+ beg += 16;
+ src += 16;
+ }
+
+ /* Handle unaligned end */
+ if( end != addr+sizebytes )
+ {
+ tbdump_getmemhex(buffer, src, 16);
+
+ tbdump_printline("19 %08x %i 1 %s", end, (addr+sizebytes)-end, buffer);
+ }
+
+ TBDUMP_MUTEX_UNLOCK();
+}
+
+/* ------------------------------------------------------------------------ */
+
+void tbdump_setmem(unsigned int addr, unsigned int value, unsigned int sizebytes)
+{
+ TBDUMP_MUTEX_LOCK();
+
+ tbdump_printline("19 %08x 4 %i %032x", addr, (sizebytes+3)/4, value );
+
+ TBDUMP_MUTEX_UNLOCK();
+}
+
+/* ------------------------------------------------------------------------ */
+
+void tbdump_slavewrite(unsigned int addr, unsigned int value)
+{
+ TBDUMP_MUTEX_LOCK();
+
+ tbdump_printline("1 %08x %08x", addr, value);
+
+ TBDUMP_MUTEX_UNLOCK();
+}
+
+/* ------------------------------------------------------------------------ */
+
+
+KGSL_API int
+kgsl_tbdump_waitirq()
+{
+ if(!g_tb.file) return GSL_FAILURE;
+
+ TBDUMP_MUTEX_LOCK();
+
+ tbdump_printinfo("wait irq");
+ tbdump_printline("10");
+
+ /* ACK IRQ */
+ tbdump_printline("1 00000418 00000003");
+ tbdump_printline("18 00000018 00000000 # slave read & assert");
+
+ TBDUMP_MUTEX_UNLOCK();
+
+ return GSL_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------ */
+
+KGSL_API int
+kgsl_tbdump_exportbmp(const void* addr, unsigned int format, unsigned int stride, unsigned int width, unsigned int height)
+{
+ static char filename[20];
+ static int numframe = 0;
+
+ if(!g_tb.file) return GSL_FAILURE;
+
+ TBDUMP_MUTEX_LOCK();
+ #pragma warning(disable:4996)
+ sprintf( filename, "tbdump_%08d.bmp", numframe++ );
+
+ tbdump_printline("13 %s %d %08x %d %d %d 0", filename, format, (unsigned int)addr, stride, width, height);
+
+ TBDUMP_MUTEX_UNLOCK();
+
+ return GSL_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#endif /* TBDUMP */
diff --git a/drivers/mxc/amd-gpu/common/gsl_yamato.c b/drivers/mxc/amd-gpu/common/gsl_yamato.c
new file mode 100644
index 000000000000..d74c9efe2f36
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/gsl_yamato.c
@@ -0,0 +1,886 @@
+/* Copyright (c) 2002,2007-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "gsl.h"
+#include "gsl_hal.h"
+#ifdef _LINUX
+#include <linux/sched.h>
+#endif
+
+#ifdef GSL_BLD_YAMATO
+
+#include "gsl_ringbuffer.h"
+#include "gsl_drawctxt.h"
+
+//////////////////////////////////////////////////////////////////////////////
+// functions
+//////////////////////////////////////////////////////////////////////////////
+
+static int
+kgsl_yamato_gmeminit(gsl_device_t *device)
+{
+ rb_edram_info_u rb_edram_info = {0};
+ unsigned int gmem_size;
+ unsigned int edram_value = 0;
+
+ // make sure edram range is aligned to size
+ KOS_ASSERT((device->gmemspace.gpu_base & (device->gmemspace.sizebytes - 1)) == 0);
+
+    // compute the edram_size field encoding: log2 of the gmem size in 16KB blocks
+ gmem_size = (device->gmemspace.sizebytes >> 14);
+ while (gmem_size >>= 1)
+ {
+ edram_value++;
+ }
+
+ rb_edram_info.f.edram_size = edram_value;
+ rb_edram_info.f.edram_mapping_mode = 0; // EDRAM_MAP_UPPER
+ rb_edram_info.f.edram_range = (device->gmemspace.gpu_base >> 14); // must be aligned to size
+
+ device->ftbl.device_regwrite(device, mmRB_EDRAM_INFO, (unsigned int)rb_edram_info.val);
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+static int
+kgsl_yamato_gmemclose(gsl_device_t *device)
+{
+ device->ftbl.device_regwrite(device, mmRB_EDRAM_INFO, 0x00000000);
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+void
+kgsl_yamato_rbbmintrcallback(gsl_intrid_t id, void *cookie)
+{
+ gsl_device_t *device = (gsl_device_t *) cookie;
+
+ switch(id)
+ {
+ // error condition interrupt
+ case GSL_INTR_YDX_RBBM_READ_ERROR:
+
+ device->ftbl.device_destroy(device);
+ break;
+
+ // non-error condition interrupt
+ case GSL_INTR_YDX_RBBM_DISPLAY_UPDATE:
+ case GSL_INTR_YDX_RBBM_GUI_IDLE:
+
+ kos_event_signal(device->intr.evnt[id]);
+ break;
+
+ default:
+
+ break;
+ }
+}
+
+//----------------------------------------------------------------------------
+
+void
+kgsl_yamato_cpintrcallback(gsl_intrid_t id, void *cookie)
+{
+ gsl_device_t *device = (gsl_device_t *) cookie;
+
+ switch(id)
+ {
+ case GSL_INTR_YDX_CP_RING_BUFFER:
+#ifndef _LINUX
+ kos_event_signal(device->timestamp_event);
+#else
+ wake_up_interruptible_all(&(device->timestamp_waitq));
+#endif
+ break;
+ default:
+ break;
+ }
+}
+//----------------------------------------------------------------------------
+
+void
+kgsl_yamato_sqintrcallback(gsl_intrid_t id, void *cookie)
+{
+ (void) cookie; // unreferenced formal parameter
+ /*gsl_device_t *device = (gsl_device_t *) cookie;*/
+
+ switch(id)
+ {
+ // error condition interrupt
+ case GSL_INTR_YDX_SQ_PS_WATCHDOG:
+ case GSL_INTR_YDX_SQ_VS_WATCHDOG:
+
+ // todo: take appropriate action
+
+ break;
+
+ default:
+
+ break;
+ }
+}
+
+//----------------------------------------------------------------------------
+
+#ifdef _DEBUG
+
+static int
+kgsl_yamato_bist(gsl_device_t *device)
+{
+ int status = GSL_FAILURE;
+ unsigned int link[2];
+
+ if (!(device->flags & GSL_FLAGS_STARTED))
+ {
+ return (GSL_FAILURE);
+ }
+
+ status = kgsl_ringbuffer_bist(&device->ringbuffer);
+ if (status != GSL_SUCCESS)
+ {
+ return (status);
+ }
+
+ // interrupt bist
+ link[0] = pm4_type3_packet(PM4_INTERRUPT, 1);
+ link[1] = CP_INT_CNTL__RB_INT_MASK;
+ kgsl_ringbuffer_issuecmds(device, 1, &link[0], 2, GSL_CALLER_PROCESSID_GET());
+
+ status = kgsl_mmu_bist(&device->mmu);
+ if (status != GSL_SUCCESS)
+ {
+ return (status);
+ }
+
+ return (status);
+}
+#endif
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_isr(gsl_device_t *device)
+{
+ unsigned int status;
+#ifdef _DEBUG
+ mh_mmu_page_fault_u page_fault = {0};
+ mh_axi_error_u axi_error = {0};
+ mh_clnt_axi_id_reuse_u clnt_axi_id_reuse = {0};
+ rbbm_read_error_u read_error = {0};
+#endif // _DEBUG
+
+ // determine if yamato is interrupting, and if so, which block
+ device->ftbl.device_regread(device, mmMASTER_INT_SIGNAL, &status);
+
+ if (status & MASTER_INT_SIGNAL__MH_INT_STAT)
+ {
+#ifdef _DEBUG
+ // obtain mh error information
+ device->ftbl.device_regread(device, mmMH_MMU_PAGE_FAULT, (unsigned int *)&page_fault);
+ device->ftbl.device_regread(device, mmMH_AXI_ERROR, (unsigned int *)&axi_error);
+ device->ftbl.device_regread(device, mmMH_CLNT_AXI_ID_REUSE, (unsigned int *)&clnt_axi_id_reuse);
+#endif // _DEBUG
+
+ kgsl_intr_decode(device, GSL_INTR_BLOCK_YDX_MH);
+ }
+
+ if (status & MASTER_INT_SIGNAL__CP_INT_STAT)
+ {
+ kgsl_intr_decode(device, GSL_INTR_BLOCK_YDX_CP);
+ }
+
+ if (status & MASTER_INT_SIGNAL__RBBM_INT_STAT)
+ {
+#ifdef _DEBUG
+ // obtain rbbm error information
+ device->ftbl.device_regread(device, mmRBBM_READ_ERROR, (unsigned int *)&read_error);
+#endif // _DEBUG
+
+ kgsl_intr_decode(device, GSL_INTR_BLOCK_YDX_RBBM);
+ }
+
+ if (status & MASTER_INT_SIGNAL__SQ_INT_STAT)
+ {
+ kgsl_intr_decode(device, GSL_INTR_BLOCK_YDX_SQ);
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_tlbinvalidate(gsl_device_t *device, unsigned int reg_invalidate, unsigned int pid)
+{
+ unsigned int link[2];
+ mh_mmu_invalidate_u mh_mmu_invalidate = {0};
+
+ mh_mmu_invalidate.f.invalidate_all = 1;
+ mh_mmu_invalidate.f.invalidate_tc = 1;
+
+ // if possible, invalidate via command stream, otherwise via direct register writes
+ if (device->flags & GSL_FLAGS_STARTED)
+ {
+ link[0] = pm4_type0_packet(reg_invalidate, 1);
+ link[1] = mh_mmu_invalidate.val;
+
+ kgsl_ringbuffer_issuecmds(device, 1, &link[0], 2, pid);
+ }
+ else
+ {
+
+ device->ftbl.device_regwrite(device, reg_invalidate, mh_mmu_invalidate.val);
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_setpagetable(gsl_device_t *device, unsigned int reg_ptbase, gpuaddr_t ptbase, unsigned int pid)
+{
+ unsigned int link[25];
+
+ // if there is an active draw context, set via command stream,
+ if (device->flags & GSL_FLAGS_STARTED)
+ {
+ // wait for graphics pipe to be idle
+ link[0] = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ link[1] = 0x00000000;
+
+ // set page table base
+ link[2] = pm4_type0_packet(reg_ptbase, 1);
+ link[3] = ptbase;
+
+ // HW workaround: to resolve MMU page fault interrupts caused by the VGT. It prevents
+ // the CP PFP from filling the VGT DMA request fifo too early, thereby ensuring that
+ // the VGT will not fetch vertex/bin data until after the page table base register
+ // has been updated.
+ //
+ // Two null DRAW_INDX_BIN packets are inserted right after the page table base update,
+ // followed by a wait for idle. The null packets will fill up the VGT DMA request
+ // fifo and prevent any further vertex/bin updates from occurring until the wait
+ // has finished.
+ link[4] = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ link[5] = (0x4 << 16) | (mmPA_SU_SC_MODE_CNTL - 0x2000);
+ link[6] = 0; // disable faceness generation
+ link[7] = pm4_type3_packet(PM4_SET_BIN_BASE_OFFSET, 1);
+ link[8] = device->mmu.dummyspace.gpuaddr;
+ link[9] = pm4_type3_packet(PM4_DRAW_INDX_BIN, 6);
+ link[10] = 0; // viz query info
+ link[11] = 0x0003C004; // draw indicator
+ link[12] = 0; // bin base
+ link[13] = 3; // bin size
+ link[14] = device->mmu.dummyspace.gpuaddr; // dma base
+ link[15] = 6; // dma size
+ link[16] = pm4_type3_packet(PM4_DRAW_INDX_BIN, 6);
+ link[17] = 0; // viz query info
+ link[18] = 0x0003C004; // draw indicator
+ link[19] = 0; // bin base
+ link[20] = 3; // bin size
+ link[21] = device->mmu.dummyspace.gpuaddr; // dma base
+ link[22] = 6; // dma size
+ link[23] = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ link[24] = 0x00000000;
+
+ kgsl_ringbuffer_issuecmds(device, 1, &link[0], 25, pid);
+ }
+ else
+ {
+ device->ftbl.device_idle(device, GSL_TIMEOUT_DEFAULT);
+ device->ftbl.device_regwrite(device, reg_ptbase, ptbase);
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_init(gsl_device_t *device)
+{
+ int status = GSL_FAILURE;
+
+ device->flags |= GSL_FLAGS_INITIALIZED;
+
+ kgsl_hal_setpowerstate(device->id, GSL_PWRFLAGS_POWER_ON, 100);
+
+    // We need to make sure all blocks are powered up and clocked before
+    // issuing a soft reset. The overrides will be turned off (set to 0)
+    // later in kgsl_yamato_start.
+ device->ftbl.device_regwrite(device, mmRBBM_PM_OVERRIDE1, 0xfffffffe);
+ device->ftbl.device_regwrite(device, mmRBBM_PM_OVERRIDE2, 0xffffffff);
+
+ // soft reset
+ device->ftbl.device_regwrite(device, mmRBBM_SOFT_RESET, 0xFFFFFFFF);
+ kos_sleep(50);
+ device->ftbl.device_regwrite(device, mmRBBM_SOFT_RESET, 0x00000000);
+
+ // RBBM control
+ device->ftbl.device_regwrite(device, mmRBBM_CNTL, 0x00004442);
+
+ // setup MH arbiter
+ device->ftbl.device_regwrite(device, mmMH_ARBITER_CONFIG, *(unsigned int *) &gsl_cfg_yamato_mharb);
+
+ // SQ_*_PROGRAM
+ device->ftbl.device_regwrite(device, mmSQ_VS_PROGRAM, 0x00000000);
+ device->ftbl.device_regwrite(device, mmSQ_PS_PROGRAM, 0x00000000);
+
+ // init interrupt
+ status = kgsl_intr_init(device);
+ if (status != GSL_SUCCESS)
+ {
+ device->ftbl.device_stop(device);
+ return (status);
+ }
+
+ // init mmu
+ status = kgsl_mmu_init(device);
+ if (status != GSL_SUCCESS)
+ {
+ device->ftbl.device_stop(device);
+ return (status);
+ }
+
+ return(status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_close(gsl_device_t *device)
+{
+ if (device->refcnt == 0)
+ {
+ // shutdown mmu
+ kgsl_mmu_close(device);
+
+ // shutdown interrupt
+ kgsl_intr_close(device);
+
+ kgsl_hal_setpowerstate(device->id, GSL_PWRFLAGS_POWER_OFF, 0);
+
+ device->flags &= ~GSL_FLAGS_INITIALIZED;
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_destroy(gsl_device_t *device)
+{
+ int i;
+ unsigned int pid;
+
+#ifdef _DEBUG
+ // for now, signal catastrophic failure in a brute force way
+ KOS_ASSERT(0);
+#endif // _DEBUG
+
+ // todo: - hard reset core?
+
+ kgsl_drawctxt_destroyall(device);
+
+ for (i = 0; i < GSL_CALLER_PROCESS_MAX; i++)
+ {
+ pid = device->callerprocess[i];
+ if (pid)
+ {
+ device->ftbl.device_stop(device);
+ kgsl_driver_destroy(pid);
+
+ // todo: terminate client process?
+ }
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_start(gsl_device_t *device, gsl_flags_t flags)
+{
+ int status = GSL_FAILURE;
+ unsigned int pm1, pm2;
+
+ KGSL_DEBUG(GSL_DBGFLAGS_PM4, KGSL_DEBUG_DUMPFBSTART(device));
+
+ (void) flags; // unreferenced formal parameter
+
+ kgsl_hal_setpowerstate(device->id, GSL_PWRFLAGS_CLK_ON, 100);
+
+ // default power management override when running in safe mode
+ pm1 = (device->flags & GSL_FLAGS_SAFEMODE) ? 0xFFFFFFFE : 0x00000000;
+ pm2 = (device->flags & GSL_FLAGS_SAFEMODE) ? 0x000000FF : 0x00000000;
+ device->ftbl.device_regwrite(device, mmRBBM_PM_OVERRIDE1, pm1);
+ device->ftbl.device_regwrite(device, mmRBBM_PM_OVERRIDE2, pm2);
+
+ // enable rbbm interrupts
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_RBBM_READ_ERROR, kgsl_yamato_rbbmintrcallback, (void *) device);
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_RBBM_DISPLAY_UPDATE, kgsl_yamato_rbbmintrcallback, (void *) device);
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_RBBM_GUI_IDLE, kgsl_yamato_rbbmintrcallback, (void *) device);
+ kgsl_intr_enable(&device->intr, GSL_INTR_YDX_RBBM_READ_ERROR);
+ kgsl_intr_enable(&device->intr, GSL_INTR_YDX_RBBM_DISPLAY_UPDATE);
+#if defined GSL_RB_TIMESTAMP_INTERUPT
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_CP_RING_BUFFER, kgsl_yamato_cpintrcallback, (void *) device);
+ kgsl_intr_enable(&device->intr, GSL_INTR_YDX_CP_RING_BUFFER);
+#endif
+
+ //kgsl_intr_enable(&device->intr, GSL_INTR_YDX_RBBM_GUI_IDLE);
+
+ // enable sq interrupts
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_SQ_PS_WATCHDOG, kgsl_yamato_sqintrcallback, (void *) device);
+ kgsl_intr_attach(&device->intr, GSL_INTR_YDX_SQ_VS_WATCHDOG, kgsl_yamato_sqintrcallback, (void *) device);
+ //kgsl_intr_enable(&device->intr, GSL_INTR_YDX_SQ_PS_WATCHDOG);
+ //kgsl_intr_enable(&device->intr, GSL_INTR_YDX_SQ_VS_WATCHDOG);
+
+ // init gmem
+ kgsl_yamato_gmeminit(device);
+
+ // init ring buffer
+ status = kgsl_ringbuffer_init(device);
+ if (status != GSL_SUCCESS)
+ {
+ device->ftbl.device_stop(device);
+ return (status);
+ }
+
+ // init draw context
+ status = kgsl_drawctxt_init(device);
+ if (status != GSL_SUCCESS)
+ {
+ device->ftbl.device_stop(device);
+ return (status);
+ }
+
+ device->flags |= GSL_FLAGS_STARTED;
+
+ KGSL_DEBUG(GSL_DBGFLAGS_BIST, kgsl_yamato_bist(device));
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_stop(gsl_device_t *device)
+{
+ // disable rbbm interrupts
+ kgsl_intr_detach(&device->intr, GSL_INTR_YDX_RBBM_READ_ERROR);
+ kgsl_intr_detach(&device->intr, GSL_INTR_YDX_RBBM_DISPLAY_UPDATE);
+ kgsl_intr_detach(&device->intr, GSL_INTR_YDX_RBBM_GUI_IDLE);
+#if defined GSL_RB_TIMESTAMP_INTERUPT
+ kgsl_intr_detach(&device->intr, GSL_INTR_YDX_CP_RING_BUFFER);
+#endif
+
+ // disable sq interrupts
+ kgsl_intr_detach(&device->intr, GSL_INTR_YDX_SQ_PS_WATCHDOG);
+ kgsl_intr_detach(&device->intr, GSL_INTR_YDX_SQ_VS_WATCHDOG);
+
+ kgsl_drawctxt_close(device);
+
+ // shutdown ringbuffer
+ kgsl_ringbuffer_close(&device->ringbuffer);
+
+ // shutdown gmem
+ kgsl_yamato_gmemclose(device);
+
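+    // gate the core clock off only when the last reference on the device is gone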
+ if(device->refcnt == 0)
+ {
+ kgsl_hal_setpowerstate(device->id, GSL_PWRFLAGS_CLK_OFF, 0);
+ }
+
+ device->flags &= ~GSL_FLAGS_STARTED;
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_getproperty(gsl_device_t *device, gsl_property_type_t type, void *value, unsigned int sizebytes)
+{
+ int status = GSL_FAILURE;
+
+#ifndef _DEBUG
+ (void) sizebytes; // unreferenced formal parameter
+#endif
+
+ if (type == GSL_PROP_DEVICE_INFO)
+ {
+ gsl_devinfo_t *devinfo = (gsl_devinfo_t *) value;
+
+ KOS_ASSERT(sizebytes == sizeof(gsl_devinfo_t));
+
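+        // report static device information: device/chip id, MMU state, and the
+        // GMEM aperture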
+ devinfo->device_id = device->id;
+ devinfo->chip_id = (gsl_chipid_t)device->chip_id;
+ devinfo->mmu_enabled = kgsl_mmu_isenabled(&device->mmu);
+ devinfo->gmem_hostbaseaddr = device->gmemspace.mmio_virt_base;
+ devinfo->gmem_gpubaseaddr = device->gmemspace.gpu_base;
+ devinfo->gmem_sizebytes = device->gmemspace.sizebytes;
+
+ status = GSL_SUCCESS;
+ }
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_setproperty(gsl_device_t *device, gsl_property_type_t type, void *value, unsigned int sizebytes)
+{
+ int status = GSL_FAILURE;
+
+#ifndef _DEBUG
+ (void) sizebytes; // unreferenced formal parameter
+#endif
+
+ if (type == GSL_PROP_DEVICE_POWER)
+ {
+ gsl_powerprop_t *power = (gsl_powerprop_t *) value;
+
+ KOS_ASSERT(sizebytes == sizeof(gsl_powerprop_t));
+
+ if (!(device->flags & GSL_FLAGS_SAFEMODE))
+ {
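+            // either force the RBBM power-management override registers directly,
+            // or hand the request off to the HAL power-state handler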
+ if (power->flags & GSL_PWRFLAGS_OVERRIDE_ON)
+ {
+ device->ftbl.device_regwrite(device, mmRBBM_PM_OVERRIDE1, 0xfffffffe);
+ device->ftbl.device_regwrite(device, mmRBBM_PM_OVERRIDE2, 0xffffffff);
+ }
+ else if (power->flags & GSL_PWRFLAGS_OVERRIDE_OFF)
+ {
+ device->ftbl.device_regwrite(device, mmRBBM_PM_OVERRIDE1, 0x00000000);
+ device->ftbl.device_regwrite(device, mmRBBM_PM_OVERRIDE2, 0x00000000);
+ }
+ else
+ {
+ kgsl_hal_setpowerstate(device->id, power->flags, power->value);
+ }
+ }
+
+ status = GSL_SUCCESS;
+ }
+ else if (type == GSL_PROP_DEVICE_DMI)
+ {
+ gsl_dmiprop_t *dmi = (gsl_dmiprop_t *) value;
+
+ KOS_ASSERT(sizebytes == sizeof(gsl_dmiprop_t));
+
+ //
+        // DMI can only be enabled if it is not already enabled.
+ //
+ switch (dmi->flags)
+ {
+ case GSL_DMIFLAGS_ENABLE_SINGLE:
+ case GSL_DMIFLAGS_ENABLE_DOUBLE:
+ if (!gsl_driver.dmi_state)
+ {
+ gsl_driver.dmi_state = OS_TRUE;
+ gsl_driver.dmi_mode = dmi->flags;
+ gsl_driver.dmi_frame = -1;
+ status = GSL_SUCCESS;
+ }
+ break;
+ case GSL_DMIFLAGS_DISABLE:
+ //
+            // DMI can only be disabled if it is currently enabled.
+ //
+ if (gsl_driver.dmi_state)
+ {
+ gsl_driver.dmi_state = OS_FALSE;
+ gsl_driver.dmi_mode = -1;
+ gsl_driver.dmi_frame = -2;
+ status = GSL_SUCCESS;
+ }
+ break;
+ case GSL_DMIFLAGS_NEXT_BUFFER:
+ //
+            // Advancing to the next buffer depends on whether single, double, or triple buffering is in use.
+ // DMI must also be enabled.
+ //
+ if (gsl_driver.dmi_state)
+ {
+ unsigned int cmdbuf[10];
+ unsigned int *cmds = &cmdbuf[0];
+ int size;
+
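+                // Assemble a short PM4 type-0 command sequence: on the first call,
+                // program mmRBBM_DSPLY for the selected buffering mode, then wait for
+                // the 3D core to go idle and update mmRBBM_RENDER_LATEST to select the
+                // next frame buffer.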
+ if (gsl_driver.dmi_frame == -1)
+ {
+ size = 8;
+
+ *cmds++ = pm4_type0_packet(mmRBBM_DSPLY, 1);
+ switch (gsl_driver.dmi_mode)
+ {
+ case GSL_DMIFLAGS_ENABLE_SINGLE:
+ gsl_driver.dmi_max_frame = 1;
+ *cmds++ = 0x041000410;
+ break;
+ case GSL_DMIFLAGS_ENABLE_DOUBLE:
+ gsl_driver.dmi_max_frame = 2;
+ *cmds++ = 0x041000510;
+ break;
+ case GSL_DMIFLAGS_ENABLE_TRIPLE:
+ gsl_driver.dmi_max_frame = 3;
+ *cmds++ = 0x041000610;
+ break;
+ }
+ }
+ else
+ {
+ size = 6;
+ }
+
+ //
+                // Wait for the 3D core to be idle (the combined idle + vsync wait below is disabled)
+ //
+ *cmds++ = pm4_type0_packet(mmWAIT_UNTIL, 1);
+ *cmds++ = 0x00008000; // 3d idle
+ // *cmds++ = 0x00008008; // 3d idle & vsync
+
+ //
+ // Update the render latest register.
+ //
+ *cmds++ = pm4_type0_packet(mmRBBM_RENDER_LATEST, 1);
+ switch (gsl_driver.dmi_frame)
+ {
+ case 0:
+ //
+ // Render frame 0
+ //
+ *cmds++ = 0;
+ //
+ // Wait for our max frame # indicator to be de-asserted
+ //
+ *cmds++ = pm4_type0_packet(mmWAIT_UNTIL, 1);
+ *cmds++ = 0x00000008 << gsl_driver.dmi_max_frame;
+ gsl_driver.dmi_frame = 1;
+ break;
+ case -1:
+ case 1:
+ //
+ // Render frame 1
+ //
+ *cmds++ = 1;
+ *cmds++ = pm4_type0_packet(mmWAIT_UNTIL, 1);
+ *cmds++ = 0x00000010; // Wait for frame 0 to be deasserted
+ gsl_driver.dmi_frame = 2;
+ break;
+ case 2:
+ //
+ // Render frame 2
+ //
+ *cmds++ = 2;
+ *cmds++ = pm4_type0_packet(mmWAIT_UNTIL, 1);
+ *cmds++ = 0x00000020; // Wait for frame 1 to be deasserted
+ gsl_driver.dmi_frame = 0;
+ break;
+ }
+
+ // issue the commands
+ kgsl_ringbuffer_issuecmds(device, 1, &cmdbuf[0], size, GSL_CALLER_PROCESSID_GET());
+
+ gsl_driver.dmi_frame %= gsl_driver.dmi_max_frame;
+ status = GSL_SUCCESS;
+ }
+ break;
+ default:
+ status = GSL_FAILURE;
+ break;
+ }
+ }
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_idle(gsl_device_t *device, unsigned int timeout)
+{
+ int status = GSL_FAILURE;
+ gsl_ringbuffer_t *rb = &device->ringbuffer;
+ rbbm_status_u rbbm_status;
+
+ (void) timeout; // unreferenced formal parameter
+
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX, KGSL_DEBUG_DUMPX(BB_DUMP_REGPOLL, device->id, mmRBBM_STATUS, 0x80000000, "kgsl_yamato_idle"));
+
+ // first, wait until the CP has consumed all the commands in the ring buffer
+ if (rb->flags & GSL_FLAGS_STARTED)
+ {
+ do
+ {
+ GSL_RB_GET_READPTR(rb, &rb->rptr);
+
+ } while (rb->rptr != rb->wptr);
+ }
+
+ // now, wait for the GPU to finish its operations
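+    // (RBBM_STATUS bit 31 appears to act as the busy flag; note that the timeout
+    // argument is not honored by this polling loop)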
+ for ( ; ; )
+ {
+ device->ftbl.device_regread(device, mmRBBM_STATUS, (unsigned int *)&rbbm_status);
+
+ if (!(rbbm_status.val & 0x80000000))
+ {
+ status = GSL_SUCCESS;
+ break;
+ }
+
+ }
+
+ return (status);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_regread(gsl_device_t *device, unsigned int offsetwords, unsigned int *value)
+{
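+    // when DUMPX capture is active (and IFH suppression is not requested), reads
+    // of the CP ring buffer read/write pointers are satisfied from the software
+    // copy of the write pointer instead of touching the hardware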
+ KGSL_DEBUG(GSL_DBGFLAGS_DUMPX,
+ {
+ if (!(gsl_driver.flags_debug & GSL_DBGFLAGS_DUMPX_WITHOUT_IFH))
+ {
+ if(offsetwords == mmCP_RB_RPTR || offsetwords == mmCP_RB_WPTR)
+ {
+ *value = device->ringbuffer.wptr;
+ return (GSL_SUCCESS);
+ }
+ }
+ });
+
+ GSL_HAL_REG_READ(device->id, (unsigned int) device->regspace.mmio_virt_base, offsetwords, value);
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_regwrite(gsl_device_t *device, unsigned int offsetwords, unsigned int value)
+{
+ KGSL_DEBUG(GSL_DBGFLAGS_PM4, KGSL_DEBUG_DUMPREGWRITE(offsetwords, value));
+
+ GSL_HAL_REG_WRITE(device->id, (unsigned int) device->regspace.mmio_virt_base, offsetwords, value);
+
+ // idle device when running in safe mode
+ if (device->flags & GSL_FLAGS_SAFEMODE)
+ {
+ device->ftbl.device_idle(device, GSL_TIMEOUT_DEFAULT);
+ }
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_waitirq(gsl_device_t *device, gsl_intrid_t intr_id, unsigned int *count, unsigned int timeout)
+{
+ int status = GSL_FAILURE_NOTSUPPORTED;
+
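+    // blocking waits are supported only for the CP IB1/IB2/SW and RBBM
+    // display-update interrupts, and only while the interrupt is enabled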
+ if (intr_id == GSL_INTR_YDX_CP_IB1_INT || intr_id == GSL_INTR_YDX_CP_IB2_INT ||
+ intr_id == GSL_INTR_YDX_CP_SW_INT || intr_id == GSL_INTR_YDX_RBBM_DISPLAY_UPDATE)
+ {
+ if (kgsl_intr_isenabled(&device->intr, intr_id) == GSL_SUCCESS)
+ {
+ // wait until intr completion event is received
+ if (kos_event_wait(device->intr.evnt[intr_id], timeout) == OS_SUCCESS)
+ {
+ *count = 1;
+ status = GSL_SUCCESS;
+ }
+ else
+ {
+ status = GSL_FAILURE_TIMEOUT;
+ }
+ }
+ }
+
+ return (status);
+}
+
+int
+kgsl_yamato_waittimestamp(gsl_device_t *device, gsl_timestamp_t timestamp, unsigned int timeout)
+{
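+    // with GSL_RB_TIMESTAMP_INTERUPT compiled in, block (with timeout) until the
+    // timestamp event fires (non-Linux) or the cmdstream timestamp check passes
+    // (Linux); otherwise this is a no-op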
+#if defined GSL_RB_TIMESTAMP_INTERUPT
+#ifndef _LINUX
+ return kos_event_wait( device->timestamp_event, timeout );
+#else
+ int status = wait_event_interruptible_timeout(device->timestamp_waitq,
+ kgsl_cmdstream_check_timestamp(device->id, timestamp),
+ msecs_to_jiffies(timeout));
+ if (status > 0)
+ return GSL_SUCCESS;
+ else
+ return GSL_FAILURE;
+#endif
+#else
+ return (GSL_SUCCESS);
+#endif
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_runpending(gsl_device_t *device)
+{
+ (void) device;
+
+ return (GSL_SUCCESS);
+}
+
+//----------------------------------------------------------------------------
+
+int
+kgsl_yamato_getfunctable(gsl_functable_t *ftbl)
+{
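+    // populate the device-independent function table with the Yamato entry points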
+ ftbl->device_init = kgsl_yamato_init;
+ ftbl->device_close = kgsl_yamato_close;
+ ftbl->device_destroy = kgsl_yamato_destroy;
+ ftbl->device_start = kgsl_yamato_start;
+ ftbl->device_stop = kgsl_yamato_stop;
+ ftbl->device_getproperty = kgsl_yamato_getproperty;
+ ftbl->device_setproperty = kgsl_yamato_setproperty;
+ ftbl->device_idle = kgsl_yamato_idle;
+ ftbl->device_waittimestamp = kgsl_yamato_waittimestamp;
+ ftbl->device_regread = kgsl_yamato_regread;
+ ftbl->device_regwrite = kgsl_yamato_regwrite;
+ ftbl->device_waitirq = kgsl_yamato_waitirq;
+ ftbl->device_runpending = kgsl_yamato_runpending;
+ ftbl->intr_isr = kgsl_yamato_isr;
+ ftbl->mmu_tlbinvalidate = kgsl_yamato_tlbinvalidate;
+ ftbl->mmu_setpagetable = kgsl_yamato_setpagetable;
+ ftbl->cmdstream_issueibcmds = kgsl_ringbuffer_issueibcmds;
+ ftbl->context_create = kgsl_drawctxt_create;
+ ftbl->context_destroy = kgsl_drawctxt_destroy;
+
+ return (GSL_SUCCESS);
+}
+
+#endif
+
diff --git a/drivers/mxc/amd-gpu/common/pfp_microcode_nrt.inl b/drivers/mxc/amd-gpu/common/pfp_microcode_nrt.inl
new file mode 100644
index 000000000000..dfe61295e9ef
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/pfp_microcode_nrt.inl
@@ -0,0 +1,327 @@
+/* Copyright (c) 2008-2010, QUALCOMM Incorporated. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of QUALCOMM Incorporated nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef PFP_MICROCODE_NRT_H
+#define PFP_MICROCODE_NRT_H
+
+#define PFP_MICROCODE_VERSION 308308
+
+#define PFP_MICROCODE_SIZE_NRT 288
+
+uint32 aPFP_Microcode_nrt[PFP_MICROCODE_SIZE_NRT]={
+0xc60400,
+0x7e424b,
+0xa00000,
+0x7e828b,
+0x800001,
+0xc60400,
+0xcc4003,
+0x800000,
+0xd60003,
+0x800000,
+0xc62c00,
+0xc80c35,
+0x98c000,
+0xc80c35,
+0x880000,
+0xc80c1d,
+0x84000b,
+0xc60800,
+0x98c007,
+0xc61000,
+0x978003,
+0xcc4003,
+0xd60004,
+0x800000,
+0xcd0003,
+0x9783e8,
+0xc60400,
+0x800000,
+0xc60400,
+0x84000b,
+0xc60800,
+0x98c00c,
+0xc61000,
+0xcc4003,
+0xc61400,
+0xc61800,
+0x7d6d40,
+0xcd401e,
+0x978003,
+0xcd801e,
+0xd60004,
+0x800000,
+0xcd0003,
+0x800000,
+0xd6001f,
+0x84000b,
+0xc60800,
+0x98c007,
+0xc60c00,
+0xcc4003,
+0xcc8003,
+0xccc003,
+0x800000,
+0xd60003,
+0x800000,
+0xd6001f,
+0xc60800,
+0x348c08,
+0x98c006,
+0xc80c1e,
+0x98c000,
+0xc80c1e,
+0x800041,
+0xcc8007,
+0xcc8008,
+0xcc4003,
+0x800000,
+0xcc8003,
+0xc60400,
+0x1a9c07,
+0xca8821,
+0x95c3b9,
+0xc8102c,
+0x98800a,
+0x329418,
+0x9a4004,
+0xcc6810,
+0x042401,
+0xd00143,
+0xd00162,
+0xcd0002,
+0x7d514c,
+0xcd4003,
+0x9b8007,
+0x06a801,
+0x964003,
+0xc28000,
+0xcf4003,
+0x800001,
+0xc60400,
+0x800045,
+0xc60400,
+0x800001,
+0xc60400,
+0xc60800,
+0xc60c00,
+0xc8102d,
+0x349402,
+0x99000b,
+0xc8182e,
+0xcd4002,
+0xcd8002,
+0xd001e3,
+0xd001c3,
+0xccc003,
+0xcc801c,
+0xcd801d,
+0x800001,
+0xc60400,
+0xd00203,
+0x800000,
+0xd001c3,
+0xc8081f,
+0xc60c00,
+0xc80c20,
+0x988000,
+0xc8081f,
+0xcc4003,
+0xccc003,
+0xd60003,
+0xccc022,
+0xcc001f,
+0x800000,
+0xcc001f,
+0xc81c2f,
+0xc60400,
+0xc60800,
+0xc60c00,
+0xc81030,
+0x99c000,
+0xc81c2f,
+0xcc8021,
+0xcc4020,
+0x990011,
+0xc107ff,
+0xd00223,
+0xd00243,
+0x345402,
+0x7cb18b,
+0x7d95cc,
+0xcdc002,
+0xccc002,
+0xd00263,
+0x978005,
+0xccc003,
+0xc60800,
+0x80008a,
+0xc60c00,
+0x800000,
+0xd00283,
+0x97836b,
+0xc60400,
+0xd6001f,
+0x800001,
+0xc60400,
+0xd2000d,
+0xcc000d,
+0x800000,
+0xcc000d,
+0xc60800,
+0xc60c00,
+0xca1433,
+0xd022a0,
+0xcce000,
+0x99435c,
+0xcce005,
+0x800000,
+0x062001,
+0xc60800,
+0xc60c00,
+0xd202c3,
+0xcc8003,
+0xccc003,
+0xcce027,
+0x800000,
+0x062001,
+0xca0831,
+0x9883ff,
+0xca0831,
+0xd6001f,
+0x800001,
+0xc60400,
+0x0a2001,
+0x800001,
+0xc60400,
+0xd20009,
+0xd2000a,
+0xcc001f,
+0x800000,
+0xcc001f,
+0xd2000b,
+0xd2000c,
+0xcc001f,
+0x800000,
+0xcc001f,
+0xcc0023,
+0xcc4003,
+0xce0003,
+0x800000,
+0xd60003,
+0xd00303,
+0xcc0024,
+0xcc4003,
+0x800000,
+0xd60003,
+0xd00323,
+0xcc0025,
+0xcc4003,
+0x800000,
+0xd60003,
+0xd00343,
+0xcc0026,
+0xcc4003,
+0x800000,
+0xd60003,
+0x800000,
+0xd6001f,
+0x280401,
+0xd20001,
+0xcc4001,
+0xcc4006,
+0x8400e7,
+0xc40802,
+0xc40c02,
+0xcc402b,
+0x98831f,
+0xc63800,
+0x8400e7,
+0xcf802b,
+0x800000,
+0xd6001f,
+0xcc001f,
+0x880000,
+0xcc001f,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x000000,
+0x0100c8,
+0x0200cd,
+0x0300d2,
+0x050004,
+0x1000d7,
+0x1700b6,
+0x220010,
+0x230038,
+0x250044,
+0x27005e,
+0x2d0070,
+0x2e007c,
+0x4b0009,
+0x34001d,
+0x36002d,
+0x3700a8,
+0x3b009b,
+0x3f009f,
+0x4400d9,
+0x4800c3,
+0x5000b9,
+0x5100be,
+0x5500c9,
+0x5600ce,
+0x5700d3,
+0x5d00b0,
+0x000006,
+0x000006,
+0x000006,
+0x000006,
+0x000006,
+0x000006,
+};
+
+#endif
diff --git a/drivers/mxc/amd-gpu/common/pm4_microcode.inl b/drivers/mxc/amd-gpu/common/pm4_microcode.inl
new file mode 100644
index 000000000000..03f6f4cd35e4
--- /dev/null
+++ b/drivers/mxc/amd-gpu/common/pm4_microcode.inl
@@ -0,0 +1,815 @@
+/* Copyright (c) 2008-2010, QUALCOMM Incorporated. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of QUALCOMM Incorporated nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef PM4_MICROCODE_H
+#define PM4_MICROCODE_H
+
+#define PM4_MICROCODE_VERSION 300684
+
+#define PM4_MICROCODE_SIZE 768
+
+
+#ifdef _PRIMLIB_INCLUDE
+extern uint32 aPM4_Microcode[PM4_MICROCODE_SIZE][3];
+#else
+uint32 aPM4_Microcode[PM4_MICROCODE_SIZE][3]={
+ { 0x00000000, 0xc0200400, 0x000 },
+ { 0x00000000, 0x00a0000a, 0x000 },
+ { 0x000001f3, 0x00204411, 0x000 },
+ { 0x01000000, 0x00204811, 0x000 },
+ { 0x00000000, 0x00400000, 0x004 },
+ { 0x0000ffff, 0x00284621, 0x000 },
+ { 0x00000000, 0xd9004800, 0x000 },
+ { 0x00000000, 0x00400000, 0x000 },
+ { 0x00000000, 0x34e00000, 0x000 },
+ { 0x00000000, 0x00600000, 0x28c },
+ { 0x0000ffff, 0xc0280a20, 0x000 },
+ { 0x00000000, 0x00294582, 0x000 },
+ { 0x00000000, 0xd9004800, 0x000 },
+ { 0x00000000, 0x00400000, 0x000 },
+ { 0x00000000, 0x00600000, 0x28c },
+ { 0x0000ffff, 0xc0284620, 0x000 },
+ { 0x00000000, 0xd9004800, 0x000 },
+ { 0x00000000, 0x00400000, 0x000 },
+ { 0x00000000, 0x00600000, 0x2a8 },
+ { 0x00000000, 0xc0200c00, 0x000 },
+ { 0x000021fc, 0x0029462c, 0x000 },
+ { 0x00000000, 0x00404803, 0x021 },
+ { 0x00000000, 0x00600000, 0x2a8 },
+ { 0x00000000, 0xc0200000, 0x000 },
+ { 0x00000000, 0xc0200c00, 0x000 },
+ { 0x000021fc, 0x0029462c, 0x000 },
+ { 0x00000000, 0x00204803, 0x000 },
+ { 0x00003fff, 0x002f022f, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x000 },
+ { 0x0000a1fd, 0x0029462c, 0x000 },
+ { 0x00000000, 0xd9004800, 0x000 },
+ { 0x00000000, 0x00400000, 0x021 },
+ { 0x00000000, 0x00400000, 0x000 },
+ { 0x00001000, 0x00281223, 0x000 },
+ { 0x00001000, 0x002f0224, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x000 },
+ { 0x000021f9, 0x0029462c, 0x000 },
+ { 0x0000000e, 0x00404811, 0x000 },
+ { 0x00000394, 0x00204411, 0x000 },
+ { 0x00000001, 0xc0404811, 0x000 },
+ { 0x00000000, 0x00600000, 0x2a8 },
+ { 0x000021f9, 0x0029462c, 0x000 },
+ { 0x00000008, 0xc0210a20, 0x000 },
+ { 0x00000000, 0x14e00000, 0x02d },
+ { 0x00000007, 0x00404811, 0x000 },
+ { 0x00000008, 0x00404811, 0x000 },
+ { 0x0000001f, 0x40280a20, 0x000 },
+ { 0x0000001b, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x043 },
+ { 0x00000002, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x04a },
+ { 0x00000003, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x051 },
+ { 0x00000004, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x058 },
+ { 0x00000014, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x058 },
+ { 0x00000015, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x060 },
+ { 0x000021f9, 0x0029462c, 0x000 },
+ { 0x00000000, 0xc0404802, 0x000 },
+ { 0x0000001f, 0x40280a20, 0x000 },
+ { 0x0000001b, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x043 },
+ { 0x00000002, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x04a },
+ { 0x00000000, 0x00400000, 0x051 },
+ { 0x0000001f, 0xc0210e20, 0x000 },
+ { 0x00000612, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204803, 0x000 },
+ { 0x00000000, 0xc0204800, 0x000 },
+ { 0x00000000, 0xc0204800, 0x000 },
+ { 0x000021f9, 0x0029462c, 0x000 },
+ { 0x00000000, 0x00404802, 0x000 },
+ { 0x0000001e, 0xc0210e20, 0x000 },
+ { 0x00000600, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204803, 0x000 },
+ { 0x00000000, 0xc0204800, 0x000 },
+ { 0x00000000, 0xc0204800, 0x000 },
+ { 0x000021f9, 0x0029462c, 0x000 },
+ { 0x00000000, 0x00404802, 0x000 },
+ { 0x0000001e, 0xc0210e20, 0x000 },
+ { 0x00000605, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204803, 0x000 },
+ { 0x00000000, 0xc0204800, 0x000 },
+ { 0x00000000, 0xc0204800, 0x000 },
+ { 0x000021f9, 0x0029462c, 0x000 },
+ { 0x00000000, 0x00404802, 0x000 },
+ { 0x0000001f, 0x40280a20, 0x000 },
+ { 0x0000001f, 0xc0210e20, 0x000 },
+ { 0x0000060a, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204803, 0x000 },
+ { 0x00000000, 0xc0204800, 0x000 },
+ { 0x00000000, 0xc0204800, 0x000 },
+ { 0x000021f9, 0x0029462c, 0x000 },
+ { 0x00000000, 0x00404802, 0x000 },
+ { 0x0000001f, 0xc0680a20, 0x2a8 },
+ { 0x000021f9, 0x0029462c, 0x000 },
+ { 0x00000000, 0x00404802, 0x000 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000001, 0x00204811, 0x000 },
+ { 0x00001fff, 0x40280a20, 0x000 },
+ { 0x80000000, 0x40280e20, 0x000 },
+ { 0x40000000, 0xc0281220, 0x000 },
+ { 0x00040000, 0x00694622, 0x2b2 },
+ { 0x00000000, 0x00201410, 0x000 },
+ { 0x00000000, 0x002f0223, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x06d },
+ { 0x00000000, 0xc0401800, 0x070 },
+ { 0x00001fff, 0xc0281a20, 0x000 },
+ { 0x00040000, 0x00694626, 0x2b2 },
+ { 0x00000000, 0x00201810, 0x000 },
+ { 0x00000000, 0x002f0224, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x073 },
+ { 0x00000000, 0xc0401c00, 0x076 },
+ { 0x00001fff, 0xc0281e20, 0x000 },
+ { 0x00040000, 0x00694627, 0x2b2 },
+ { 0x00000000, 0x00201c10, 0x000 },
+ { 0x00000000, 0x00204402, 0x000 },
+ { 0x00000000, 0x002820c5, 0x000 },
+ { 0x00000000, 0x004948e8, 0x000 },
+ { 0x00000000, 0x00600000, 0x28c },
+ { 0x00000010, 0x40210a20, 0x000 },
+ { 0x000000ff, 0x00280a22, 0x000 },
+ { 0x000007ff, 0x40280e20, 0x000 },
+ { 0x00000002, 0x00221e23, 0x000 },
+ { 0x00000005, 0xc0211220, 0x000 },
+ { 0x00080000, 0x00281224, 0x000 },
+ { 0x00000013, 0x00210224, 0x000 },
+ { 0x00000000, 0x14c00000, 0x084 },
+ { 0xa100ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204811, 0x000 },
+ { 0x00000000, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x088 },
+ { 0x00000000, 0x0020162d, 0x000 },
+ { 0x00004000, 0x00500e23, 0x097 },
+ { 0x00000001, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x08c },
+ { 0x00000001, 0x0020162d, 0x000 },
+ { 0x00004800, 0x00500e23, 0x097 },
+ { 0x00000002, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x090 },
+ { 0x00000003, 0x0020162d, 0x000 },
+ { 0x00004900, 0x00500e23, 0x097 },
+ { 0x00000003, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x094 },
+ { 0x00000002, 0x0020162d, 0x000 },
+ { 0x00004908, 0x00500e23, 0x097 },
+ { 0x00000012, 0x0020162d, 0x000 },
+ { 0x00002000, 0x00300e23, 0x000 },
+ { 0x00000000, 0x00290d83, 0x000 },
+ { 0x9400ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x002948e5, 0x000 },
+ { 0x00000000, 0x00294483, 0x000 },
+ { 0x00000000, 0x40201800, 0x000 },
+ { 0x00000000, 0xd9004800, 0x000 },
+ { 0x00000013, 0x00210224, 0x000 },
+ { 0x00000000, 0x14c00000, 0x000 },
+ { 0x9400ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x002948e5, 0x000 },
+ { 0x9300ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00404806, 0x000 },
+ { 0x00000000, 0x00600000, 0x28c },
+ { 0x00000000, 0xc0200800, 0x000 },
+ { 0x00000000, 0xc0201400, 0x000 },
+ { 0x0000001f, 0x00211a25, 0x000 },
+ { 0x00000000, 0x14e00000, 0x000 },
+ { 0x000007ff, 0x00280e25, 0x000 },
+ { 0x00000010, 0x00211225, 0x000 },
+ { 0x8300ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x002f0224, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x0ae },
+ { 0x00000000, 0x00203622, 0x000 },
+ { 0x00004000, 0x00504a23, 0x0bd },
+ { 0x00000001, 0x002f0224, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x0b2 },
+ { 0x00000001, 0x00203622, 0x000 },
+ { 0x00004800, 0x00504a23, 0x0bd },
+ { 0x00000002, 0x002f0224, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x0b6 },
+ { 0x00000003, 0x00203622, 0x000 },
+ { 0x00004900, 0x00504a23, 0x0bd },
+ { 0x00000003, 0x002f0224, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x0ba },
+ { 0x00000002, 0x00203622, 0x000 },
+ { 0x00004908, 0x00504a23, 0x0bd },
+ { 0x00000012, 0x00203622, 0x000 },
+ { 0x00000000, 0x00290d83, 0x000 },
+ { 0x00002000, 0x00304a23, 0x000 },
+ { 0x8400ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0xc0204800, 0x000 },
+ { 0x00000000, 0x21000000, 0x000 },
+ { 0x00000000, 0x00400000, 0x0a4 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000001, 0x00204811, 0x000 },
+ { 0x00040578, 0x00604411, 0x2b2 },
+ { 0x00000000, 0xc0400000, 0x000 },
+ { 0x00000000, 0xc0200c00, 0x000 },
+ { 0x00000000, 0xc0201000, 0x000 },
+ { 0x00000000, 0xc0201400, 0x000 },
+ { 0x00000000, 0xc0201800, 0x000 },
+ { 0x00007f00, 0x00280a21, 0x000 },
+ { 0x00004500, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x0cd },
+ { 0x00000000, 0xc0201c00, 0x000 },
+ { 0x00000000, 0x17000000, 0x000 },
+ { 0x00000010, 0x00280a23, 0x000 },
+ { 0x00000010, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x0d5 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000001, 0x00204811, 0x000 },
+ { 0x00040000, 0x00694624, 0x2b2 },
+ { 0x00000000, 0x00400000, 0x0d6 },
+ { 0x00000000, 0x00600000, 0x135 },
+ { 0x00000000, 0x002820d0, 0x000 },
+ { 0x00000007, 0x00280a23, 0x000 },
+ { 0x00000001, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x0dd },
+ { 0x00000000, 0x002f00a8, 0x000 },
+ { 0x00000000, 0x04e00000, 0x0f6 },
+ { 0x00000000, 0x00400000, 0x0fd },
+ { 0x00000002, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x0e2 },
+ { 0x00000000, 0x002f00a8, 0x000 },
+ { 0x00000000, 0x02e00000, 0x0f6 },
+ { 0x00000000, 0x00400000, 0x0fd },
+ { 0x00000003, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x0e7 },
+ { 0x00000000, 0x002f00a8, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x0f6 },
+ { 0x00000000, 0x00400000, 0x0fd },
+ { 0x00000004, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x0ec },
+ { 0x00000000, 0x002f00a8, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x0f6 },
+ { 0x00000000, 0x00400000, 0x0fd },
+ { 0x00000005, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x0f1 },
+ { 0x00000000, 0x002f00a8, 0x000 },
+ { 0x00000000, 0x06e00000, 0x0f6 },
+ { 0x00000000, 0x00400000, 0x0fd },
+ { 0x00000006, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x0f6 },
+ { 0x00000000, 0x002f00a8, 0x000 },
+ { 0x00000000, 0x08e00000, 0x0f6 },
+ { 0x00000000, 0x00400000, 0x0fd },
+ { 0x00007f00, 0x00280a21, 0x000 },
+ { 0x00004500, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x000 },
+ { 0x00000008, 0x00210a23, 0x000 },
+ { 0x00000000, 0x14e00000, 0x11b },
+ { 0x00000000, 0xc0204400, 0x000 },
+ { 0x00000000, 0xc0404800, 0x000 },
+ { 0x00007f00, 0x00280a21, 0x000 },
+ { 0x00004500, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x102 },
+ { 0x00000000, 0xc0200000, 0x000 },
+ { 0x00000000, 0xc0400000, 0x000 },
+ { 0x00000000, 0x00404c07, 0x0cd },
+ { 0x00000000, 0xc0201000, 0x000 },
+ { 0x00000000, 0xc0201400, 0x000 },
+ { 0x00000000, 0xc0201800, 0x000 },
+ { 0x00000000, 0xc0201c00, 0x000 },
+ { 0x00000000, 0x17000000, 0x000 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000001, 0x00204811, 0x000 },
+ { 0x00040000, 0x00694624, 0x2b2 },
+ { 0x00000000, 0x002820d0, 0x000 },
+ { 0x00000000, 0x002f00a8, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x000 },
+ { 0x00000000, 0x00404c07, 0x107 },
+ { 0x00000000, 0xc0201000, 0x000 },
+ { 0x00000000, 0xc0201400, 0x000 },
+ { 0x00000000, 0xc0201800, 0x000 },
+ { 0x00000000, 0xc0201c00, 0x000 },
+ { 0x00000000, 0x17000000, 0x000 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000001, 0x00204811, 0x000 },
+ { 0x00040000, 0x00694624, 0x2b2 },
+ { 0x00000000, 0x002820d0, 0x000 },
+ { 0x00000000, 0x002f00a8, 0x000 },
+ { 0x00000000, 0x06e00000, 0x000 },
+ { 0x00000000, 0x00404c07, 0x113 },
+ { 0x0000060d, 0x00204411, 0x000 },
+ { 0x00000000, 0xc0204800, 0x000 },
+ { 0x0000860e, 0x00204411, 0x000 },
+ { 0x00000000, 0xd9004800, 0x000 },
+ { 0x00000000, 0x00400000, 0x000 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000009, 0x00204811, 0x000 },
+ { 0x0000060d, 0x00204411, 0x000 },
+ { 0x00000000, 0xc0204800, 0x000 },
+ { 0x00000000, 0x00404810, 0x000 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000001, 0x00204811, 0x000 },
+ { 0x00000000, 0xc0200800, 0x000 },
+ { 0x00007fff, 0x00281a22, 0x000 },
+ { 0x00040000, 0x00694626, 0x2b2 },
+ { 0x00000000, 0x00200c10, 0x000 },
+ { 0x00000000, 0xc0201000, 0x000 },
+ { 0x80000000, 0x00281a22, 0x000 },
+ { 0x00000000, 0x002f0226, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x132 },
+ { 0x00000000, 0x00600000, 0x135 },
+ { 0x00000000, 0x00201c10, 0x000 },
+ { 0x00000000, 0x00300c67, 0x000 },
+ { 0x0000060d, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204804, 0x000 },
+ { 0x00000000, 0x00404803, 0x000 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204811, 0x000 },
+ { 0xa400ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204811, 0x000 },
+ { 0x000001ea, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204804, 0x000 },
+ { 0x00000000, 0x1ac00000, 0x13b },
+ { 0x9e00ffff, 0x00204411, 0x000 },
+ { 0xdeadbeef, 0x00204811, 0x000 },
+ { 0x00000000, 0x1ae00000, 0x13e },
+ { 0xa400ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x0080480b, 0x000 },
+ { 0x000001f3, 0x00204411, 0x000 },
+ { 0xe0000000, 0xc0484a20, 0x000 },
+ { 0x00000000, 0xd9000000, 0x000 },
+ { 0x00000000, 0x00400000, 0x000 },
+ { 0x00000000, 0xc0200c00, 0x000 },
+ { 0x8c00ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204803, 0x000 },
+ { 0x00000fff, 0x00281223, 0x000 },
+ { 0x0000000f, 0x00203624, 0x000 },
+ { 0x00000003, 0x00381224, 0x000 },
+ { 0x00005000, 0x00301224, 0x000 },
+ { 0x0000000e, 0x00203624, 0x000 },
+ { 0x8700ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204804, 0x000 },
+ { 0x00000001, 0x00331224, 0x000 },
+ { 0x8600ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204804, 0x000 },
+ { 0x0000001d, 0x00211223, 0x000 },
+ { 0x00000020, 0x00222091, 0x000 },
+ { 0x00000003, 0x00381228, 0x000 },
+ { 0x8800ffff, 0x00204411, 0x000 },
+ { 0x00004fff, 0x00304a24, 0x000 },
+ { 0x00000010, 0x00211623, 0x000 },
+ { 0x00000fff, 0x00281625, 0x000 },
+ { 0x00000fff, 0x00281a23, 0x000 },
+ { 0x00000000, 0x00331ca6, 0x000 },
+ { 0x8f00ffff, 0x00204411, 0x000 },
+ { 0x00000003, 0x00384a27, 0x000 },
+ { 0x00000010, 0x00211223, 0x000 },
+ { 0x00000fff, 0x00281224, 0x000 },
+ { 0x0000000d, 0x00203624, 0x000 },
+ { 0x8b00ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204804, 0x000 },
+ { 0x00000003, 0x00381224, 0x000 },
+ { 0x00005000, 0x00301224, 0x000 },
+ { 0x0000000c, 0x00203624, 0x000 },
+ { 0x8500ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204804, 0x000 },
+ { 0x00000000, 0x00331cc8, 0x000 },
+ { 0x9000ffff, 0x00204411, 0x000 },
+ { 0x00000003, 0x00384a27, 0x000 },
+ { 0x00300000, 0x00493a2e, 0x000 },
+ { 0x00000000, 0x00202c11, 0x000 },
+ { 0x00000001, 0x00303e2f, 0x000 },
+ { 0x00000000, 0xc0200800, 0x000 },
+ { 0x00000000, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x172 },
+ { 0x00000000, 0xd9000000, 0x000 },
+ { 0x00000000, 0x00400000, 0x000 },
+ { 0x00000000, 0x00600000, 0x28c },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000002, 0x00204811, 0x000 },
+ { 0x00000000, 0x002f0230, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x175 },
+ { 0x00000000, 0xc0200800, 0x000 },
+ { 0x00000009, 0x00210222, 0x000 },
+ { 0x00000000, 0x14c00000, 0x17d },
+ { 0x00000000, 0x00600000, 0x2af },
+ { 0x00000000, 0x00200c11, 0x000 },
+ { 0x00000016, 0x00203623, 0x000 },
+ { 0x00000000, 0x00210222, 0x000 },
+ { 0x00000000, 0x14c00000, 0x180 },
+ { 0x00000000, 0xc0200000, 0x000 },
+ { 0x00000001, 0x00210222, 0x000 },
+ { 0x00000000, 0x14c00000, 0x183 },
+ { 0x00000000, 0xc0200000, 0x000 },
+ { 0x00000002, 0x00210222, 0x000 },
+ { 0x00000000, 0x14c00000, 0x18d },
+ { 0x00000004, 0xc0203620, 0x000 },
+ { 0x00000005, 0xc0203620, 0x000 },
+ { 0x00000006, 0xc0203620, 0x000 },
+ { 0x00000007, 0xc0203620, 0x000 },
+ { 0x00000008, 0xc0203620, 0x000 },
+ { 0x00000009, 0xc0203620, 0x000 },
+ { 0x0000000a, 0xc0203620, 0x000 },
+ { 0x0000000b, 0xc0203620, 0x000 },
+ { 0x00000003, 0x00210222, 0x000 },
+ { 0x00000000, 0x14c00000, 0x1b5 },
+ { 0x00000000, 0xc0200c00, 0x000 },
+ { 0x8c00ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204803, 0x000 },
+ { 0x00000fff, 0x00281223, 0x000 },
+ { 0x0000000f, 0x00203624, 0x000 },
+ { 0x00000003, 0x00381224, 0x000 },
+ { 0x00005000, 0x00301224, 0x000 },
+ { 0x0000000e, 0x00203624, 0x000 },
+ { 0x8700ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204804, 0x000 },
+ { 0x00000001, 0x00331224, 0x000 },
+ { 0x8600ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204804, 0x000 },
+ { 0x0000001d, 0x00211223, 0x000 },
+ { 0x00000020, 0x00222091, 0x000 },
+ { 0x00000003, 0x00381228, 0x000 },
+ { 0x8800ffff, 0x00204411, 0x000 },
+ { 0x00004fff, 0x00304a24, 0x000 },
+ { 0x00000010, 0x00211623, 0x000 },
+ { 0x00000fff, 0x00281625, 0x000 },
+ { 0x00000fff, 0x00281a23, 0x000 },
+ { 0x00000000, 0x00331ca6, 0x000 },
+ { 0x8f00ffff, 0x00204411, 0x000 },
+ { 0x00000003, 0x00384a27, 0x000 },
+ { 0x00000010, 0x00211223, 0x000 },
+ { 0x00000fff, 0x00281224, 0x000 },
+ { 0x0000000d, 0x00203624, 0x000 },
+ { 0x8b00ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204804, 0x000 },
+ { 0x00000003, 0x00381224, 0x000 },
+ { 0x00005000, 0x00301224, 0x000 },
+ { 0x0000000c, 0x00203624, 0x000 },
+ { 0x8500ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204804, 0x000 },
+ { 0x00000000, 0x00331cc8, 0x000 },
+ { 0x9000ffff, 0x00204411, 0x000 },
+ { 0x00000003, 0x00384a27, 0x000 },
+ { 0x00300000, 0x00293a2e, 0x000 },
+ { 0x00000004, 0x00210222, 0x000 },
+ { 0x00000000, 0x14c00000, 0x1bd },
+ { 0xa300ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x40204800, 0x000 },
+ { 0x0000000a, 0xc0220e20, 0x000 },
+ { 0x00000011, 0x00203623, 0x000 },
+ { 0x000021f4, 0x00204411, 0x000 },
+ { 0x0000000a, 0x00614a2c, 0x2af },
+ { 0x00000005, 0x00210222, 0x000 },
+ { 0x00000000, 0x14c00000, 0x1c0 },
+ { 0x00000000, 0xc0200000, 0x000 },
+ { 0x00000006, 0x00210222, 0x000 },
+ { 0x00000000, 0x14c00000, 0x1c6 },
+ { 0x9c00ffff, 0x00204411, 0x000 },
+ { 0x0000001f, 0x40214a20, 0x000 },
+ { 0x9600ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0xc0204800, 0x000 },
+ { 0x00000007, 0x00210222, 0x000 },
+ { 0x00000000, 0x14c00000, 0x1d0 },
+ { 0x3fffffff, 0x00283a2e, 0x000 },
+ { 0xc0000000, 0x40280e20, 0x000 },
+ { 0x00000000, 0x0029386e, 0x000 },
+ { 0x18000000, 0x40280e20, 0x000 },
+ { 0x00000016, 0x00203623, 0x000 },
+ { 0xa400ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0xc0202c00, 0x000 },
+ { 0x00000000, 0x0020480b, 0x000 },
+ { 0x00000008, 0x00210222, 0x000 },
+ { 0x00000000, 0x14c00000, 0x1dc },
+ { 0x00000000, 0xc0200c00, 0x000 },
+ { 0x00000013, 0x00203623, 0x000 },
+ { 0x00000015, 0x00203623, 0x000 },
+ { 0x00000002, 0x40221220, 0x000 },
+ { 0x00000000, 0x00301083, 0x000 },
+ { 0x00000014, 0x00203624, 0x000 },
+ { 0x00000003, 0xc0210e20, 0x000 },
+ { 0x10000000, 0x00280e23, 0x000 },
+ { 0xefffffff, 0x00283a2e, 0x000 },
+ { 0x00000000, 0x0029386e, 0x000 },
+ { 0x00000000, 0x00400000, 0x000 },
+ { 0x00000000, 0x00600000, 0x28c },
+ { 0x00000000, 0xc0200800, 0x000 },
+ { 0x0000001f, 0x00210e22, 0x000 },
+ { 0x00000000, 0x14e00000, 0x000 },
+ { 0x000003ff, 0x00280e22, 0x000 },
+ { 0x00000018, 0x00211222, 0x000 },
+ { 0x00000004, 0x00301224, 0x000 },
+ { 0x00000000, 0x0020108d, 0x000 },
+ { 0x00002000, 0x00291224, 0x000 },
+ { 0x8300ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00294984, 0x000 },
+ { 0x8400ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204803, 0x000 },
+ { 0x00000000, 0x21000000, 0x000 },
+ { 0x00000000, 0x00400000, 0x1de },
+ { 0x8200ffff, 0x00204411, 0x000 },
+ { 0x00000001, 0x00204811, 0x000 },
+ { 0x00000000, 0xc0200800, 0x000 },
+ { 0x00003fff, 0x40280e20, 0x000 },
+ { 0x00000010, 0xc0211220, 0x000 },
+ { 0x00000000, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x1fb },
+ { 0x00000000, 0x2ae00000, 0x205 },
+ { 0x20000080, 0x00281e2e, 0x000 },
+ { 0x00000080, 0x002f0227, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x1f8 },
+ { 0x00000000, 0x00401c0c, 0x1f9 },
+ { 0x00000010, 0x00201e2d, 0x000 },
+ { 0x000021f9, 0x00294627, 0x000 },
+ { 0x00000000, 0x00404811, 0x205 },
+ { 0x00000001, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x23a },
+ { 0x00000000, 0x28e00000, 0x205 },
+ { 0x00800080, 0x00281e2e, 0x000 },
+ { 0x00000080, 0x002f0227, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x202 },
+ { 0x00000000, 0x00401c0c, 0x203 },
+ { 0x00000010, 0x00201e2d, 0x000 },
+ { 0x000021f9, 0x00294627, 0x000 },
+ { 0x00000001, 0x00204811, 0x000 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x20c },
+ { 0x00000003, 0x00204811, 0x000 },
+ { 0x0000000c, 0x0020162d, 0x000 },
+ { 0x0000000d, 0x00201a2d, 0x000 },
+ { 0xffdfffff, 0x00483a2e, 0x210 },
+ { 0x00000004, 0x00204811, 0x000 },
+ { 0x0000000e, 0x0020162d, 0x000 },
+ { 0x0000000f, 0x00201a2d, 0x000 },
+ { 0xffefffff, 0x00283a2e, 0x000 },
+ { 0x00000000, 0x00201c10, 0x000 },
+ { 0x00000000, 0x002f0067, 0x000 },
+ { 0x00000000, 0x04e00000, 0x205 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000006, 0x00204811, 0x000 },
+ { 0x8300ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204805, 0x000 },
+ { 0x8900ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204806, 0x000 },
+ { 0x8400ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204803, 0x000 },
+ { 0x00000000, 0x21000000, 0x000 },
+ { 0x00000000, 0x00601010, 0x28c },
+ { 0x0000000c, 0x00221e24, 0x000 },
+ { 0x00000000, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x22d },
+ { 0x20000000, 0x00293a2e, 0x000 },
+ { 0x000021f7, 0x0029462c, 0x000 },
+ { 0x00000000, 0x002948c7, 0x000 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000005, 0x00204811, 0x000 },
+ { 0x0000000c, 0x00203630, 0x000 },
+ { 0x00000007, 0x00204811, 0x000 },
+ { 0x0000000d, 0x00203630, 0x000 },
+ { 0x9100ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204803, 0x000 },
+ { 0x00000000, 0x23000000, 0x000 },
+ { 0x8d00ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00404803, 0x240 },
+ { 0x00800000, 0x00293a2e, 0x000 },
+ { 0x000021f6, 0x0029462c, 0x000 },
+ { 0x00000000, 0x002948c7, 0x000 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000005, 0x00204811, 0x000 },
+ { 0x0000000e, 0x00203630, 0x000 },
+ { 0x00000007, 0x00204811, 0x000 },
+ { 0x0000000f, 0x00203630, 0x000 },
+ { 0x9200ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204803, 0x000 },
+ { 0x00000000, 0x25000000, 0x000 },
+ { 0x8e00ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00404803, 0x240 },
+ { 0x8300ffff, 0x00204411, 0x000 },
+ { 0x00000003, 0x00381224, 0x000 },
+ { 0x00005000, 0x00304a24, 0x000 },
+ { 0x8400ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204803, 0x000 },
+ { 0x00000000, 0x21000000, 0x000 },
+ { 0x8200ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00404811, 0x000 },
+ { 0x00000003, 0x40280a20, 0x000 },
+ { 0xffffffe0, 0xc0280e20, 0x000 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000001, 0x00204811, 0x000 },
+ { 0x00000001, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x24a },
+ { 0x000021f6, 0x0029122c, 0x000 },
+ { 0x00040000, 0x00494624, 0x24c },
+ { 0x000021f7, 0x0029122c, 0x000 },
+ { 0x00040000, 0x00294624, 0x000 },
+ { 0x00000000, 0x00600000, 0x2b2 },
+ { 0x00000000, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x252 },
+ { 0x00000001, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ce00000, 0x252 },
+ { 0x00000000, 0x00481630, 0x258 },
+ { 0x00000fff, 0x00281630, 0x000 },
+ { 0x0000000c, 0x00211a30, 0x000 },
+ { 0x00000fff, 0x00281a26, 0x000 },
+ { 0x00000000, 0x002f0226, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x258 },
+ { 0x00000000, 0xc0400000, 0x000 },
+ { 0x00040d02, 0x00604411, 0x2b2 },
+ { 0x00000000, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x25d },
+ { 0x00000010, 0x00211e30, 0x000 },
+ { 0x00000fff, 0x00482630, 0x267 },
+ { 0x00000001, 0x002f0222, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x261 },
+ { 0x00000fff, 0x00281e30, 0x000 },
+ { 0x00000200, 0x00402411, 0x267 },
+ { 0x00000000, 0x00281e30, 0x000 },
+ { 0x00000010, 0x00212630, 0x000 },
+ { 0x00000010, 0x00211a30, 0x000 },
+ { 0x00000000, 0x002f0226, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x258 },
+ { 0x00000000, 0xc0400000, 0x000 },
+ { 0x00000003, 0x00381625, 0x000 },
+ { 0x00000003, 0x00381a26, 0x000 },
+ { 0x00000003, 0x00381e27, 0x000 },
+ { 0x00000003, 0x00382629, 0x000 },
+ { 0x00005000, 0x00302629, 0x000 },
+ { 0x0000060d, 0x00204411, 0x000 },
+ { 0x00000000, 0xc0204800, 0x000 },
+ { 0x00000000, 0x00204806, 0x000 },
+ { 0x00005000, 0x00302225, 0x000 },
+ { 0x00040000, 0x00694628, 0x2b2 },
+ { 0x00000001, 0x00302228, 0x000 },
+ { 0x00000000, 0x00202810, 0x000 },
+ { 0x00040000, 0x00694628, 0x2b2 },
+ { 0x00000001, 0x00302228, 0x000 },
+ { 0x00000000, 0x00200810, 0x000 },
+ { 0x00040000, 0x00694628, 0x2b2 },
+ { 0x00000001, 0x00302228, 0x000 },
+ { 0x00000000, 0x00201410, 0x000 },
+ { 0x0000060d, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204803, 0x000 },
+ { 0x0000860e, 0x00204411, 0x000 },
+ { 0x00000000, 0x0020480a, 0x000 },
+ { 0x00000000, 0x00204802, 0x000 },
+ { 0x00000000, 0x00204805, 0x000 },
+ { 0x00000000, 0x002f0128, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x282 },
+ { 0x00005000, 0x00302227, 0x000 },
+ { 0x0000000c, 0x00300e23, 0x000 },
+ { 0x00000003, 0x00331a26, 0x000 },
+ { 0x00000000, 0x002f0226, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x270 },
+ { 0x00000000, 0x00400000, 0x000 },
+ { 0x000001f3, 0x00204411, 0x000 },
+ { 0x04000000, 0x00204811, 0x000 },
+ { 0x00000000, 0x00400000, 0x289 },
+ { 0x00000000, 0xc0600000, 0x28c },
+ { 0x00000000, 0x00400000, 0x000 },
+ { 0x00000000, 0x0ec00000, 0x28e },
+ { 0x00000000, 0x00800000, 0x000 },
+ { 0x000021f9, 0x0029462c, 0x000 },
+ { 0x00000005, 0x00204811, 0x000 },
+ { 0x00000000, 0x0020280c, 0x000 },
+ { 0x00000011, 0x0020262d, 0x000 },
+ { 0x00000000, 0x002f012c, 0x000 },
+ { 0x00000000, 0x0ae00000, 0x295 },
+ { 0x00000000, 0x00403011, 0x296 },
+ { 0x00000400, 0x0030322c, 0x000 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000002, 0x00204811, 0x000 },
+ { 0x0000000a, 0x0021262c, 0x000 },
+ { 0x00000000, 0x00210130, 0x000 },
+ { 0x00000000, 0x14c00000, 0x29d },
+ { 0xa500ffff, 0x00204411, 0x000 },
+ { 0x00000001, 0x00404811, 0x299 },
+ { 0xa500ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204811, 0x000 },
+ { 0x000021f4, 0x0029462c, 0x000 },
+ { 0x0000000a, 0x00214a2a, 0x000 },
+ { 0xa200ffff, 0x00204411, 0x000 },
+ { 0x00000001, 0x00204811, 0x000 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000002, 0x00204811, 0x000 },
+ { 0x00000000, 0x00210130, 0x000 },
+ { 0xdf7fffff, 0x00283a2e, 0x000 },
+ { 0x00000010, 0x0080362a, 0x000 },
+ { 0x9700ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x0020480c, 0x000 },
+ { 0xa200ffff, 0x00204411, 0x000 },
+ { 0x00000000, 0x00204811, 0x000 },
+ { 0x8100ffff, 0x00204411, 0x000 },
+ { 0x00000002, 0x00204811, 0x000 },
+ { 0x00000000, 0x00810130, 0x000 },
+ { 0x00000000, 0x00203011, 0x000 },
+ { 0x00000010, 0x0080362c, 0x000 },
+ { 0x00000000, 0xc0400000, 0x000 },
+ { 0x00000000, 0x1ac00000, 0x2b2 },
+ { 0x9f00ffff, 0x00204411, 0x000 },
+ { 0xdeadbeef, 0x00204811, 0x000 },
+ { 0x00000000, 0x1ae00000, 0x2b5 },
+ { 0x00000000, 0x00800000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00000000, 0x00000000, 0x000 },
+ { 0x00020143, 0x00020002, 0x000 },
+ { 0x00020002, 0x00020002, 0x000 },
+ { 0x00020002, 0x00020002, 0x000 },
+ { 0x00020002, 0x01dd0002, 0x000 },
+ { 0x006301ee, 0x00280012, 0x000 },
+ { 0x00020002, 0x00020026, 0x000 },
+ { 0x00020002, 0x01ec0002, 0x000 },
+ { 0x00790242, 0x00020002, 0x000 },
+ { 0x00020002, 0x00020002, 0x000 },
+ { 0x00200012, 0x00020016, 0x000 },
+ { 0x00020002, 0x00020002, 0x000 },
+ { 0x011b00c5, 0x00020125, 0x000 },
+ { 0x00020141, 0x00020002, 0x000 },
+ { 0x00c50002, 0x0143002e, 0x000 },
+ { 0x00a2016b, 0x00020145, 0x000 },
+ { 0x00020002, 0x01200002, 0x000 },
+ { 0x00020002, 0x010f0103, 0x000 },
+ { 0x00090002, 0x000e000e, 0x000 },
+ { 0x0058003d, 0x00600002, 0x000 },
+ { 0x000200c1, 0x0002028a, 0x000 },
+ { 0x00020002, 0x00020002, 0x000 },
+ { 0x00020002, 0x00020002, 0x000 },
+ { 0x00020002, 0x00020002, 0x000 },
+ { 0x00020002, 0x00020002, 0x000 },
+ { 0x00020002, 0x00020002, 0x000 },
+ { 0x00020002, 0x00020002, 0x000 },
+ { 0x00020002, 0x00020002, 0x000 },
+ { 0x000502b1, 0x00020008, 0x000 },
+};
+
+#endif
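+// The trailing entries of the microcode image (indices ME_JUMP_TABLE_START
+// through ME_JUMP_TABLE_END - 1) appear to hold the microengine jump table
+// rather than regular instruction words.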
+static const uint32 ME_JUMP_TABLE_START = 740;
+static const uint32 ME_JUMP_TABLE_END = 768;
+
+#endif