summaryrefslogtreecommitdiff
path: root/security/tf_driver
diff options
context:
space:
mode:
authorJ. Aaron Gamble <jgamble@nvidia.com>2011-08-11 13:21:35 -0700
committerVarun Colbert <vcolbert@nvidia.com>2011-08-18 11:38:44 -0700
commit26d858aa4ecfa3f25fee16af655b28d87d56469a (patch)
treed7b6800129716680c4e70d944d0ae76a6c906fbf /security/tf_driver
parent04bb6c2b2ee1bc07bf6b0248bb64f41509415fb7 (diff)
security: tf_driver: secureos tf_driver update
New version of tf_driver supplied by Trusted Logic Change-Id: I57d28e467346e055d1c4c4f9ad75a49f670d646d Reviewed-on: http://git-master/r/46803 Reviewed-by: Scott Williams <scwilliams@nvidia.com> Reviewed-by: Daniel Willemsen <dwillemsen@nvidia.com> Tested-by: John Gamble <jgamble@nvidia.com>
Diffstat (limited to 'security/tf_driver')
-rw-r--r--security/tf_driver/Makefile10
-rw-r--r--security/tf_driver/s_version.h87
-rw-r--r--security/tf_driver/scx_protocol.h676
-rw-r--r--security/tf_driver/scxlnx_comm.c1756
-rw-r--r--security/tf_driver/scxlnx_comm_tz.c891
-rw-r--r--security/tf_driver/scxlnx_conn.c1530
-rw-r--r--security/tf_driver/scxlnx_conn.h91
-rw-r--r--security/tf_driver/scxlnx_device.c697
-rw-r--r--security/tf_driver/scxlnx_util.c1141
-rw-r--r--security/tf_driver/tf_comm.c1766
-rw-r--r--security/tf_driver/tf_comm.h (renamed from security/tf_driver/scxlnx_comm.h)134
-rw-r--r--security/tf_driver/tf_comm_tz.c885
-rw-r--r--security/tf_driver/tf_conn.c1566
-rw-r--r--security/tf_driver/tf_conn.h106
-rw-r--r--security/tf_driver/tf_defs.h (renamed from security/tf_driver/scxlnx_defs.h)282
-rw-r--r--security/tf_driver/tf_device.c749
-rw-r--r--security/tf_driver/tf_protocol.h688
-rw-r--r--security/tf_driver/tf_util.c1149
-rw-r--r--security/tf_driver/tf_util.h (renamed from security/tf_driver/scxlnx_util.h)62
19 files changed, 7198 insertions, 7068 deletions
diff --git a/security/tf_driver/Makefile b/security/tf_driver/Makefile
index 888d1d329746..dfadb7d97406 100644
--- a/security/tf_driver/Makefile
+++ b/security/tf_driver/Makefile
@@ -27,10 +27,10 @@ ifdef S_VERSION_BUILD
EXTRA_CFLAGS += -DS_VERSION_BUILD=$(S_VERSION_BUILD)
endif
-tf_driver-objs += scxlnx_util.o
-tf_driver-objs += scxlnx_conn.o
-tf_driver-objs += scxlnx_device.o
-tf_driver-objs += scxlnx_comm.o
-tf_driver-objs += scxlnx_comm_tz.o
+tf_driver-objs += tf_util.o
+tf_driver-objs += tf_conn.o
+tf_driver-objs += tf_device.o
+tf_driver-objs += tf_comm.o
+tf_driver-objs += tf_comm_tz.o
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += tf_driver.o
diff --git a/security/tf_driver/s_version.h b/security/tf_driver/s_version.h
index f7368d797b4f..81cb62f2bc0e 100644
--- a/security/tf_driver/s_version.h
+++ b/security/tf_driver/s_version.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * Copyright (c) 2011 Trusted Logic S.A.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -23,13 +23,30 @@
/*
* Usage: define S_VERSION_BUILD on the compiler's command line.
*
- * Then, you get:
- * - S_VERSION_MAIN "X.Y"
+ * Then set:
+ * - S_VERSION_OS
+ * - S_VERSION_PLATFORM
+ * - S_VERSION_MAIN
+ * - S_VERSION_ENG is optional
+ * - S_VERSION_PATCH is optional
* - S_VERSION_BUILD = 0 if S_VERSION_BUILD not defined or empty
- * - S_VERSION_STRING = "TFO[O][P] X.Y.N " or "TFO[O][P] X.Y.N D "
- * - S_VERSION_RESOURCE = X,Y,0,N
*/
+#define S_VERSION_OS "A" /* "A" for all Android */
+#define S_VERSION_PLATFORM "B" /* "B" for Tegra3 */
+
+/*
+ * This version number must be updated for each new release
+ */
+#define S_VERSION_MAIN "01.01"
+
+/*
+* If this is a patch or engineering version use the following
+* defines to set the version number. Else set these values to 0.
+*/
+#define S_VERSION_ENG 0
+#define S_VERSION_PATCH 0
+
#ifdef S_VERSION_BUILD
/* TRICK: detect if S_VERSION is defined but empty */
#if 0 == S_VERSION_BUILD-0
@@ -44,58 +61,32 @@
#define __STRINGIFY(X) #X
#define __STRINGIFY2(X) __STRINGIFY(X)
-#if !defined(NDEBUG) || defined(_DEBUG)
-#define S_VERSION_VARIANT_DEBUG "D"
-#else
-#define S_VERSION_VARIANT_DEBUG " "
-#endif
-
-#ifdef STANDARD
-#define S_VERSION_VARIANT_STANDARD "S"
+#if S_VERSION_ENG != 0
+#define _S_VERSION_ENG "e" __STRINGIFY2(S_VERSION_ENG)
#else
-#define S_VERSION_VARIANT_STANDARD " "
+#define _S_VERSION_ENG ""
#endif
-#define S_VERSION_VARIANT S_VERSION_VARIANT_STANDARD S_VERSION_VARIANT_DEBUG " "
-
-/*
- * This version number must be updated for each new release
- */
-#define S_VERSION_MAIN "08.01"
-#define S_VERSION_RESOURCE 8,1,0,S_VERSION_BUILD
-
-/*
- * Products Versioning
- */
-#if defined(WIN32)
-
-/* Win32 Simulator and all Win32 Side Components */
-#define PRODUCT_NAME "TFOWX"
-
-#elif defined(__ANDROID32__)
-
-#define PRODUCT_NAME "UNKWN"
-
-#elif defined(LINUX)
-
-#if defined(__ARM_EABI__)
-/* arm architecture -> Cortex-A8 */
-#define PRODUCT_NAME "TFOLB"
+#if S_VERSION_PATCH != 0
+#define _S_VERSION_PATCH "p" __STRINGIFY2(S_VERSION_PATCH)
#else
-/* ix86 architecture -> Linux Simulator and all Linux Side Components */
-#define PRODUCT_NAME "TFOLX"
+#define _S_VERSION_PATCH ""
#endif
+#if !defined(NDEBUG) || defined(_DEBUG)
+#define S_VERSION_VARIANT "D "
#else
-
-/* Not OS specififc -> Cortex-A8 Secure Binary */
-#define PRODUCT_NAME "TFOXB"
-
+#define S_VERSION_VARIANT " "
#endif
#define S_VERSION_STRING \
- PRODUCT_NAME S_VERSION_MAIN "." \
- __STRINGIFY2(S_VERSION_BUILD) " " \
- S_VERSION_VARIANT
+ "TFN" \
+ S_VERSION_OS \
+ S_VERSION_PLATFORM \
+ S_VERSION_MAIN \
+ _S_VERSION_ENG \
+ _S_VERSION_PATCH \
+ "." __STRINGIFY2(S_VERSION_BUILD) " " \
+ S_VERSION_VARIANT
#endif /* __S_VERSION_H__ */
diff --git a/security/tf_driver/scx_protocol.h b/security/tf_driver/scx_protocol.h
deleted file mode 100644
index 06a0bb792769..000000000000
--- a/security/tf_driver/scx_protocol.h
+++ /dev/null
@@ -1,676 +0,0 @@
-/*
- * Copyright (c) 2006-2010 Trusted Logic S.A.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
- */
-
-#ifndef __SCX_PROTOCOL_H__
-#define __SCX_PROTOCOL_H__
-
-/*----------------------------------------------------------------------------
- *
- * This header file defines the structure used in the SChannel Protocol.
- * See your Product Reference Manual for a specification of the SChannel
- * protocol.
- *---------------------------------------------------------------------------*/
-
-/*
- * The driver interface version returned by the version ioctl
- */
-#define SCX_DRIVER_INTERFACE_VERSION 0x04000000
-
-/*
- * Protocol version handling
- */
-#define SCX_S_PROTOCOL_MAJOR_VERSION (0x06)
-#define GET_PROTOCOL_MAJOR_VERSION(a) (a >> 24)
-#define GET_PROTOCOL_MINOR_VERSION(a) ((a >> 16) & 0xFF)
-
-/*
- * The size, in bytes, of the L1 Shared Buffer.
- */
-#define SCX_COMM_BUFFER_SIZE (0x1000) /* 4kB*/
-
-/*
- * The S flag of the nConfigFlags_S register.
- */
-#define SCX_CONFIG_FLAG_S (1 << 3)
-
-/*
- * The TimeSlot field of the nSyncSerial_N register.
- */
-#define SCX_SYNC_SERIAL_TIMESLOT_N (1)
-
-/*
- * nStatus_S related defines.
- */
-#define SCX_STATUS_P_MASK (0X00000001)
-#define SCX_STATUS_POWER_STATE_SHIFT (3)
-#define SCX_STATUS_POWER_STATE_MASK (0x1F << SCX_STATUS_POWER_STATE_SHIFT)
-
-/*
- * Possible power states of the POWER_STATE field of the nStatus_S register
- */
-#define SCX_POWER_MODE_COLD_BOOT (0)
-#define SCX_POWER_MODE_WARM_BOOT (1)
-#define SCX_POWER_MODE_ACTIVE (3)
-#define SCX_POWER_MODE_READY_TO_SHUTDOWN (5)
-#define SCX_POWER_MODE_READY_TO_HIBERNATE (7)
-#define SCX_POWER_MODE_WAKEUP (8)
-#define SCX_POWER_MODE_PANIC (15)
-
-/*
- * Possible nCommand values for MANAGEMENT commands
- */
-#define SCX_MANAGEMENT_HIBERNATE (1)
-#define SCX_MANAGEMENT_SHUTDOWN (2)
-#define SCX_MANAGEMENT_PREPARE_FOR_CORE_OFF (3)
-#define SCX_MANAGEMENT_RESUME_FROM_CORE_OFF (4)
-
-/*
- * The capacity of the Normal Word message queue, in number of slots.
- */
-#define SCX_N_MESSAGE_QUEUE_CAPACITY (512)
-
-/*
- * The capacity of the Secure World message answer queue, in number of slots.
- */
-#define SCX_S_ANSWER_QUEUE_CAPACITY (256)
-
-/*
- * The value of the S-timeout register indicating an infinite timeout.
- */
-#define SCX_S_TIMEOUT_0_INFINITE (0xFFFFFFFF)
-#define SCX_S_TIMEOUT_1_INFINITE (0xFFFFFFFF)
-
-/*
- * The value of the S-timeout register indicating an immediate timeout.
- */
-#define SCX_S_TIMEOUT_0_IMMEDIATE (0x0)
-#define SCX_S_TIMEOUT_1_IMMEDIATE (0x0)
-
-/*
- * Identifies the get protocol version SMC.
- */
-#define SCX_SMC_GET_PROTOCOL_VERSION (0XFFFFFFFB)
-
-/*
- * Identifies the init SMC.
- */
-#define SCX_SMC_INIT (0XFFFFFFFF)
-
-/*
- * Identifies the reset irq SMC.
- */
-#define SCX_SMC_RESET_IRQ (0xFFFFFFFE)
-
-/*
- * Identifies the SET_W3B SMC.
- */
-#define SCX_SMC_WAKE_UP (0xFFFFFFFD)
-
-/*
- * Identifies the STOP SMC.
- */
-#define SCX_SMC_STOP (0xFFFFFFFC)
-
-/*
- * Identifies the n-yield SMC.
- */
-#define SCX_SMC_N_YIELD (0X00000003)
-
-
-/* Possible stop commands for SMC_STOP */
-#define SCSTOP_HIBERNATE (0xFFFFFFE1)
-#define SCSTOP_SHUTDOWN (0xFFFFFFE2)
-
-/*
- * representation of an UUID.
- */
-struct SCX_UUID {
- u32 time_low;
- u16 time_mid;
- u16 time_hi_and_version;
- u8 clock_seq_and_node[8];
-};
-
-
-/**
- * Command parameters.
- */
-struct SCX_COMMAND_PARAM_VALUE {
- u32 a;
- u32 b;
-};
-
-struct SCX_COMMAND_PARAM_TEMP_MEMREF {
- u32 nDescriptor; /* data pointer for exchange message.*/
- u32 nSize;
- u32 nOffset;
-};
-
-struct SCX_COMMAND_PARAM_MEMREF {
- u32 hBlock;
- u32 nSize;
- u32 nOffset;
-};
-
-union SCX_COMMAND_PARAM {
- struct SCX_COMMAND_PARAM_VALUE sValue;
- struct SCX_COMMAND_PARAM_TEMP_MEMREF sTempMemref;
- struct SCX_COMMAND_PARAM_MEMREF sMemref;
-};
-
-/**
- * Answer parameters.
- */
-struct SCX_ANSWER_PARAM_VALUE {
- u32 a;
- u32 b;
-};
-
-struct SCX_ANSWER_PARAM_SIZE {
- u32 _ignored;
- u32 nSize;
-};
-
-union SCX_ANSWER_PARAM {
- struct SCX_ANSWER_PARAM_SIZE sSize;
- struct SCX_ANSWER_PARAM_VALUE sValue;
-};
-
-/*
- * Descriptor tables capacity
- */
-#define SCX_MAX_W3B_COARSE_PAGES (2)
-#define SCX_MAX_COARSE_PAGES (8)
-#define SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT (8)
-#define SCX_DESCRIPTOR_TABLE_CAPACITY \
- (1 << SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT)
-#define SCX_DESCRIPTOR_TABLE_CAPACITY_MASK \
- (SCX_DESCRIPTOR_TABLE_CAPACITY - 1)
-/* Shared memories coarse pages can map up to 1MB */
-#define SCX_MAX_COARSE_PAGE_MAPPED_SIZE \
- (PAGE_SIZE * SCX_DESCRIPTOR_TABLE_CAPACITY)
-/* Shared memories cannot exceed 8MB */
-#define SCX_MAX_SHMEM_SIZE \
- (SCX_MAX_COARSE_PAGE_MAPPED_SIZE << 3)
-
-/*
- * Buffer size for version description fields
- */
-#define SCX_DESCRIPTION_BUFFER_LENGTH 64
-
-/*
- * Shared memory type flags.
- */
-#define SCX_SHMEM_TYPE_READ (0x00000001)
-#define SCX_SHMEM_TYPE_WRITE (0x00000002)
-
-/*
- * Shared mem flags
- */
-#define SCX_SHARED_MEM_FLAG_INPUT 1
-#define SCX_SHARED_MEM_FLAG_OUTPUT 2
-#define SCX_SHARED_MEM_FLAG_INOUT 3
-
-
-/*
- * Parameter types
- */
-#define SCX_PARAM_TYPE_NONE 0x0
-#define SCX_PARAM_TYPE_VALUE_INPUT 0x1
-#define SCX_PARAM_TYPE_VALUE_OUTPUT 0x2
-#define SCX_PARAM_TYPE_VALUE_INOUT 0x3
-#define SCX_PARAM_TYPE_MEMREF_TEMP_INPUT 0x5
-#define SCX_PARAM_TYPE_MEMREF_TEMP_OUTPUT 0x6
-#define SCX_PARAM_TYPE_MEMREF_TEMP_INOUT 0x7
-#define SCX_PARAM_TYPE_MEMREF_INPUT 0xD
-#define SCX_PARAM_TYPE_MEMREF_OUTPUT 0xE
-#define SCX_PARAM_TYPE_MEMREF_INOUT 0xF
-
-#define SCX_PARAM_TYPE_MEMREF_FLAG 0x4
-#define SCX_PARAM_TYPE_REGISTERED_MEMREF_FLAG 0x8
-
-
-#define SCX_MAKE_PARAM_TYPES(t0, t1, t2, t3) \
- ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
-#define SCX_GET_PARAM_TYPE(t, i) (((t) >> (4 * i)) & 0xF)
-
-/*
- * Login types.
- */
-#define SCX_LOGIN_PUBLIC 0x00000000
-#define SCX_LOGIN_USER 0x00000001
-#define SCX_LOGIN_GROUP 0x00000002
-#define SCX_LOGIN_APPLICATION 0x00000004
-#define SCX_LOGIN_APPLICATION_USER 0x00000005
-#define SCX_LOGIN_APPLICATION_GROUP 0x00000006
-#define SCX_LOGIN_AUTHENTICATION 0x80000000
-#define SCX_LOGIN_PRIVILEGED 0x80000002
-
-/* Login variants */
-
-#define SCX_LOGIN_VARIANT(mainType, os, variant) \
- ((mainType) | (1 << 27) | ((os) << 16) | ((variant) << 8))
-
-#define SCX_LOGIN_GET_MAIN_TYPE(type) \
- ((type) & ~SCX_LOGIN_VARIANT(0, 0xFF, 0xFF))
-
-#define SCX_LOGIN_OS_ANY 0x00
-#define SCX_LOGIN_OS_LINUX 0x01
-#define SCX_LOGIN_OS_ANDROID 0x04
-
-/* OS-independent variants */
-#define SCX_LOGIN_USER_NONE \
- SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_ANY, 0xFF)
-#define SCX_LOGIN_GROUP_NONE \
- SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_ANY, 0xFF)
-#define SCX_LOGIN_APPLICATION_USER_NONE \
- SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_ANY, 0xFF)
-#define SCX_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH \
- SCX_LOGIN_VARIANT(SCX_LOGIN_AUTHENTICATION, SCX_LOGIN_OS_ANY, 0x01)
-#define SCX_LOGIN_PRIVILEGED_KERNEL \
- SCX_LOGIN_VARIANT(SCX_LOGIN_PRIVILEGED, SCX_LOGIN_OS_ANY, 0x01)
-
-/* Linux variants */
-#define SCX_LOGIN_USER_LINUX_EUID \
- SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_LINUX, 0x01)
-#define SCX_LOGIN_GROUP_LINUX_GID \
- SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_LINUX, 0x01)
-#define SCX_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH \
- SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION, SCX_LOGIN_OS_LINUX, 0x01)
-#define SCX_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH \
- SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_LINUX, 0x01)
-#define SCX_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH \
- SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_GROUP, SCX_LOGIN_OS_LINUX, 0x01)
-
-/* Android variants */
-#define SCX_LOGIN_USER_ANDROID_EUID \
- SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_ANDROID, 0x01)
-#define SCX_LOGIN_GROUP_ANDROID_GID \
- SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_ANDROID, 0x01)
-#define SCX_LOGIN_APPLICATION_ANDROID_UID \
- SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION, SCX_LOGIN_OS_ANDROID, 0x01)
-#define SCX_LOGIN_APPLICATION_USER_ANDROID_UID_EUID \
- SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_ANDROID, \
- 0x01)
-#define SCX_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID \
- SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_GROUP, SCX_LOGIN_OS_ANDROID, \
- 0x01)
-
-/*
- * return origins
- */
-#define SCX_ORIGIN_COMMS 2
-#define SCX_ORIGIN_TEE 3
-#define SCX_ORIGIN_TRUSTED_APP 4
-/*
- * The SCX message types.
- */
-#define SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT 0x02
-#define SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT 0xFD
-#define SCX_MESSAGE_TYPE_REGISTER_SHARED_MEMORY 0xF7
-#define SCX_MESSAGE_TYPE_RELEASE_SHARED_MEMORY 0xF9
-#define SCX_MESSAGE_TYPE_OPEN_CLIENT_SESSION 0xF0
-#define SCX_MESSAGE_TYPE_CLOSE_CLIENT_SESSION 0xF2
-#define SCX_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND 0xF5
-#define SCX_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND 0xF4
-#define SCX_MESSAGE_TYPE_MANAGEMENT 0xFE
-
-
-/*
- * The error codes
- */
-#define S_SUCCESS 0x00000000
-#define S_ERROR_NO_DATA 0xFFFF000B
-#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C
-
-
-struct SCX_COMMAND_HEADER {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMessageInfo;
- u32 nOperationID;
-};
-
-struct SCX_ANSWER_HEADER {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMessageInfo;
- u32 nOperationID;
- u32 nErrorCode;
-};
-
-/*
- * CREATE_DEVICE_CONTEXT command message.
- */
-struct SCX_COMMAND_CREATE_DEVICE_CONTEXT {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMessageInfo_RFU;
- u32 nOperationID;
- u32 nDeviceContextID;
-};
-
-/*
- * CREATE_DEVICE_CONTEXT answer message.
- */
-struct SCX_ANSWER_CREATE_DEVICE_CONTEXT {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMessageInfo_RFU;
- /* an opaque Normal World identifier for the operation */
- u32 nOperationID;
- u32 nErrorCode;
- /* an opaque Normal World identifier for the device context */
- u32 hDeviceContext;
-};
-
-/*
- * DESTROY_DEVICE_CONTEXT command message.
- */
-struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMessageInfo_RFU;
- u32 nOperationID;
- u32 hDeviceContext;
-};
-
-/*
- * DESTROY_DEVICE_CONTEXT answer message.
- */
-struct SCX_ANSWER_DESTROY_DEVICE_CONTEXT {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMessageInfo_RFU;
- /* an opaque Normal World identifier for the operation */
- u32 nOperationID;
- u32 nErrorCode;
- u32 nDeviceContextID;
-};
-
-/*
- * OPEN_CLIENT_SESSION command message.
- */
-struct SCX_COMMAND_OPEN_CLIENT_SESSION {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nParamTypes;
- /* an opaque Normal World identifier for the operation */
- u32 nOperationID;
- u32 hDeviceContext;
- u32 nCancellationID;
- u64 sTimeout;
- struct SCX_UUID sDestinationUUID;
- union SCX_COMMAND_PARAM sParams[4];
- u32 nLoginType;
- /*
- * Size = 0 for public, [16] for group identification, [20] for
- * authentication
- */
- u8 sLoginData[20];
-};
-
-/*
- * OPEN_CLIENT_SESSION answer message.
- */
-struct SCX_ANSWER_OPEN_CLIENT_SESSION {
- u8 nMessageSize;
- u8 nMessageType;
- u8 nReturnOrigin;
- u8 __nReserved;
- /* an opaque Normal World identifier for the operation */
- u32 nOperationID;
- u32 nErrorCode;
- u32 hClientSession;
- union SCX_ANSWER_PARAM sAnswers[4];
-};
-
-/*
- * CLOSE_CLIENT_SESSION command message.
- */
-struct SCX_COMMAND_CLOSE_CLIENT_SESSION {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMessageInfo_RFU;
- /* an opaque Normal World identifier for the operation */
- u32 nOperationID;
- u32 hDeviceContext;
- u32 hClientSession;
-};
-
-/*
- * CLOSE_CLIENT_SESSION answer message.
- */
-struct SCX_ANSWER_CLOSE_CLIENT_SESSION {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMessageInfo_RFU;
- /* an opaque Normal World identifier for the operation */
- u32 nOperationID;
- u32 nErrorCode;
-};
-
-
-/*
- * REGISTER_SHARED_MEMORY command message
- */
-struct SCX_COMMAND_REGISTER_SHARED_MEMORY {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMemoryFlags;
- u32 nOperationID;
- u32 hDeviceContext;
- u32 nBlockID;
- u32 nSharedMemSize;
- u32 nSharedMemStartOffset;
- u32 nSharedMemDescriptors[SCX_MAX_COARSE_PAGES];
-};
-
-/*
- * REGISTER_SHARED_MEMORY answer message.
- */
-struct SCX_ANSWER_REGISTER_SHARED_MEMORY {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMessageInfo_RFU;
- /* an opaque Normal World identifier for the operation */
- u32 nOperationID;
- u32 nErrorCode;
- u32 hBlock;
-};
-
-/*
- * RELEASE_SHARED_MEMORY command message.
- */
-struct SCX_COMMAND_RELEASE_SHARED_MEMORY {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMessageInfo_RFU;
- /* an opaque Normal World identifier for the operation */
- u32 nOperationID;
- u32 hDeviceContext;
- u32 hBlock;
-};
-
-/*
- * RELEASE_SHARED_MEMORY answer message.
- */
-struct SCX_ANSWER_RELEASE_SHARED_MEMORY {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMessageInfo_RFU;
- u32 nOperationID;
- u32 nErrorCode;
- u32 nBlockID;
-};
-
-/*
- * INVOKE_CLIENT_COMMAND command message.
- */
-struct SCX_COMMAND_INVOKE_CLIENT_COMMAND {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nParamTypes;
- u32 nOperationID;
- u32 hDeviceContext;
- u32 hClientSession;
- u64 sTimeout;
- u32 nCancellationID;
- u32 nClientCommandIdentifier;
- union SCX_COMMAND_PARAM sParams[4];
-};
-
-/*
- * INVOKE_CLIENT_COMMAND command answer.
- */
-struct SCX_ANSWER_INVOKE_CLIENT_COMMAND {
- u8 nMessageSize;
- u8 nMessageType;
- u8 nReturnOrigin;
- u8 __nReserved;
- u32 nOperationID;
- u32 nErrorCode;
- union SCX_ANSWER_PARAM sAnswers[4];
-};
-
-/*
- * CANCEL_CLIENT_OPERATION command message.
- */
-struct SCX_COMMAND_CANCEL_CLIENT_OPERATION {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMessageInfo_RFU;
- /* an opaque Normal World identifier for the operation */
- u32 nOperationID;
- u32 hDeviceContext;
- u32 hClientSession;
- u32 nCancellationID;
-};
-
-struct SCX_ANSWER_CANCEL_CLIENT_OPERATION {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nMessageInfo_RFU;
- u32 nOperationID;
- u32 nErrorCode;
-};
-
-/*
- * MANAGEMENT command message.
- */
-struct SCX_COMMAND_MANAGEMENT {
- u8 nMessageSize;
- u8 nMessageType;
- u16 nCommand;
- u32 nOperationID;
- u32 nW3BSize;
- u32 nW3BStartOffset;
- u32 nSharedMemDescriptors[1];
-};
-
-/*
- * POWER_MANAGEMENT answer message.
- * The message does not provide message specific parameters.
- * Therefore no need to define a specific answer structure
- */
-
-/*
- * Structure for L2 messages
- */
-union SCX_COMMAND_MESSAGE {
- struct SCX_COMMAND_HEADER sHeader;
- struct SCX_COMMAND_CREATE_DEVICE_CONTEXT sCreateDeviceContextMessage;
- struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT sDestroyDeviceContextMessage;
- struct SCX_COMMAND_OPEN_CLIENT_SESSION sOpenClientSessionMessage;
- struct SCX_COMMAND_CLOSE_CLIENT_SESSION sCloseClientSessionMessage;
- struct SCX_COMMAND_REGISTER_SHARED_MEMORY sRegisterSharedMemoryMessage;
- struct SCX_COMMAND_RELEASE_SHARED_MEMORY sReleaseSharedMemoryMessage;
- struct SCX_COMMAND_INVOKE_CLIENT_COMMAND sInvokeClientCommandMessage;
- struct SCX_COMMAND_CANCEL_CLIENT_OPERATION
- sCancelClientOperationMessage;
- struct SCX_COMMAND_MANAGEMENT sManagementMessage;
-};
-
-/*
- * Structure for any L2 answer
- */
-
-union SCX_ANSWER_MESSAGE {
- struct SCX_ANSWER_HEADER sHeader;
- struct SCX_ANSWER_CREATE_DEVICE_CONTEXT sCreateDeviceContextAnswer;
- struct SCX_ANSWER_OPEN_CLIENT_SESSION sOpenClientSessionAnswer;
- struct SCX_ANSWER_CLOSE_CLIENT_SESSION sCloseClientSessionAnswer;
- struct SCX_ANSWER_REGISTER_SHARED_MEMORY sRegisterSharedMemoryAnswer;
- struct SCX_ANSWER_RELEASE_SHARED_MEMORY sReleaseSharedMemoryAnswer;
- struct SCX_ANSWER_INVOKE_CLIENT_COMMAND sInvokeClientCommandAnswer;
- struct SCX_ANSWER_DESTROY_DEVICE_CONTEXT sDestroyDeviceContextAnswer;
- struct SCX_ANSWER_CANCEL_CLIENT_OPERATION sCancelClientOperationAnswer;
-};
-
-/* Structure of the Communication Buffer */
-struct SCHANNEL_C1S_BUFFER {
- u32 nConfigFlags_S;
- u32 nW3BSizeMax_S;
- u32 nReserved0;
- u32 nW3BSizeCurrent_S;
- u8 sReserved1[48];
- u8 sVersionDescription[SCX_DESCRIPTION_BUFFER_LENGTH];
- u32 nStatus_S;
- u32 sReserved2;
- u32 nSyncSerial_N;
- u32 nSyncSerial_S;
- u64 sTime_N[2];
- u64 sTimeout_S[2];
- u32 nFirstCommand;
- u32 nFirstFreeCommand;
- u32 nFirstAnswer;
- u32 nFirstFreeAnswer;
- u32 nW3BDescriptors[128];
- #ifdef CONFIG_TF_ZEBRA
- u8 sRPCTraceBuffer[140];
- u8 sRPCShortcutBuffer[180];
- #else
- u8 sReserved3[320];
- #endif
- u32 sCommandQueue[SCX_N_MESSAGE_QUEUE_CAPACITY];
- u32 sAnswerQueue[SCX_S_ANSWER_QUEUE_CAPACITY];
-};
-
-
-/*
- * SCX_VERSION_INFORMATION_BUFFER structure description
- * Description of the sVersionBuffer handed over from user space to kernel space
- * This field is filled by the driver during a CREATE_DEVICE_CONTEXT ioctl
- * and handed back to user space
- */
-struct SCX_VERSION_INFORMATION_BUFFER {
- u8 sDriverDescription[65];
- u8 sSecureWorldDescription[65];
-};
-
-
-/* The IOCTLs the driver supports */
-#include <linux/ioctl.h>
-
-#define IOCTL_SCX_GET_VERSION _IO('z', 0)
-#define IOCTL_SCX_EXCHANGE _IOWR('z', 1, union SCX_COMMAND_MESSAGE)
-#define IOCTL_SCX_GET_DESCRIPTION _IOR('z', 2, \
- struct SCX_VERSION_INFORMATION_BUFFER)
-
-#endif /* !defined(__SCX_PROTOCOL_H__) */
diff --git a/security/tf_driver/scxlnx_comm.c b/security/tf_driver/scxlnx_comm.c
deleted file mode 100644
index f3b4cb8d487f..000000000000
--- a/security/tf_driver/scxlnx_comm.c
+++ /dev/null
@@ -1,1756 +0,0 @@
-/*
- * Copyright (c) 2006-2010 Trusted Logic S.A.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
- */
-
-#include <asm/div64.h>
-#include <asm/system.h>
-#include <linux/version.h>
-#include <asm/cputype.h>
-#include <linux/interrupt.h>
-#include <linux/page-flags.h>
-#include <linux/pagemap.h>
-#include <linux/vmalloc.h>
-#include <linux/jiffies.h>
-#include <linux/freezer.h>
-
-#include "scxlnx_defs.h"
-#include "scxlnx_comm.h"
-#include "scx_protocol.h"
-#include "scxlnx_util.h"
-#include "scxlnx_conn.h"
-
-#ifdef CONFIG_TF_ZEBRA
-#include "scxlnx_zebra.h"
-#endif
-
-/*---------------------------------------------------------------------------
- * Internal Constants
- *---------------------------------------------------------------------------*/
-
-/*
- * shared memories descriptor constants
- */
-#define DESCRIPTOR_B_MASK (1 << 2)
-#define DESCRIPTOR_C_MASK (1 << 3)
-#define DESCRIPTOR_S_MASK (1 << 10)
-
-#define L1_COARSE_DESCRIPTOR_BASE (0x00000001)
-#define L1_COARSE_DESCRIPTOR_ADDR_MASK (0xFFFFFC00)
-#define L1_COARSE_DESCRIPTOR_V13_12_SHIFT (5)
-
-#define L2_PAGE_DESCRIPTOR_BASE (0x00000003)
-#define L2_PAGE_DESCRIPTOR_AP_APX_READ (0x220)
-#define L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE (0x30)
-
-#define L2_INIT_DESCRIPTOR_BASE (0x00000003)
-#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4)
-
-/*
- * Reject an attempt to share a strongly-Ordered or Device memory
- * Strongly-Ordered: TEX=0b000, C=0, B=0
- * Shared Device: TEX=0b000, C=0, B=1
- * Non-Shared Device: TEX=0b010, C=0, B=0
- */
-#define L2_TEX_C_B_MASK \
- ((1<<8) | (1<<7) | (1<<6) | (1<<3) | (1<<2))
-#define L2_TEX_C_B_STRONGLY_ORDERED \
- ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (0<<2))
-#define L2_TEX_C_B_SHARED_DEVICE \
- ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (1<<2))
-#define L2_TEX_C_B_NON_SHARED_DEVICE \
- ((0<<8) | (1<<7) | (0<<6) | (0<<3) | (0<<2))
-
-#define CACHE_S(x) ((x) & (1 << 24))
-#define CACHE_DSIZE(x) (((x) >> 12) & 4095)
-
-#define TIME_IMMEDIATE ((u64) 0x0000000000000000ULL)
-#define TIME_INFINITE ((u64) 0xFFFFFFFFFFFFFFFFULL)
-
-/*---------------------------------------------------------------------------
- * atomic operation definitions
- *---------------------------------------------------------------------------*/
-
-/*
- * Atomically updates the nSyncSerial_N and sTime_N register
- * nSyncSerial_N and sTime_N modifications are thread safe
- */
-void SCXLNXCommSetCurrentTime(struct SCXLNX_COMM *pComm)
-{
- u32 nNewSyncSerial;
- struct timeval now;
- u64 sTime64;
-
- /*
- * lock the structure while updating the L1 shared memory fields
- */
- spin_lock(&pComm->lock);
-
- /* read nSyncSerial_N and change the TimeSlot bit field */
- nNewSyncSerial =
- SCXLNXCommReadReg32(&pComm->pBuffer->nSyncSerial_N) + 1;
-
- do_gettimeofday(&now);
- sTime64 = now.tv_sec;
- sTime64 = (sTime64 * 1000) + (now.tv_usec / 1000);
-
- /* Write the new sTime and nSyncSerial into shared memory */
- SCXLNXCommWriteReg64(&pComm->pBuffer->sTime_N[nNewSyncSerial &
- SCX_SYNC_SERIAL_TIMESLOT_N], sTime64);
- SCXLNXCommWriteReg32(&pComm->pBuffer->nSyncSerial_N,
- nNewSyncSerial);
-
- spin_unlock(&pComm->lock);
-}
-
-/*
- * Performs the specific read timeout operation
- * The difficulty here is to read atomically 2 u32
- * values from the L1 shared buffer.
- * This is guaranteed by reading before and after the operation
- * the timeslot given by the Secure World
- */
-static inline void SCXLNXCommReadTimeout(struct SCXLNX_COMM *pComm, u64 *pTime)
-{
- u32 nSyncSerial_S_initial = 0;
- u32 nSyncSerial_S_final = 1;
- u64 sTime;
-
- spin_lock(&pComm->lock);
-
- while (nSyncSerial_S_initial != nSyncSerial_S_final) {
- nSyncSerial_S_initial = SCXLNXCommReadReg32(
- &pComm->pBuffer->nSyncSerial_S);
- sTime = SCXLNXCommReadReg64(
- &pComm->pBuffer->sTimeout_S[nSyncSerial_S_initial&1]);
-
- nSyncSerial_S_final = SCXLNXCommReadReg32(
- &pComm->pBuffer->nSyncSerial_S);
- }
-
- spin_unlock(&pComm->lock);
-
- *pTime = sTime;
-}
-
-/*----------------------------------------------------------------------------
- * SIGKILL signal handling
- *----------------------------------------------------------------------------*/
-
-static bool sigkill_pending(void)
-{
- if (signal_pending(current)) {
- dprintk(KERN_INFO "A signal is pending\n");
- if (sigismember(&current->pending.signal, SIGKILL)) {
- dprintk(KERN_INFO "A SIGKILL is pending\n");
- return true;
- } else if (sigismember(
- &current->signal->shared_pending.signal, SIGKILL)) {
- dprintk(KERN_INFO "A SIGKILL is pending (shared)\n");
- return true;
- }
- }
- return false;
-}
-
-/*----------------------------------------------------------------------------
- * Shared memory related operations
- *----------------------------------------------------------------------------*/
-
-struct SCXLNX_COARSE_PAGE_TABLE *SCXLNXAllocateCoarsePageTable(
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
- u32 nType)
-{
- struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable = NULL;
-
- spin_lock(&(pAllocationContext->lock));
-
- if (!(list_empty(&(pAllocationContext->sFreeCoarsePageTables)))) {
- /*
- * The free list can provide us a coarse page table
- * descriptor
- */
- pCoarsePageTable = list_entry(
- pAllocationContext->sFreeCoarsePageTables.next,
- struct SCXLNX_COARSE_PAGE_TABLE, list);
- list_del(&(pCoarsePageTable->list));
-
- pCoarsePageTable->pParent->nReferenceCount++;
- } else {
- /* no array of coarse page tables, create a new one */
- struct SCXLNX_COARSE_PAGE_TABLE_ARRAY *pArray;
- void *pPage;
- int i;
-
- spin_unlock(&(pAllocationContext->lock));
-
- /* first allocate a new page descriptor */
- pArray = internal_kmalloc(sizeof(*pArray), GFP_KERNEL);
- if (pArray == NULL) {
- dprintk(KERN_ERR "SCXLNXAllocateCoarsePageTable(%p):"
- " failed to allocate a table array\n",
- pAllocationContext);
- return NULL;
- }
-
- pArray->nType = nType;
- INIT_LIST_HEAD(&(pArray->list));
-
- /* now allocate the actual page the page descriptor describes */
- pPage = (void *) internal_get_zeroed_page(GFP_KERNEL);
- if (pPage == NULL) {
- dprintk(KERN_ERR "SCXLNXAllocateCoarsePageTable(%p):"
- " failed allocate a page\n",
- pAllocationContext);
- internal_kfree(pArray);
- return NULL;
- }
-
- spin_lock(&(pAllocationContext->lock));
-
- /* initialize the coarse page table descriptors */
- for (i = 0; i < 4; i++) {
- INIT_LIST_HEAD(&(pArray->sCoarsePageTables[i].list));
- pArray->sCoarsePageTables[i].pDescriptors =
- pPage + (i * SIZE_1KB);
- pArray->sCoarsePageTables[i].pParent = pArray;
-
- if (i == 0) {
- /*
- * the first element is kept for the current
- * coarse page table allocation
- */
- pCoarsePageTable =
- &(pArray->sCoarsePageTables[i]);
- pArray->nReferenceCount++;
- } else {
- /*
- * The other elements are added to the free list
- */
- list_add(&(pArray->sCoarsePageTables[i].list),
- &(pAllocationContext->
- sFreeCoarsePageTables));
- }
- }
-
- list_add(&(pArray->list),
- &(pAllocationContext->sCoarsePageTableArrays));
- }
- spin_unlock(&(pAllocationContext->lock));
-
- return pCoarsePageTable;
-}
-
-
-void SCXLNXFreeCoarsePageTable(
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
- struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable,
- int nForce)
-{
- struct SCXLNX_COARSE_PAGE_TABLE_ARRAY *pArray;
-
- spin_lock(&(pAllocationContext->lock));
-
- pArray = pCoarsePageTable->pParent;
-
- (pArray->nReferenceCount)--;
-
- if (pArray->nReferenceCount == 0) {
- /*
- * no coarse page table descriptor is used
- * check if we should free the whole page
- */
-
- if ((pArray->nType == SCXLNX_PAGE_DESCRIPTOR_TYPE_PREALLOCATED)
- && (nForce == 0))
- /*
- * This is a preallocated page,
- * add the page back to the free list
- */
- list_add(&(pCoarsePageTable->list),
- &(pAllocationContext->sFreeCoarsePageTables));
- else {
- /*
- * None of the page's coarse page table descriptors
- * are in use, free the whole page
- */
- int i;
- u32 *pDescriptors;
-
- /*
- * remove the page's associated coarse page table
- * descriptors from the free list
- */
- for (i = 0; i < 4; i++)
- if (&(pArray->sCoarsePageTables[i]) !=
- pCoarsePageTable)
- list_del(&(pArray->
- sCoarsePageTables[i].list));
-
- pDescriptors =
- pArray->sCoarsePageTables[0].pDescriptors;
- pArray->sCoarsePageTables[0].pDescriptors = NULL;
-
- /* remove the coarse page table from the array */
- list_del(&(pArray->list));
-
- spin_unlock(&(pAllocationContext->lock));
- /*
- * Free the page.
- * The address of the page is contained in the first
- * element
- */
- internal_free_page((unsigned long) pDescriptors);
- /* finaly free the array */
- internal_kfree(pArray);
-
- spin_lock(&(pAllocationContext->lock));
- }
- } else {
- /*
- * Some coarse page table descriptors are in use.
- * Add the descriptor to the free list
- */
- list_add(&(pCoarsePageTable->list),
- &(pAllocationContext->sFreeCoarsePageTables));
- }
-
- spin_unlock(&(pAllocationContext->lock));
-}
-
-
-void SCXLNXInitializeCoarsePageTableAllocator(
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext)
-{
- spin_lock_init(&(pAllocationContext->lock));
- INIT_LIST_HEAD(&(pAllocationContext->sCoarsePageTableArrays));
- INIT_LIST_HEAD(&(pAllocationContext->sFreeCoarsePageTables));
-}
-
-void SCXLNXReleaseCoarsePageTableAllocator(
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext)
-{
- spin_lock(&(pAllocationContext->lock));
-
- /* now clean up the list of page descriptors */
- while (!list_empty(&(pAllocationContext->sCoarsePageTableArrays))) {
- struct SCXLNX_COARSE_PAGE_TABLE_ARRAY *pPageDesc;
- u32 *pDescriptors;
-
- pPageDesc = list_entry(
- pAllocationContext->sCoarsePageTableArrays.next,
- struct SCXLNX_COARSE_PAGE_TABLE_ARRAY, list);
-
- pDescriptors = pPageDesc->sCoarsePageTables[0].pDescriptors;
- list_del(&(pPageDesc->list));
-
- spin_unlock(&(pAllocationContext->lock));
-
- if (pDescriptors != NULL)
- internal_free_page((unsigned long)pDescriptors);
-
- internal_kfree(pPageDesc);
-
- spin_lock(&(pAllocationContext->lock));
- }
-
- spin_unlock(&(pAllocationContext->lock));
-}
-
-/*
- * Returns the L1 coarse page descriptor for
- * a coarse page table located at address pCoarsePageTableDescriptors
- */
-u32 SCXLNXCommGetL1CoarseDescriptor(
- u32 pCoarsePageTableDescriptors[256])
-{
- u32 nDescriptor = L1_COARSE_DESCRIPTOR_BASE;
- unsigned int info = read_cpuid(CPUID_CACHETYPE);
-
- nDescriptor |= (virt_to_phys((void *) pCoarsePageTableDescriptors)
- & L1_COARSE_DESCRIPTOR_ADDR_MASK);
-
- if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11))) {
- dprintk(KERN_DEBUG "SCXLNXCommGetL1CoarseDescriptor "
- "V31-12 added to descriptor\n");
- /* the 16k alignment restriction applies */
- nDescriptor |= (DESCRIPTOR_V13_12_GET(
- (u32)pCoarsePageTableDescriptors) <<
- L1_COARSE_DESCRIPTOR_V13_12_SHIFT);
- }
-
- return nDescriptor;
-}
-
-
-#define dprintk_desc(...)
-/*
- * Returns the L2 descriptor for the specified user page.
- */
-u32 SCXLNXCommGetL2DescriptorCommon(u32 nVirtAddr, struct mm_struct *mm)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ptep;
- u32 *hwpte;
- u32 tex = 0;
- u32 nDescriptor = 0;
-
- dprintk_desc(KERN_INFO "VirtAddr = %x\n", nVirtAddr);
- pgd = pgd_offset(mm, nVirtAddr);
- dprintk_desc(KERN_INFO "pgd = %x, value=%x\n", (unsigned int) pgd,
- (unsigned int) *pgd);
- if (pgd_none(*pgd))
- goto error;
- pud = pud_offset(pgd, nVirtAddr);
- dprintk_desc(KERN_INFO "pud = %x, value=%x\n", (unsigned int) pud,
- (unsigned int) *pud);
- if (pud_none(*pud))
- goto error;
- pmd = pmd_offset(pud, nVirtAddr);
- dprintk_desc(KERN_INFO "pmd = %x, value=%x\n", (unsigned int) pmd,
- (unsigned int) *pmd);
- if (pmd_none(*pmd))
- goto error;
-
- if (PMD_TYPE_SECT&(*pmd)) {
- /* We have a section */
- dprintk_desc(KERN_INFO "Section descr=%x\n",
- (unsigned int)*pmd);
- if ((*pmd) & PMD_SECT_BUFFERABLE)
- nDescriptor |= DESCRIPTOR_B_MASK;
- if ((*pmd) & PMD_SECT_CACHEABLE)
- nDescriptor |= DESCRIPTOR_C_MASK;
- if ((*pmd) & PMD_SECT_S)
- nDescriptor |= DESCRIPTOR_S_MASK;
- tex = ((*pmd) >> 12) & 7;
- } else {
- /* We have a table */
- ptep = pte_offset_map(pmd, nVirtAddr);
- if (pte_present(*ptep)) {
- dprintk_desc(KERN_INFO "L2 descr=%x\n",
- (unsigned int) *ptep);
- if ((*ptep) & L_PTE_MT_BUFFERABLE)
- nDescriptor |= DESCRIPTOR_B_MASK;
- if ((*ptep) & L_PTE_MT_WRITETHROUGH)
- nDescriptor |= DESCRIPTOR_C_MASK;
- if ((*ptep) & L_PTE_MT_DEV_SHARED)
- nDescriptor |= DESCRIPTOR_S_MASK;
-
- /*
- * Linux's pte doesn't keep track of TEX value.
- * Have to jump to hwpte see include/asm/pgtable.h
- */
- hwpte = (u32 *) (((u32) ptep) - 0x800);
- if (((*hwpte) & L2_DESCRIPTOR_ADDR_MASK) !=
- ((*ptep) & L2_DESCRIPTOR_ADDR_MASK))
- goto error;
- dprintk_desc(KERN_INFO "hw descr=%x\n", *hwpte);
- tex = ((*hwpte) >> 6) & 7;
- pte_unmap(ptep);
- } else {
- pte_unmap(ptep);
- goto error;
- }
- }
-
- nDescriptor |= (tex << 6);
-
- return nDescriptor;
-
-error:
- dprintk(KERN_ERR "Error occured in %s\n", __func__);
- return 0;
-}
-
-
-/*
- * Changes an L2 page descriptor back to a pointer to a physical page
- */
-inline struct page *SCXLNXCommL2PageDescriptorToPage(u32 nL2PageDescriptor)
-{
- return pte_page(nL2PageDescriptor & L2_DESCRIPTOR_ADDR_MASK);
-}
-
-
-/*
- * Returns the L1 descriptor for the 1KB-aligned coarse page table. The address
- * must be in the kernel address space.
- */
-void SCXLNXCommGetL2PageDescriptor(
- u32 *pL2PageDescriptor,
- u32 nFlags, struct mm_struct *mm)
-{
- unsigned long nPageVirtAddr;
- u32 nDescriptor;
- struct page *pPage;
- bool bUnmapPage = false;
-
- dprintk(KERN_INFO
- "SCXLNXCommGetL2PageDescriptor():"
- "*pL2PageDescriptor=%x\n",
- *pL2PageDescriptor);
-
- if (*pL2PageDescriptor == L2_DESCRIPTOR_FAULT)
- return;
-
- pPage = (struct page *) (*pL2PageDescriptor);
-
- nPageVirtAddr = (unsigned long) page_address(pPage);
- if (nPageVirtAddr == 0) {
- dprintk(KERN_INFO "page_address returned 0\n");
- /* Should we use kmap_atomic(pPage, KM_USER0) instead ? */
- nPageVirtAddr = (unsigned long) kmap(pPage);
- if (nPageVirtAddr == 0) {
- *pL2PageDescriptor = L2_DESCRIPTOR_FAULT;
- dprintk(KERN_ERR "kmap returned 0\n");
- return;
- }
- bUnmapPage = true;
- }
-
- nDescriptor = SCXLNXCommGetL2DescriptorCommon(nPageVirtAddr, mm);
- if (nDescriptor == 0) {
- *pL2PageDescriptor = L2_DESCRIPTOR_FAULT;
- return;
- }
- nDescriptor |= L2_PAGE_DESCRIPTOR_BASE;
-
- nDescriptor |= (page_to_phys(pPage) & L2_DESCRIPTOR_ADDR_MASK);
-
- if (!(nFlags & SCX_SHMEM_TYPE_WRITE))
- /* only read access */
- nDescriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ;
- else
- /* read and write access */
- nDescriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE;
-
- if (bUnmapPage)
- kunmap(pPage);
-
- *pL2PageDescriptor = nDescriptor;
-}
-
-
-/*
- * Unlocks the physical memory pages
- * and frees the coarse pages that need to
- */
-void SCXLNXCommReleaseSharedMemory(
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
- struct SCXLNX_SHMEM_DESC *pShmemDesc,
- u32 nFullCleanup)
-{
- u32 nCoarsePageIndex;
-
- dprintk(KERN_INFO "SCXLNXCommReleaseSharedMemory(%p)\n",
- pShmemDesc);
-
-#ifdef DEBUG_COARSE_TABLES
- printk(KERN_DEBUG "SCXLNXCommReleaseSharedMemory "
- "- numberOfCoarsePages=%d\n",
- pShmemDesc->nNumberOfCoarsePageTables);
-
- for (nCoarsePageIndex = 0;
- nCoarsePageIndex < pShmemDesc->nNumberOfCoarsePageTables;
- nCoarsePageIndex++) {
- u32 nIndex;
-
- printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
- pShmemDesc->pCoarsePageTable[nCoarsePageIndex],
- pShmemDesc->pCoarsePageTable[nCoarsePageIndex]->
- pDescriptors,
- nCoarsePageIndex);
- if (pShmemDesc->pCoarsePageTable[nCoarsePageIndex] != NULL) {
- for (nIndex = 0;
- nIndex < SCX_DESCRIPTOR_TABLE_CAPACITY;
- nIndex += 8) {
- int i;
- printk(KERN_DEBUG " ");
- for (i = nIndex; i < nIndex + 8; i++)
- printk(KERN_DEBUG "%p ",
- pShmemDesc->pCoarsePageTable[
- nCoarsePageIndex]->
- pDescriptors);
- printk(KERN_DEBUG "\n");
- }
- }
- }
- printk(KERN_DEBUG "SCXLNXCommReleaseSharedMemory() - done\n\n");
-#endif
-
- /* Parse the coarse page descriptors */
- for (nCoarsePageIndex = 0;
- nCoarsePageIndex < pShmemDesc->nNumberOfCoarsePageTables;
- nCoarsePageIndex++) {
- u32 nPageIndex;
- u32 nFoundStart = 0;
-
- /* parse the page descriptors of the coarse page */
- for (nPageIndex = 0;
- nPageIndex < SCX_DESCRIPTOR_TABLE_CAPACITY;
- nPageIndex++) {
- u32 nL2PageDescriptor = (u32) (pShmemDesc->
- pCoarsePageTable[nCoarsePageIndex]->
- pDescriptors[nPageIndex]);
-
- if (nL2PageDescriptor != L2_DESCRIPTOR_FAULT) {
- struct page *page =
- SCXLNXCommL2PageDescriptorToPage(
- nL2PageDescriptor);
-
- if (!PageReserved(page))
- SetPageDirty(page);
- internal_page_cache_release(page);
-
- nFoundStart = 1;
- } else if (nFoundStart == 1) {
- break;
- }
- }
-
- /*
- * Only free the coarse pages of descriptors not preallocated
- */
- if ((pShmemDesc->nType == SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM) ||
- (nFullCleanup != 0))
- SCXLNXFreeCoarsePageTable(pAllocationContext,
- pShmemDesc->pCoarsePageTable[nCoarsePageIndex],
- 0);
- }
-
- pShmemDesc->nNumberOfCoarsePageTables = 0;
- dprintk(KERN_INFO "SCXLNXCommReleaseSharedMemory(%p) done\n",
- pShmemDesc);
-}
-
-/*
- * Make sure the coarse pages are allocated. If not allocated, do it Locks down
- * the physical memory pages
- * Verifies the memory attributes depending on nFlags
- */
-int SCXLNXCommFillDescriptorTable(
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
- struct SCXLNX_SHMEM_DESC *pShmemDesc,
- u32 nBufferVAddr,
- struct vm_area_struct **ppVmas,
- u32 pDescriptors[SCX_MAX_COARSE_PAGES],
- u32 *pBufferSize,
- u32 *pBufferStartOffset,
- bool bInUserSpace,
- u32 nFlags,
- u32 *pnDescriptorCount)
-{
- u32 nCoarsePageIndex;
- u32 nNumberOfCoarsePages;
- u32 nPageCount;
- u32 nPageShift = 0;
- u32 nIndex;
- u32 nBufferSize = *pBufferSize;
- int nError;
- unsigned int info = read_cpuid(CPUID_CACHETYPE);
-
- dprintk(KERN_INFO "SCXLNXCommFillDescriptorTable"
- "(%p, nBufferVAddr=0x%08X, size=0x%08X, user=%01x "
- "flags = 0x%08x)\n",
- pShmemDesc,
- nBufferVAddr,
- nBufferSize,
- bInUserSpace,
- nFlags);
-
- /*
- * Compute the number of pages
- * Compute the number of coarse pages
- * Compute the page offset
- */
- nPageCount = ((nBufferVAddr & ~PAGE_MASK) +
- nBufferSize + ~PAGE_MASK) >> PAGE_SHIFT;
-
- /* check whether the 16k alignment restriction applies */
- if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11)))
- /*
- * The 16k alignment restriction applies.
- * Shift data to get them 16k aligned
- */
- nPageShift = DESCRIPTOR_V13_12_GET(nBufferVAddr);
- nPageCount += nPageShift;
-
-
- /*
- * Check the number of pages fit in the coarse pages
- */
- if (nPageCount > (SCX_DESCRIPTOR_TABLE_CAPACITY *
- SCX_MAX_COARSE_PAGES)) {
- dprintk(KERN_ERR "SCXLNXCommFillDescriptorTable(%p): "
- "%u pages required to map shared memory!\n",
- pShmemDesc, nPageCount);
- nError = -ENOMEM;
- goto error;
- }
-
- /* coarse page describe 256 pages */
- nNumberOfCoarsePages = ((nPageCount +
- SCX_DESCRIPTOR_TABLE_CAPACITY_MASK) >>
- SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT);
-
- /*
- * Compute the buffer offset
- */
- *pBufferStartOffset = (nBufferVAddr & ~PAGE_MASK) |
- (nPageShift << PAGE_SHIFT);
-
- /* map each coarse page */
- for (nCoarsePageIndex = 0;
- nCoarsePageIndex < nNumberOfCoarsePages;
- nCoarsePageIndex++) {
- struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable;
-
- /* compute a virtual address with appropriate offset */
- u32 nBufferOffsetVAddr = nBufferVAddr +
- (nCoarsePageIndex * SCX_MAX_COARSE_PAGE_MAPPED_SIZE);
- u32 nPagesToGet;
-
- /*
- * Compute the number of pages left for this coarse page.
- * Decrement nPageCount each time
- */
- nPagesToGet = (nPageCount >>
- SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT) ?
- SCX_DESCRIPTOR_TABLE_CAPACITY : nPageCount;
- nPageCount -= nPagesToGet;
-
- /*
- * Check if the coarse page has already been allocated
- * If not, do it now
- */
- if ((pShmemDesc->nType == SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM)
- || (pShmemDesc->nType ==
- SCXLNX_SHMEM_TYPE_PM_HIBERNATE)) {
- pCoarsePageTable = SCXLNXAllocateCoarsePageTable(
- pAllocationContext,
- SCXLNX_PAGE_DESCRIPTOR_TYPE_NORMAL);
-
- if (pCoarsePageTable == NULL) {
- dprintk(KERN_ERR
- "SCXLNXCommFillDescriptorTable(%p):"
- " SCXLNXConnAllocateCoarsePageTable "
- "failed for coarse page %d\n",
- pShmemDesc, nCoarsePageIndex);
- nError = -ENOMEM;
- goto error;
- }
-
- pShmemDesc->pCoarsePageTable[nCoarsePageIndex] =
- pCoarsePageTable;
- } else {
- pCoarsePageTable =
- pShmemDesc->pCoarsePageTable[nCoarsePageIndex];
- }
-
- /*
- * The page is not necessarily filled with zeroes.
- * Set the fault descriptors ( each descriptor is 4 bytes long)
- */
- memset(pCoarsePageTable->pDescriptors, 0x00,
- SCX_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
-
- if (bInUserSpace) {
- int nPages;
-
- /*
- * TRICK: use pCoarsePageDescriptor->pDescriptors to
- * hold the (struct page*) items before getting their
- * physical address
- */
- down_read(&(current->mm->mmap_sem));
- nPages = internal_get_user_pages(
- current,
- current->mm,
- nBufferOffsetVAddr,
- /*
- * nPageShift is cleared after retrieving first
- * coarse page
- */
- (nPagesToGet - nPageShift),
- (nFlags & SCX_SHMEM_TYPE_WRITE) ? 1 : 0,
- 0,
- (struct page **) (pCoarsePageTable->pDescriptors
- + nPageShift),
- ppVmas);
- up_read(&(current->mm->mmap_sem));
-
- if ((nPages <= 0) ||
- (nPages != (nPagesToGet - nPageShift))) {
- dprintk(KERN_ERR"SCXLNXCommFillDescriptorTable:"
- " get_user_pages got %d pages while "
- "trying to get %d pages!\n",
- nPages, nPagesToGet - nPageShift);
- nError = -EFAULT;
- goto error;
- }
-
- for (nIndex = nPageShift;
- nIndex < nPageShift + nPages;
- nIndex++) {
- /* Get the actual L2 descriptors */
- SCXLNXCommGetL2PageDescriptor(
- &pCoarsePageTable->pDescriptors[nIndex],
- nFlags,
- current->mm);
- /*
- * Reject Strongly-Ordered or Device Memory
- */
-#define IS_STRONGLY_ORDERED_OR_DEVICE_MEM(x) \
- ((((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_STRONGLY_ORDERED) || \
- (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_SHARED_DEVICE) || \
- (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_NON_SHARED_DEVICE))
-
- if (IS_STRONGLY_ORDERED_OR_DEVICE_MEM(
- pCoarsePageTable->
- pDescriptors[nIndex])) {
- dprintk(KERN_ERR
- "SCXLNXCommFillDescriptorTable:"
- " descriptor 0x%08X use "
- "strongly-ordered or device "
- "memory. Rejecting!\n",
- pCoarsePageTable->
- pDescriptors[nIndex]);
- nError = -EFAULT;
- goto error;
- }
- }
- } else {
- /* Kernel-space memory */
- for (nIndex = nPageShift;
- nIndex < nPagesToGet;
- nIndex++) {
- unsigned long addr =
- (unsigned long) (nBufferOffsetVAddr +
- ((nIndex - nPageShift) *
- PAGE_SIZE));
- pCoarsePageTable->pDescriptors[nIndex] =
- (u32) vmalloc_to_page((void *)addr);
- get_page((struct page *) pCoarsePageTable->
- pDescriptors[nIndex]);
-
- /* change coarse page "page address" */
- SCXLNXCommGetL2PageDescriptor(
- &pCoarsePageTable->pDescriptors[nIndex],
- nFlags,
- &init_mm);
- }
- }
-
- dmac_flush_range((void *)pCoarsePageTable->pDescriptors,
- (void *)(((u32)(pCoarsePageTable->pDescriptors)) +
- SCX_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32)));
-
- outer_clean_range(
- __pa(pCoarsePageTable->pDescriptors),
- __pa(pCoarsePageTable->pDescriptors) +
- SCX_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
- wmb();
-
- /* Update the coarse page table address */
- pDescriptors[nCoarsePageIndex] =
- SCXLNXCommGetL1CoarseDescriptor(
- pCoarsePageTable->pDescriptors);
-
- /*
- * The next coarse page has no page shift, reset the
- * nPageShift
- */
- nPageShift = 0;
- }
-
- *pnDescriptorCount = nNumberOfCoarsePages;
- pShmemDesc->nNumberOfCoarsePageTables = nNumberOfCoarsePages;
-
-#ifdef DEBUG_COARSE_TABLES
- printk(KERN_DEBUG "nSCXLNXCommFillDescriptorTable - size=0x%08X "
- "numberOfCoarsePages=%d\n", *pBufferSize,
- pShmemDesc->nNumberOfCoarsePageTables);
- for (nCoarsePageIndex = 0;
- nCoarsePageIndex < pShmemDesc->nNumberOfCoarsePageTables;
- nCoarsePageIndex++) {
- u32 nIndex;
- struct SCXLNX_COARSE_PAGE_TABLE *pCorsePageTable =
- pShmemDesc->pCoarsePageTable[nCoarsePageIndex];
-
- printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
- pCorsePageTable,
- pCorsePageTable->pDescriptors,
- nCoarsePageIndex);
- for (nIndex = 0;
- nIndex < SCX_DESCRIPTOR_TABLE_CAPACITY;
- nIndex += 8) {
- int i;
- printk(KERN_DEBUG " ");
- for (i = nIndex; i < nIndex + 8; i++)
- printk(KERN_DEBUG "0x%08X ",
- pCorsePageTable->pDescriptors[i]);
- printk(KERN_DEBUG "\n");
- }
- }
- printk(KERN_DEBUG "nSCXLNXCommFillDescriptorTable() - done\n\n");
-#endif
-
- return 0;
-
-error:
- SCXLNXCommReleaseSharedMemory(
- pAllocationContext,
- pShmemDesc,
- 0);
-
- return nError;
-}
-
-
-/*----------------------------------------------------------------------------
- * Standard communication operations
- *----------------------------------------------------------------------------*/
-
-u8 *SCXLNXCommGetDescription(struct SCXLNX_COMM *pComm)
-{
- if (test_bit(SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED, &(pComm->nFlags)))
- return pComm->pBuffer->sVersionDescription;
-
- return NULL;
-}
-
-/*
- * Returns a non-zero value if the specified S-timeout has expired, zero
- * otherwise.
- *
- * The placeholder referenced to by pnRelativeTimeoutJiffies gives the relative
- * timeout from now in jiffies. It is set to zero if the S-timeout has expired,
- * or to MAX_SCHEDULE_TIMEOUT if the S-timeout is infinite.
- */
-static int SCXLNXCommTestSTimeout(
- u64 sTimeout,
- signed long *pnRelativeTimeoutJiffies)
-{
- struct timeval now;
- u64 sTime64;
-
- *pnRelativeTimeoutJiffies = 0;
-
- /* immediate timeout */
- if (sTimeout == TIME_IMMEDIATE)
- return 1;
-
- /* infinite timeout */
- if (sTimeout == TIME_INFINITE) {
- dprintk(KERN_DEBUG "SCXLNXCommTestSTimeout: "
- "timeout is infinite\n");
- *pnRelativeTimeoutJiffies = MAX_SCHEDULE_TIMEOUT;
- return 0;
- }
-
- do_gettimeofday(&now);
- sTime64 = now.tv_sec;
- /* will not overflow as operations are done on 64bit values */
- sTime64 = (sTime64 * 1000) + (now.tv_usec / 1000);
-
- /* timeout expired */
- if (sTime64 >= sTimeout) {
- dprintk(KERN_DEBUG "SCXLNXCommTestSTimeout: timeout expired\n");
- return 1;
- }
-
- /*
- * finite timeout, compute pnRelativeTimeoutJiffies
- */
- /* will not overflow as sTime64 < sTimeout */
- sTimeout -= sTime64;
-
- /* guarantee *pnRelativeTimeoutJiffies is a valid timeout */
- if ((sTimeout >> 32) != 0)
- *pnRelativeTimeoutJiffies = MAX_JIFFY_OFFSET;
- else
- *pnRelativeTimeoutJiffies =
- msecs_to_jiffies((unsigned int) sTimeout);
-
- dprintk(KERN_DEBUG "SCXLNXCommTestSTimeout: timeout is 0x%lx\n",
- *pnRelativeTimeoutJiffies);
- return 0;
-}
-
-static void tf_copy_answers(struct SCXLNX_COMM *pComm)
-{
- u32 nFirstAnswer;
- u32 nFirstFreeAnswer;
- struct SCXLNX_ANSWER_STRUCT *pAnswerStructureTemp;
-
- if (test_bit(SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED, &(pComm->nFlags))) {
- spin_lock(&pComm->lock);
- nFirstFreeAnswer = SCXLNXCommReadReg32(
- &pComm->pBuffer->nFirstFreeAnswer);
- nFirstAnswer = SCXLNXCommReadReg32(
- &pComm->pBuffer->nFirstAnswer);
-
- while (nFirstAnswer != nFirstFreeAnswer) {
- /* answer queue not empty */
- union SCX_ANSWER_MESSAGE sComAnswer;
- struct SCX_ANSWER_HEADER sHeader;
-
- /*
- * the size of the command in words of 32bit, not in
- * bytes
- */
- u32 nCommandSize;
- u32 i;
- u32 *pTemp = (uint32_t *) &sHeader;
-
- dprintk(KERN_INFO
- "[pid=%d] tf_copy_answers(%p): "
- "Read answers from L1\n",
- current->pid, pComm);
-
- /* Read the answer header */
- for (i = 0;
- i < sizeof(struct SCX_ANSWER_HEADER)/sizeof(u32);
- i++)
- pTemp[i] = pComm->pBuffer->sAnswerQueue[
- (nFirstAnswer + i) %
- SCX_S_ANSWER_QUEUE_CAPACITY];
-
- /* Read the answer from the L1_Buffer*/
- nCommandSize = sHeader.nMessageSize +
- sizeof(struct SCX_ANSWER_HEADER)/sizeof(u32);
- pTemp = (uint32_t *) &sComAnswer;
- for (i = 0; i < nCommandSize; i++)
- pTemp[i] = pComm->pBuffer->sAnswerQueue[
- (nFirstAnswer + i) %
- SCX_S_ANSWER_QUEUE_CAPACITY];
-
- pAnswerStructureTemp = (struct SCXLNX_ANSWER_STRUCT *)
- sComAnswer.sHeader.nOperationID;
-
- SCXLNXDumpAnswer(&sComAnswer);
-
- memcpy(pAnswerStructureTemp->pAnswer, &sComAnswer,
- nCommandSize * sizeof(u32));
- pAnswerStructureTemp->bAnswerCopied = true;
-
- nFirstAnswer += nCommandSize;
- SCXLNXCommWriteReg32(&pComm->pBuffer->nFirstAnswer,
- nFirstAnswer);
- }
- spin_unlock(&(pComm->lock));
- }
-}
-
-static void tf_copy_command(
- struct SCXLNX_COMM *pComm,
- union SCX_COMMAND_MESSAGE *pMessage,
- struct SCXLNX_CONNECTION *pConn,
- enum SCXLNX_COMMAND_STATE *command_status)
-{
- if ((test_bit(SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED, &(pComm->nFlags)))
- && (pMessage != NULL)) {
- /*
- * Write the message in the message queue.
- */
-
- if (*command_status == SCXLNX_COMMAND_STATE_PENDING) {
- u32 nCommandSize;
- u32 nQueueWordsCount;
- u32 i;
- u32 nFirstFreeCommand;
- u32 nFirstCommand;
-
- spin_lock(&pComm->lock);
-
- nFirstCommand = SCXLNXCommReadReg32(
- &pComm->pBuffer->nFirstCommand);
- nFirstFreeCommand = SCXLNXCommReadReg32(
- &pComm->pBuffer->nFirstFreeCommand);
-
- nQueueWordsCount = nFirstFreeCommand - nFirstCommand;
- nCommandSize = pMessage->sHeader.nMessageSize +
- sizeof(struct SCX_COMMAND_HEADER)/sizeof(u32);
- if ((nQueueWordsCount + nCommandSize) <
- SCX_N_MESSAGE_QUEUE_CAPACITY) {
- /*
- * Command queue is not full.
- * If the Command queue is full,
- * the command will be copied at
- * another iteration
- * of the current function.
- */
-
- /*
- * Change the conn state
- */
- if (pConn == NULL)
- goto copy;
-
- spin_lock(&(pConn->stateLock));
-
- if ((pConn->nState ==
- SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT)
- &&
- (pMessage->sHeader.nMessageType ==
- SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
-
- dprintk(KERN_INFO
- "tf_copy_command(%p):"
- "Conn state is DEVICE_CONTEXT_SENT\n",
- pConn);
- pConn->nState =
- SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT;
- } else if ((pConn->nState !=
- SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT)
- &&
- (pMessage->sHeader.nMessageType !=
- SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
- /* The connection
- * is no longer valid.
- * We may not send any command on it,
- * not even another
- * DESTROY_DEVICE_CONTEXT.
- */
- dprintk(KERN_INFO
- "[pid=%d] tf_copy_command(%p): "
- "Connection no longer valid."
- "ABORT\n",
- current->pid, pConn);
- *command_status =
- SCXLNX_COMMAND_STATE_ABORTED;
- spin_unlock(
- &(pConn->stateLock));
- spin_unlock(
- &pComm->lock);
- return;
- } else if (
- (pMessage->sHeader.nMessageType ==
- SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) &&
- (pConn->nState ==
- SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT)
- ) {
- dprintk(KERN_INFO
- "[pid=%d] tf_copy_command(%p): "
- "Conn state is "
- "DESTROY_DEVICE_CONTEXT_SENT\n",
- current->pid, pConn);
- pConn->nState =
- SCXLNX_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT;
- }
- spin_unlock(&(pConn->stateLock));
-copy:
- /*
- * Copy the command to L1 Buffer
- */
- dprintk(KERN_INFO
- "[pid=%d] tf_copy_command(%p): "
- "Write Message in the queue\n",
- current->pid, pMessage);
- SCXLNXDumpMessage(pMessage);
-
- for (i = 0; i < nCommandSize; i++)
- pComm->pBuffer->sCommandQueue[
- (nFirstFreeCommand + i) %
- SCX_N_MESSAGE_QUEUE_CAPACITY] =
- ((uint32_t *) pMessage)[i];
-
- *command_status =
- SCXLNX_COMMAND_STATE_SENT;
- nFirstFreeCommand += nCommandSize;
-
- SCXLNXCommWriteReg32(
- &pComm->
- pBuffer->nFirstFreeCommand,
- nFirstFreeCommand);
- }
- spin_unlock(&pComm->lock);
- }
- }
-}
-
-/*
- * Sends the specified message through the specified communication channel.
- *
- * This function sends the command and waits for the answer
- *
- * Returns zero upon successful completion, or an appropriate error code upon
- * failure.
- */
-static int tf_send_recv(struct SCXLNX_COMM *pComm,
- union SCX_COMMAND_MESSAGE *pMessage,
- struct SCXLNX_ANSWER_STRUCT *pAnswerStruct,
- struct SCXLNX_CONNECTION *pConn,
- int bKillable
- #ifdef CONFIG_TF_ZEBRA
- , bool *secure_is_idle
- #endif
- )
-{
- int result;
- u64 sTimeout;
- signed long nRelativeTimeoutJiffies;
- bool wait_prepared = false;
- enum SCXLNX_COMMAND_STATE command_status = SCXLNX_COMMAND_STATE_PENDING;
- DEFINE_WAIT(wait);
-#ifdef CONFIG_FREEZER
- unsigned long saved_flags;
-#endif
- dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p)\n",
- current->pid, pMessage);
-
-#ifdef CONFIG_FREEZER
- saved_flags = current->flags;
- current->flags |= PF_FREEZER_NOSIG;
-#endif
-
- /*
- * Read all answers from the answer queue
- */
-copy_answers:
- tf_copy_answers(pComm);
-
- tf_copy_command(pComm, pMessage, pConn, &command_status);
-
- /*
- * Notify all waiting threads
- */
- wake_up(&(pComm->waitQueue));
-
-#ifdef CONFIG_FREEZER
- if (unlikely(freezing(current))) {
-
-#ifdef CONFIG_TF_ZEBRA
- if (!(*secure_is_idle)) {
- if (tf_schedule_secure_world(pComm, true) ==
- STATUS_PENDING)
- goto copy_answers;
-
- tf_l4sec_clkdm_allow_idle(true, true);
- *secure_is_idle = true;
- }
-#endif
-
- dprintk(KERN_INFO
- "Entering refrigerator.\n");
- refrigerator();
- dprintk(KERN_INFO
- "Left refrigerator.\n");
- goto copy_answers;
- }
-#endif
-
-#ifndef CONFIG_PREEMPT
- if (need_resched())
- schedule();
-#endif
-
-#ifdef CONFIG_TF_ZEBRA
- /*
- * Handle RPC (if any)
- */
- if (SCXLNXCommExecuteRPCCommand(pComm) == RPC_NON_YIELD)
- goto schedule_secure_world;
-#endif
-
- /*
- * Join wait queue
- */
- /*dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p): Prepare to wait\n",
- current->pid, pMessage);*/
- prepare_to_wait(&pComm->waitQueue, &wait,
- bKillable ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
- wait_prepared = true;
-
- /*
- * Check if our answer is available
- */
- if (command_status == SCXLNX_COMMAND_STATE_ABORTED) {
- /* Not waiting for an answer, return error code */
- result = -EINTR;
- dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
- "Command status is ABORTED."
- "Exit with 0x%x\n",
- current->pid, result);
- goto exit;
- }
- if (pAnswerStruct->bAnswerCopied) {
- dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
- "Received answer (type 0x%02X)\n",
- current->pid,
- pAnswerStruct->pAnswer->sHeader.nMessageType);
- result = 0;
- goto exit;
- }
-
- /*
- * Check if a signal is pending
- */
- if (bKillable && (sigkill_pending())) {
- if (command_status == SCXLNX_COMMAND_STATE_PENDING)
- /*Command was not sent. */
- result = -EINTR;
- else
- /* Command was sent but no answer was received yet. */
- result = -EIO;
-
- dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
- "Signal Pending. Return error %d\n",
- current->pid, result);
- goto exit;
- }
-
- /*
- * Check if secure world is schedulable. It is schedulable if at
- * least one of the following conditions holds:
- * + it is still initializing (SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED
- * is not set);
- * + there is a command in the queue;
- * + the secure world timeout is zero.
- */
- if (test_bit(SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED, &(pComm->nFlags))) {
- u32 nFirstFreeCommand;
- u32 nFirstCommand;
- spin_lock(&pComm->lock);
- nFirstCommand = SCXLNXCommReadReg32(
- &pComm->pBuffer->nFirstCommand);
- nFirstFreeCommand = SCXLNXCommReadReg32(
- &pComm->pBuffer->nFirstFreeCommand);
- spin_unlock(&pComm->lock);
- SCXLNXCommReadTimeout(pComm, &sTimeout);
- if ((nFirstFreeCommand == nFirstCommand) &&
- (SCXLNXCommTestSTimeout(sTimeout,
- &nRelativeTimeoutJiffies) == 0))
- /*
- * If command queue is empty and if timeout has not
- * expired secure world is not schedulable
- */
- goto wait;
- }
-
- finish_wait(&pComm->waitQueue, &wait);
- wait_prepared = false;
-
- /*
- * Yield to the Secure World
- */
-#ifdef CONFIG_TF_ZEBRA
-schedule_secure_world:
- if (*secure_is_idle) {
- tf_l4sec_clkdm_wakeup(true, true);
- *secure_is_idle = false;
- }
-#endif
-
- result = tf_schedule_secure_world(pComm, false);
- if (result < 0)
- goto exit;
- goto copy_answers;
-
-wait:
- if (bKillable && (sigkill_pending())) {
- if (command_status == SCXLNX_COMMAND_STATE_PENDING)
- result = -EINTR; /* Command was not sent. */
- else
- /* Command was sent but no answer was received yet. */
- result = -EIO;
-
- dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
- "Signal Pending while waiting. Return error %d\n",
- current->pid, result);
- goto exit;
- }
-
- if (nRelativeTimeoutJiffies == MAX_SCHEDULE_TIMEOUT)
- dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
- "prepare to sleep infinitely\n", current->pid);
- else
- dprintk(KERN_INFO "tf_send_recv: "
- "prepare to sleep 0x%lx jiffies\n",
- nRelativeTimeoutJiffies);
-
-#ifdef CONFIG_TF_ZEBRA
- if (!(*secure_is_idle)) {
- if (tf_schedule_secure_world(pComm, true) == STATUS_PENDING) {
- finish_wait(&pComm->waitQueue, &wait);
- wait_prepared = false;
- goto copy_answers;
- }
- tf_l4sec_clkdm_allow_idle(true, true);
- *secure_is_idle = true;
- }
-#endif
-
- /* go to sleep */
- if (schedule_timeout(nRelativeTimeoutJiffies) == 0)
- dprintk(KERN_INFO
- "tf_send_recv: timeout expired\n");
- else
- dprintk(KERN_INFO
- "tf_send_recv: signal delivered\n");
-
- finish_wait(&pComm->waitQueue, &wait);
- wait_prepared = false;
- goto copy_answers;
-
-exit:
- if (wait_prepared) {
- finish_wait(&pComm->waitQueue, &wait);
- wait_prepared = false;
- }
-
-#ifdef CONFIG_TF_ZEBRA
- if ((!(*secure_is_idle)) && (result != -EIO)) {
- if (tf_schedule_secure_world(pComm, true) == STATUS_PENDING)
- goto copy_answers;
-
- tf_l4sec_clkdm_allow_idle(true, true);
- *secure_is_idle = true;
- }
-#endif
-
-#ifdef CONFIG_FREEZER
- current->flags &= ~(PF_FREEZER_NOSIG);
- current->flags |= (saved_flags & PF_FREEZER_NOSIG);
-#endif
-
- return result;
-}
-
-/*
- * Sends the specified message through the specified communication channel.
- *
- * This function sends the message and waits for the corresponding answer
- * It may return if a signal needs to be delivered.
- *
- * If pConn is not NULL, before sending the message, this function checks that
- * it is still valid by calling the function SCXLNXConnCheckMessageValidity
- *
- * Returns zero upon successful completion, or an appropriate error code upon
- * failure.
- */
-int SCXLNXCommSendReceive(struct SCXLNX_COMM *pComm,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer,
- struct SCXLNX_CONNECTION *pConn,
- bool bKillable)
-{
- int nError;
- struct SCXLNX_ANSWER_STRUCT sAnswerStructure;
-#ifdef CONFIG_SMP
- long ret_affinity;
- cpumask_t saved_cpu_mask;
- cpumask_t local_cpu_mask = CPU_MASK_NONE;
-#endif
-#ifdef CONFIG_TF_ZEBRA
- bool secure_is_idle = true;
-#endif
-
- sAnswerStructure.pAnswer = pAnswer;
- sAnswerStructure.bAnswerCopied = false;
-
- if (pMessage != NULL)
- pMessage->sHeader.nOperationID = (u32) &sAnswerStructure;
-
- dprintk(KERN_INFO "SCXLNXSMCommSendReceive: "
- "tf_send_recv\n");
-
-#ifdef CONFIG_TF_ZEBRA
- if (!test_bit(SCXLNX_COMM_FLAG_PA_AVAILABLE, &pComm->nFlags)) {
- dprintk(KERN_ERR "SCXLNXCommSendReceive(%p): "
- "Secure world not started\n", pComm);
-
- return -EFAULT;
- }
-#endif
-
- if (test_bit(SCXLNX_COMM_FLAG_TERMINATING, &(pComm->nFlags)) != 0) {
- dprintk(KERN_DEBUG "SCXLNXSMCommSendReceive: "
- "Flag Terminating is set\n");
- return 0;
- }
-
-#ifdef CONFIG_SMP
- cpu_set(0, local_cpu_mask);
- sched_getaffinity(0, &saved_cpu_mask);
- ret_affinity = sched_setaffinity(0, &local_cpu_mask);
- if (ret_affinity != 0)
- dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity);
-#endif
-
-
- /*
- * Send the command
- */
- nError = tf_send_recv(pComm,
- pMessage, &sAnswerStructure, pConn, bKillable
- #ifdef CONFIG_TF_ZEBRA
- , &secure_is_idle
- #endif
- );
-
- if (!bKillable && sigkill_pending()) {
- if ((pMessage->sHeader.nMessageType ==
- SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) &&
- (pAnswer->sCreateDeviceContextAnswer.nErrorCode ==
- S_SUCCESS)) {
-
- /*
- * CREATE_DEVICE_CONTEXT was interrupted.
- */
- dprintk(KERN_INFO "SCXLNXSMCommSendReceive: "
- "sending DESTROY_DEVICE_CONTEXT\n");
- sAnswerStructure.pAnswer = pAnswer;
- sAnswerStructure.bAnswerCopied = false;
-
- pMessage->sHeader.nMessageType =
- SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
- pMessage->sHeader.nMessageSize =
- (sizeof(struct
- SCX_COMMAND_DESTROY_DEVICE_CONTEXT) -
- sizeof(struct SCX_COMMAND_HEADER))/sizeof(u32);
- pMessage->sHeader.nOperationID =
- (u32) &sAnswerStructure;
- pMessage->sDestroyDeviceContextMessage.hDeviceContext =
- pAnswer->sCreateDeviceContextAnswer.
- hDeviceContext;
-
- goto destroy_context;
- }
- }
-
- if (nError == 0) {
- /*
- * tf_send_recv returned Success.
- */
- if (pMessage->sHeader.nMessageType ==
- SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) {
- spin_lock(&(pConn->stateLock));
- pConn->nState = SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT;
- spin_unlock(&(pConn->stateLock));
- } else if (pMessage->sHeader.nMessageType ==
- SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
- spin_lock(&(pConn->stateLock));
- pConn->nState = SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT;
- spin_unlock(&(pConn->stateLock));
- }
- } else if (nError == -EINTR) {
- /*
- * No command was sent, return failure.
- */
- dprintk(KERN_ERR
- "SCXLNXSMCommSendReceive: "
- "tf_send_recv failed (error %d) !\n",
- nError);
- } else if (nError == -EIO) {
- /*
- * A command was sent but its answer is still pending.
- */
-
- /* means bKillable is true */
- dprintk(KERN_ERR
- "SCXLNXSMCommSendReceive: "
- "tf_send_recv interrupted (error %d)."
- "Send DESTROY_DEVICE_CONTEXT.\n", nError);
-
- /* Send the DESTROY_DEVICE_CONTEXT. */
- sAnswerStructure.pAnswer = pAnswer;
- sAnswerStructure.bAnswerCopied = false;
-
- pMessage->sHeader.nMessageType =
- SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
- pMessage->sHeader.nMessageSize =
- (sizeof(struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT) -
- sizeof(struct SCX_COMMAND_HEADER))/sizeof(u32);
- pMessage->sHeader.nOperationID =
- (u32) &sAnswerStructure;
- pMessage->sDestroyDeviceContextMessage.hDeviceContext =
- pConn->hDeviceContext;
-
- nError = tf_send_recv(pComm,
- pMessage, &sAnswerStructure, pConn, false
- #ifdef CONFIG_TF_ZEBRA
- , &secure_is_idle
- #endif
- );
- if (nError == -EINTR) {
- /*
- * Another thread already sent
- * DESTROY_DEVICE_CONTEXT.
- * We must still wait for the answer
- * to the original command.
- */
- pMessage = NULL;
- goto destroy_context;
- } else {
- /* An answer was received.
- * Check if it is the answer
- * to the DESTROY_DEVICE_CONTEXT.
- */
- spin_lock(&pComm->lock);
- if (pAnswer->sHeader.nMessageType !=
- SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
- sAnswerStructure.bAnswerCopied = false;
- }
- spin_unlock(&pComm->lock);
- if (!sAnswerStructure.bAnswerCopied) {
- /* Answer to DESTROY_DEVICE_CONTEXT
- * was not yet received.
- * Wait for the answer.
- */
- dprintk(KERN_INFO
- "[pid=%d] SCXLNXCommSendReceive:"
- "Answer to DESTROY_DEVICE_CONTEXT"
- "not yet received.Retry\n",
- current->pid);
- pMessage = NULL;
- goto destroy_context;
- }
- }
- }
-
- dprintk(KERN_INFO "SCXLNXCommSendReceive(): Message answer ready\n");
- goto exit;
-
-destroy_context:
- nError = tf_send_recv(pComm,
- pMessage, &sAnswerStructure, pConn, false
- #ifdef CONFIG_TF_ZEBRA
- , &secure_is_idle
- #endif
- );
-
- /*
- * tf_send_recv cannot return an error because
- * it's not killable and not within a connection
- */
- BUG_ON(nError != 0);
-
- /* Reset the state, so a new CREATE DEVICE CONTEXT can be sent */
- spin_lock(&(pConn->stateLock));
- pConn->nState = SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT;
- spin_unlock(&(pConn->stateLock));
-
-exit:
-
-#ifdef CONFIG_SMP
- ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
- if (ret_affinity != 0)
- dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity);
-#endif
- return nError;
-}
-
-/*----------------------------------------------------------------------------
- * Power management
- *----------------------------------------------------------------------------*/
-
-
-/*
- * Handles all the power management calls.
- * The nOperation is the type of power management
- * operation to be performed.
- *
- * This routine will only return if a failure occured or if
- * the required opwer management is of type "resume".
- * "Hibernate" and "Shutdown" should lock when doing the
- * corresponding SMC to the Secure World
- */
-int SCXLNXCommPowerManagement(struct SCXLNX_COMM *pComm,
- enum SCXLNX_POWER_OPERATION nOperation)
-{
- u32 nStatus;
- int nError = 0;
-
- dprintk(KERN_INFO "SCXLNXCommPowerManagement(%d)\n", nOperation);
-
-#ifdef CONFIG_TF_ZEBRA
- if (!test_bit(SCXLNX_COMM_FLAG_PA_AVAILABLE, &pComm->nFlags)) {
- dprintk(KERN_INFO "SCXLNXCommPowerManagement(%p): "
- "succeeded (not started)\n", pComm);
-
- return 0;
- }
-#endif
-
- nStatus = ((SCXLNXCommReadReg32(&(pComm->pBuffer->nStatus_S))
- & SCX_STATUS_POWER_STATE_MASK)
- >> SCX_STATUS_POWER_STATE_SHIFT);
-
- switch (nOperation) {
- case SCXLNX_POWER_OPERATION_SHUTDOWN:
- switch (nStatus) {
- case SCX_POWER_MODE_ACTIVE:
- nError = SCXLNXCommShutdown(pComm);
-
- if (nError) {
- dprintk(KERN_ERR "SCXLNXCommPowerManagement(): "
- "Failed with error code 0x%08x\n",
- nError);
- goto error;
- }
- break;
-
- default:
- goto not_allowed;
- }
- break;
-
- case SCXLNX_POWER_OPERATION_HIBERNATE:
- switch (nStatus) {
- case SCX_POWER_MODE_ACTIVE:
- nError = SCXLNXCommHibernate(pComm);
-
- if (nError) {
- dprintk(KERN_ERR "SCXLNXCommPowerManagement(): "
- "Failed with error code 0x%08x\n",
- nError);
- goto error;
- }
- break;
-
- default:
- goto not_allowed;
- }
- break;
-
- case SCXLNX_POWER_OPERATION_RESUME:
- nError = SCXLNXCommResume(pComm);
-
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXCommPowerManagement(): "
- "Failed with error code 0x%08x\n",
- nError);
- goto error;
- }
- break;
- }
-
- dprintk(KERN_INFO "SCXLNXCommPowerManagement(): succeeded\n");
- return 0;
-
-not_allowed:
- dprintk(KERN_ERR "SCXLNXCommPowerManagement(): "
- "Power command not allowed in current "
- "Secure World state %d\n", nStatus);
- nError = -ENOTTY;
-error:
- return nError;
-}
-
diff --git a/security/tf_driver/scxlnx_comm_tz.c b/security/tf_driver/scxlnx_comm_tz.c
deleted file mode 100644
index b186d98548a4..000000000000
--- a/security/tf_driver/scxlnx_comm_tz.c
+++ /dev/null
@@ -1,891 +0,0 @@
-/*
- * Copyright (c) 2010 Trusted Logic S.A.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
- */
-
-#include <asm/div64.h>
-#include <asm/system.h>
-#include <linux/version.h>
-#include <asm/cputype.h>
-#include <linux/interrupt.h>
-#include <linux/page-flags.h>
-#include <linux/pagemap.h>
-#include <linux/vmalloc.h>
-#include <linux/jiffies.h>
-
-#include "scxlnx_defs.h"
-#include "scxlnx_comm.h"
-#include "scx_protocol.h"
-#include "scxlnx_util.h"
-#include "scxlnx_conn.h"
-
-/*
- * Structure common to all SMC operations
- */
-struct SCXLNX_GENERIC_SMC {
- u32 reg0;
- u32 reg1;
- u32 reg2;
- u32 reg3;
- u32 reg4;
-};
-
-/*----------------------------------------------------------------------------
- * SMC operations
- *----------------------------------------------------------------------------*/
-
-static inline void SCXLNXCommCallGenericSMC(
- struct SCXLNX_GENERIC_SMC *pGenericSMC)
-{
-#ifdef CONFIG_SMP
- long ret;
- cpumask_t saved_cpu_mask;
- cpumask_t local_cpu_mask = CPU_MASK_NONE;
-
- cpu_set(0, local_cpu_mask);
- sched_getaffinity(0, &saved_cpu_mask);
- ret = sched_setaffinity(0, &local_cpu_mask);
- if (ret != 0)
- {
- dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret);
- }
-#endif
-
- __asm__ volatile(
- "mov r0, %2\n"
- "mov r1, %3\n"
- "mov r2, %4\n"
- "mov r3, %5\n"
- "mov r4, %6\n"
- ".word 0xe1600070 @ SMC 0\n"
- "mov %0, r0\n"
- "mov %1, r1\n"
- : "=r" (pGenericSMC->reg0), "=r" (pGenericSMC->reg1)
- : "r" (pGenericSMC->reg0), "r" (pGenericSMC->reg1),
- "r" (pGenericSMC->reg2), "r" (pGenericSMC->reg3),
- "r" (pGenericSMC->reg4)
- : "r0", "r1", "r2", "r3", "r4");
-
-#ifdef CONFIG_SMP
- ret = sched_setaffinity(0, &saved_cpu_mask);
- if (ret != 0)
- {
- dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret);
- }
-#endif
-}
-
-/*
- * Calls the get protocol version SMC.
- * Fills the parameter pProtocolVersion with the version number returned by the
- * SMC
- */
-static inline void SCXLNXCommCallGetProtocolVersionSMC(u32 *pProcotolVersion)
-{
- struct SCXLNX_GENERIC_SMC sGenericSMC;
-
- sGenericSMC.reg0 = SCX_SMC_GET_PROTOCOL_VERSION;
- sGenericSMC.reg1 = 0;
- sGenericSMC.reg2 = 0;
- sGenericSMC.reg3 = 0;
- sGenericSMC.reg4 = 0;
-
- SCXLNXCommCallGenericSMC(&sGenericSMC);
- *pProcotolVersion = sGenericSMC.reg1;
-}
-
-
-/*
- * Calls the init SMC with the specified parameters.
- * Returns zero upon successful completion, or an appropriate error code upon
- * failure.
- */
-static inline int SCXLNXCommCallInitSMC(u32 nSharedPageDescriptor)
-{
- struct SCXLNX_GENERIC_SMC sGenericSMC;
-
- sGenericSMC.reg0 = SCX_SMC_INIT;
- /* Descriptor for the layer 1 shared buffer */
- sGenericSMC.reg1 = nSharedPageDescriptor;
- sGenericSMC.reg2 = 0;
- sGenericSMC.reg3 = 0;
- sGenericSMC.reg4 = 0;
-
- SCXLNXCommCallGenericSMC(&sGenericSMC);
- if (sGenericSMC.reg0 != S_SUCCESS)
- printk(KERN_ERR "SCXLNXCommCallInitSMC:"
- " r0=0x%08X upon return (expected 0x%08X)!\n",
- sGenericSMC.reg0,
- S_SUCCESS);
-
- return sGenericSMC.reg0;
-}
-
-
-/*
- * Calls the reset irq SMC.
- */
-static inline void SCXLNXCommCallResetIrqSMC(void)
-{
- struct SCXLNX_GENERIC_SMC sGenericSMC;
-
- sGenericSMC.reg0 = SCX_SMC_RESET_IRQ;
- sGenericSMC.reg1 = 0;
- sGenericSMC.reg2 = 0;
- sGenericSMC.reg3 = 0;
- sGenericSMC.reg4 = 0;
-
- SCXLNXCommCallGenericSMC(&sGenericSMC);
-}
-
-
-/*
- * Calls the WAKE_UP SMC.
- * Returns zero upon successful completion, or an appropriate error code upon
- * failure.
- */
-static inline int SCXLNXCommCallWakeUpSMC(u32 nL1SharedBufferDescriptor,
- u32 nSharedMemStartOffset,
- u32 nSharedMemSize)
-{
- struct SCXLNX_GENERIC_SMC sGenericSMC;
-
- sGenericSMC.reg0 = SCX_SMC_WAKE_UP;
- sGenericSMC.reg1 = nSharedMemStartOffset;
- /* long form command */
- sGenericSMC.reg2 = nSharedMemSize | 0x80000000;
- sGenericSMC.reg3 = nL1SharedBufferDescriptor;
- sGenericSMC.reg4 = 0;
-
- SCXLNXCommCallGenericSMC(&sGenericSMC);
-
- if (sGenericSMC.reg0 != S_SUCCESS)
- printk(KERN_ERR "SCXLNXCommCallWakeUpSMC:"
- " r0=0x%08X upon return (expected 0x%08X)!\n",
- sGenericSMC.reg0,
- S_SUCCESS);
-
- return sGenericSMC.reg0;
-}
-
-/*
- * Calls the N-Yield SMC.
- */
-static inline void SCXLNXCommCallNYieldSMC(void)
-{
- struct SCXLNX_GENERIC_SMC sGenericSMC;
-
- sGenericSMC.reg0 = SCX_SMC_N_YIELD;
- sGenericSMC.reg1 = 0;
- sGenericSMC.reg2 = 0;
- sGenericSMC.reg3 = 0;
- sGenericSMC.reg4 = 0;
-
- SCXLNXCommCallGenericSMC(&sGenericSMC);
-}
-
-/* Yields the Secure World */
-int tf_schedule_secure_world(struct SCXLNX_COMM *pComm, bool prepare_exit)
-{
- SCXLNXCommSetCurrentTime(pComm);
-
- /* yield to the Secure World */
- SCXLNXCommCallNYieldSMC();
-
- return 0;
-}
-
-/*
- * Returns the L2 descriptor for the specified user page.
- */
-
-#define L2_INIT_DESCRIPTOR_BASE (0x00000003)
-#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4)
-
-static u32 SCXLNXCommGetL2InitDescriptor(void *pVirtAddr)
-{
- struct page *pPage;
- u32 nVirtAddr;
- u32 nPhysAddr;
- u32 nDescriptor;
-
- nDescriptor = L2_INIT_DESCRIPTOR_BASE;
- nVirtAddr = (u32) pVirtAddr;
-
- /* get physical address and add to nDescriptor */
- pPage = virt_to_page(pVirtAddr);
- nPhysAddr = page_to_phys(pPage);
- nDescriptor |= (nPhysAddr & L2_DESCRIPTOR_ADDR_MASK);
-
- /* Add virtual address v[13:12] bits to nDescriptor */
- nDescriptor |= (DESCRIPTOR_V13_12_GET(nVirtAddr)
- << L2_INIT_DESCRIPTOR_V13_12_SHIFT);
-
- nDescriptor |= SCXLNXCommGetL2DescriptorCommon(nVirtAddr, &init_mm);
-
-
- return nDescriptor;
-}
-
-
-/*----------------------------------------------------------------------------
- * Power management
- *----------------------------------------------------------------------------*/
-
-/*
- * Free the memory used by the W3B buffer for the specified comm.
- * This function does nothing if no W3B buffer is allocated for the device.
- */
-static inline void SCXLNXCommFreeW3B(struct SCXLNX_COMM *pComm)
-{
- SCXLNXCommReleaseSharedMemory(
- &(pComm->sW3BAllocationContext),
- &(pComm->sW3BShmemDesc),
- 0);
-
- SCXLNXReleaseCoarsePageTableAllocator(&(pComm->sW3BAllocationContext));
-
- internal_vfree((void *)pComm->nW3BShmemVAddr);
- pComm->nW3BShmemVAddr = 0;
- pComm->nW3BShmemSize = 0;
- clear_bit(SCXLNX_COMM_FLAG_W3B_ALLOCATED, &(pComm->nFlags));
-}
-
-
-/*
- * Allocates the W3B buffer for the specified comm.
- * Returns zero upon successful completion, or an appropriate error code upon
- * failure.
- */
-static inline int SCXLNXCommAllocateW3B(struct SCXLNX_COMM *pComm)
-{
- int nError;
- u32 nFlags;
- u32 nConfigFlags_S;
- u32 *pW3BDescriptors;
- u32 nW3BDescriptorCount;
- u32 nW3BCurrentSize;
-
- nConfigFlags_S = SCXLNXCommReadReg32(&pComm->pBuffer->nConfigFlags_S);
-
-retry:
- if ((test_bit(SCXLNX_COMM_FLAG_W3B_ALLOCATED, &(pComm->nFlags))) == 0) {
- /*
- * Initialize the shared memory for the W3B
- */
- SCXLNXInitializeCoarsePageTableAllocator(
- &pComm->sW3BAllocationContext);
- } else {
- /*
- * The W3B is allocated but do we have to reallocate a bigger
- * one?
- */
- /* Check H bit */
- if ((nConfigFlags_S & (1<<4)) != 0) {
- /* The size of the W3B may change after SMC_INIT */
- /* Read the current value */
- nW3BCurrentSize = SCXLNXCommReadReg32(
- &pComm->pBuffer->nW3BSizeCurrent_S);
- if (pComm->nW3BShmemSize > nW3BCurrentSize)
- return 0;
-
- SCXLNXCommFreeW3B(pComm);
- goto retry;
- } else {
- return 0;
- }
- }
-
- /* check H bit */
- if ((nConfigFlags_S & (1<<4)) != 0)
- /* The size of the W3B may change after SMC_INIT */
- /* Read the current value */
- pComm->nW3BShmemSize = SCXLNXCommReadReg32(
- &pComm->pBuffer->nW3BSizeCurrent_S);
- else
- pComm->nW3BShmemSize = SCXLNXCommReadReg32(
- &pComm->pBuffer->nW3BSizeMax_S);
-
- pComm->nW3BShmemVAddr = (u32) internal_vmalloc(pComm->nW3BShmemSize);
- if (pComm->nW3BShmemVAddr == 0) {
- printk(KERN_ERR "SCXLNXCommAllocateW3B():"
- " Out of memory for W3B buffer (%u bytes)!\n",
- (unsigned int)(pComm->nW3BShmemSize));
- nError = -ENOMEM;
- goto error;
- }
-
- /* initialize the sW3BShmemDesc structure */
- pComm->sW3BShmemDesc.nType = SCXLNX_SHMEM_TYPE_PM_HIBERNATE;
- INIT_LIST_HEAD(&(pComm->sW3BShmemDesc.list));
-
- nFlags = (SCX_SHMEM_TYPE_READ | SCX_SHMEM_TYPE_WRITE);
-
- /* directly point to the L1 shared buffer W3B descriptors */
- pW3BDescriptors = pComm->pBuffer->nW3BDescriptors;
-
- /*
- * SCXLNXCommFillDescriptorTable uses the following parameter as an
- * IN/OUT
- */
-
- nError = SCXLNXCommFillDescriptorTable(
- &(pComm->sW3BAllocationContext),
- &(pComm->sW3BShmemDesc),
- pComm->nW3BShmemVAddr,
- NULL,
- pW3BDescriptors,
- &(pComm->nW3BShmemSize),
- &(pComm->nW3BShmemOffset),
- false,
- nFlags,
- &nW3BDescriptorCount);
- if (nError != 0) {
- printk(KERN_ERR "SCXLNXCommAllocateW3B():"
- " SCXLNXCommFillDescriptorTable failed with "
- "error code 0x%08x!\n",
- nError);
- goto error;
- }
-
- set_bit(SCXLNX_COMM_FLAG_W3B_ALLOCATED, &(pComm->nFlags));
-
- /* successful completion */
- return 0;
-
-error:
- SCXLNXCommFreeW3B(pComm);
-
- return nError;
-}
-
-/*
- * Perform a Secure World shutdown operation.
- * The routine does not return if the operation succeeds.
- * the routine returns an appropriate error code if
- * the operation fails.
- */
-int SCXLNXCommShutdown(struct SCXLNX_COMM *pComm)
-{
-#ifdef CONFIG_TFN
- /* this function is useless for the TEGRA product */
- return 0;
-#else
- int nError;
- union SCX_COMMAND_MESSAGE sMessage;
- union SCX_ANSWER_MESSAGE sAnswer;
-
- dprintk(KERN_INFO "SCXLNXCommShutdown()\n");
-
- memset(&sMessage, 0, sizeof(sMessage));
-
- sMessage.sHeader.nMessageType = SCX_MESSAGE_TYPE_MANAGEMENT;
- sMessage.sHeader.nMessageSize =
- (sizeof(struct SCX_COMMAND_MANAGEMENT) -
- sizeof(struct SCX_COMMAND_HEADER))/sizeof(u32);
-
- sMessage.sManagementMessage.nCommand = SCX_MANAGEMENT_SHUTDOWN;
-
- nError = SCXLNXCommSendReceive(
- pComm,
- &sMessage,
- &sAnswer,
- NULL,
- false);
-
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXCommShutdown(): "
- "SCXLNXCommSendReceive failed (error %d)!\n",
- nError);
- return nError;
- }
-
-#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
- if (sAnswer.sHeader.nErrorCode != 0)
- dprintk(KERN_ERR "tf_driver: shutdown failed.\n");
- else
- dprintk(KERN_INFO "tf_driver: shutdown succeeded.\n");
-#endif
-
- return sAnswer.sHeader.nErrorCode;
-#endif
-}
-
-
-/*
- * Perform a Secure World hibernate operation.
- * The routine does not return if the operation succeeds.
- * the routine returns an appropriate error code if
- * the operation fails.
- */
-int SCXLNXCommHibernate(struct SCXLNX_COMM *pComm)
-{
-#ifdef CONFIG_TFN
- /* this function is useless for the TEGRA product */
- return 0;
-#else
- int nError;
- union SCX_COMMAND_MESSAGE sMessage;
- union SCX_ANSWER_MESSAGE sAnswer;
- u32 nFirstCommand;
- u32 nFirstFreeCommand;
-
- dprintk(KERN_INFO "SCXLNXCommHibernate()\n");
-
- nError = SCXLNXCommAllocateW3B(pComm);
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXCommHibernate(): "
- "SCXLNXCommAllocateW3B failed (error %d)!\n",
- nError);
- return nError;
- }
-
- /*
- * As the polling thread is already hibernating, we
- * should send the message and receive the answer ourself
- */
-
- /* build the "prepare to hibernate" message */
- sMessage.sHeader.nMessageType = SCX_MESSAGE_TYPE_MANAGEMENT;
- sMessage.sManagementMessage.nCommand = SCX_MANAGEMENT_HIBERNATE;
- /* Long Form Command */
- sMessage.sManagementMessage.nSharedMemDescriptors[0] = 0;
- sMessage.sManagementMessage.nSharedMemDescriptors[1] = 0;
- sMessage.sManagementMessage.nW3BSize =
- pComm->nW3BShmemSize | 0x80000000;
- sMessage.sManagementMessage.nW3BStartOffset =
- pComm->nW3BShmemOffset;
- sMessage.sHeader.nOperationID = (u32) &sAnswer;
-
- SCXLNXDumpMessage(&sMessage);
-
- /* find a slot to send the message in */
-
- /* AFY: why not use the function SCXLNXCommSendReceive?? We are
- * duplicating a lot of subtle code here. And it's not going to be
- * tested because power management is currently not supported by the
- * secure world. */
- for (;;) {
- int nQueueWordsCount, nCommandSize;
-
- spin_lock(&(pComm->lock));
-
- nFirstCommand = SCXLNXCommReadReg32(
- &pComm->pBuffer->nFirstCommand);
- nFirstFreeCommand = SCXLNXCommReadReg32(
- &pComm->pBuffer->nFirstFreeCommand);
-
- nQueueWordsCount = nFirstFreeCommand - nFirstCommand;
- nCommandSize = sMessage.sHeader.nMessageSize
- + sizeof(struct SCX_COMMAND_HEADER);
- if ((nQueueWordsCount + nCommandSize) <
- SCX_N_MESSAGE_QUEUE_CAPACITY) {
- /* Command queue is not full */
- memcpy(&pComm->pBuffer->sCommandQueue[
- nFirstFreeCommand %
- SCX_N_MESSAGE_QUEUE_CAPACITY],
- &sMessage,
- nCommandSize * sizeof(u32));
-
- SCXLNXCommWriteReg32(&pComm->pBuffer->nFirstFreeCommand,
- nFirstFreeCommand + nCommandSize);
-
- spin_unlock(&(pComm->lock));
- break;
- }
-
- spin_unlock(&(pComm->lock));
- (void)tf_schedule_secure_world(pComm, false);
- }
-
- /* now wait for the answer, dispatching other answers */
- while (1) {
- u32 nFirstAnswer;
- u32 nFirstFreeAnswer;
-
- /* check all the answers */
- nFirstFreeAnswer = SCXLNXCommReadReg32(
- &pComm->pBuffer->nFirstFreeAnswer);
- nFirstAnswer = SCXLNXCommReadReg32(
- &pComm->pBuffer->nFirstAnswer);
-
- if (nFirstAnswer != nFirstFreeAnswer) {
- int bFoundAnswer = 0;
-
- do {
- /* answer queue not empty */
- union SCX_ANSWER_MESSAGE sComAnswer;
- struct SCX_ANSWER_HEADER sHeader;
- /* size of the command in words of 32bit */
- int nCommandSize;
-
- /* get the nMessageSize */
- memcpy(&sHeader,
- &pComm->pBuffer->sAnswerQueue[
- nFirstAnswer %
- SCX_S_ANSWER_QUEUE_CAPACITY],
- sizeof(struct SCX_ANSWER_HEADER));
- nCommandSize = sHeader.nMessageSize +
- sizeof(struct SCX_ANSWER_HEADER);
-
- /*
- * NOTE: nMessageSize is the number of words
- * following the first word
- */
- memcpy(&sComAnswer,
- &pComm->pBuffer->sAnswerQueue[
- nFirstAnswer %
- SCX_S_ANSWER_QUEUE_CAPACITY],
- nCommandSize * sizeof(u32));
-
- SCXLNXDumpAnswer(&sComAnswer);
-
- if (sComAnswer.sHeader.nOperationID ==
- (u32) &sAnswer) {
- /*
- * this is the answer to the "prepare to
- * hibernate" message
- */
- memcpy(&sAnswer,
- &sComAnswer,
- nCommandSize * sizeof(u32));
-
- bFoundAnswer = 1;
- SCXLNXCommWriteReg32(
- &pComm->pBuffer->nFirstAnswer,
- nFirstAnswer + nCommandSize);
- break;
- } else {
- /*
- * this is a standard message answer,
- * dispatch it
- */
- struct SCXLNX_ANSWER_STRUCT
- *pAnswerStructure;
-
- pAnswerStructure =
- (struct SCXLNX_ANSWER_STRUCT *)
- sComAnswer.sHeader.nOperationID;
-
- memcpy(pAnswerStructure->pAnswer,
- &sComAnswer,
- nCommandSize * sizeof(u32));
-
- pAnswerStructure->bAnswerCopied = true;
- }
-
- SCXLNXCommWriteReg32(
- &pComm->pBuffer->nFirstAnswer,
- nFirstAnswer + nCommandSize);
- } while (nFirstAnswer != nFirstFreeAnswer);
-
- if (bFoundAnswer)
- break;
- }
-
- /*
- * since the Secure World is at least running the "prepare to
- * hibernate" message, its timeout must be immediate So there is
- * no need to check its timeout and schedule() the current
- * thread
- */
- (void)tf_schedule_secure_world(pComm, false);
- } /* while (1) */
-
- printk(KERN_INFO "tf_driver: hibernate.\n");
- return 0;
-#endif
-}
-
-
-/*
- * Perform a Secure World resume operation.
- * The routine returns once the Secure World is active again
- * or if an error occurs during the "resume" process
- */
-int SCXLNXCommResume(struct SCXLNX_COMM *pComm)
-{
-#ifdef CONFIG_TFN
- /* this function is useless for the TEGRA product */
- return 0;
-#else
- int nError;
- u32 nStatus;
-
- dprintk(KERN_INFO "SCXLNXCommResume()\n");
-
- nError = SCXLNXCommCallWakeUpSMC(
- SCXLNXCommGetL2InitDescriptor(pComm->pBuffer),
- pComm->nW3BShmemOffset,
- pComm->nW3BShmemSize);
-
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXCommResume(): "
- "SCXLNXCommCallWakeUpSMC failed (error %d)!\n",
- nError);
- return nError;
- }
-
- nStatus = ((SCXLNXCommReadReg32(&(pComm->pBuffer->nStatus_S))
- & SCX_STATUS_POWER_STATE_MASK)
- >> SCX_STATUS_POWER_STATE_SHIFT);
-
- while ((nStatus != SCX_POWER_MODE_ACTIVE)
- && (nStatus != SCX_POWER_MODE_PANIC)) {
- SCXLNXCommCallNYieldSMC();
-
- nStatus = ((SCXLNXCommReadReg32(&(pComm->pBuffer->nStatus_S))
- & SCX_STATUS_POWER_STATE_MASK)
- >> SCX_STATUS_POWER_STATE_SHIFT);
-
- /*
- * As this may last quite a while, call the kernel scheduler to
- * hand over CPU for other operations
- */
- schedule();
- }
-
- switch (nStatus) {
- case SCX_POWER_MODE_ACTIVE:
- break;
-
- case SCX_POWER_MODE_PANIC:
- dprintk(KERN_ERR "SCXLNXCommResume(): "
- "Secure World POWER_MODE_PANIC!\n");
- return -EINVAL;
-
- default:
- dprintk(KERN_ERR "SCXLNXCommResume(): "
- "unexpected Secure World POWER_MODE (%d)!\n", nStatus);
- return -EINVAL;
- }
-
- dprintk(KERN_INFO "SCXLNXCommResume() succeeded\n");
- return 0;
-#endif
-}
-
-/*----------------------------------------------------------------------------
- * Communication initialization and termination
- *----------------------------------------------------------------------------*/
-
-/*
- * Handles the software interrupts issued by the Secure World.
- */
-static irqreturn_t SCXLNXCommSoftIntHandler(int irq, void *dev_id)
-{
- struct SCXLNX_COMM *pComm = (struct SCXLNX_COMM *) dev_id;
-
- if (pComm->pBuffer == NULL)
- return IRQ_NONE;
-
- if ((SCXLNXCommReadReg32(&pComm->pBuffer->nStatus_S) &
- SCX_STATUS_P_MASK) == 0)
- /* interrupt not issued by the Trusted Foundations Software */
- return IRQ_NONE;
-
- SCXLNXCommCallResetIrqSMC();
-
- /* signal N_SM_EVENT */
- wake_up(&pComm->waitQueue);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Initializes the communication with the Secure World.
- * The L1 shared buffer is allocated and the Secure World
- * is yielded for the first time.
- * returns successfuly once the communication with
- * the Secure World is up and running
- *
- * Returns 0 upon success or appropriate error code
- * upon failure
- */
-int SCXLNXCommInit(struct SCXLNX_COMM *pComm)
-{
- int nError;
- struct page *pBufferPage;
- u32 nProtocolVersion;
-
- dprintk(KERN_INFO "SCXLNXCommInit()\n");
-
- spin_lock_init(&(pComm->lock));
- pComm->nFlags = 0;
- pComm->pBuffer = NULL;
- init_waitqueue_head(&(pComm->waitQueue));
-
- /*
- * Check the Secure World protocol version is the expected one.
- */
- SCXLNXCommCallGetProtocolVersionSMC(&nProtocolVersion);
-
- if ((GET_PROTOCOL_MAJOR_VERSION(nProtocolVersion))
- != SCX_S_PROTOCOL_MAJOR_VERSION) {
- printk(KERN_ERR "SCXLNXCommInit():"
- " Unsupported Secure World Major Version "
- "(0x%02X, expected 0x%02X)!\n",
- GET_PROTOCOL_MAJOR_VERSION(nProtocolVersion),
- SCX_S_PROTOCOL_MAJOR_VERSION);
- nError = -EIO;
- goto error;
- }
-
- /*
- * Register the software interrupt handler if required to.
- */
- if (pComm->nSoftIntIrq != -1) {
- dprintk(KERN_INFO "SCXLNXCommInit(): "
- "Registering software interrupt handler (IRQ %d)\n",
- pComm->nSoftIntIrq);
-
- nError = request_irq(pComm->nSoftIntIrq,
- SCXLNXCommSoftIntHandler,
- IRQF_SHARED,
- SCXLNX_DEVICE_BASE_NAME,
- pComm);
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXCommInit(): "
- "request_irq failed for irq %d (error %d)\n",
- pComm->nSoftIntIrq, nError);
- goto error;
- }
- set_bit(SCXLNX_COMM_FLAG_IRQ_REQUESTED, &(pComm->nFlags));
- }
-
- /*
- * Allocate and initialize the L1 shared buffer.
- */
- pComm->pBuffer = (void *) internal_get_zeroed_page(GFP_KERNEL);
- if (pComm->pBuffer == NULL) {
- printk(KERN_ERR "SCXLNXCommInit():"
- " get_zeroed_page failed for L1 shared buffer!\n");
- nError = -ENOMEM;
- goto error;
- }
-
- /*
- * Ensure the page storing the L1 shared buffer is mapped.
- */
- pBufferPage = virt_to_page(pComm->pBuffer);
- trylock_page(pBufferPage);
-
- dprintk(KERN_INFO "SCXLNXCommInit(): "
- "L1 shared buffer allocated at virtual:%p, "
- "physical:%p (page:%p)\n",
- pComm->pBuffer,
- (void *)virt_to_phys(pComm->pBuffer),
- pBufferPage);
-
- set_bit(SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED, &(pComm->nFlags));
-
- /*
- * Init SMC
- */
- nError = SCXLNXCommCallInitSMC(
- SCXLNXCommGetL2InitDescriptor(pComm->pBuffer));
- if (nError != S_SUCCESS) {
- dprintk(KERN_ERR "SCXLNXCommInit(): "
- "SCXLNXCommCallInitSMC failed (error 0x%08X)!\n",
- nError);
- goto error;
- }
-
- /*
- * check whether the interrupts are actually enabled
- * If not, remove irq handler
- */
- if ((SCXLNXCommReadReg32(&pComm->pBuffer->nConfigFlags_S) &
- SCX_CONFIG_FLAG_S) == 0) {
- if (test_and_clear_bit(SCXLNX_COMM_FLAG_IRQ_REQUESTED,
- &(pComm->nFlags)) != 0) {
- dprintk(KERN_INFO "SCXLNXCommInit(): "
- "Interrupts not used, unregistering "
- "softint (IRQ %d)\n",
- pComm->nSoftIntIrq);
-
- free_irq(pComm->nSoftIntIrq, pComm);
- }
- } else {
- if (test_bit(SCXLNX_COMM_FLAG_IRQ_REQUESTED,
- &(pComm->nFlags)) == 0) {
- /*
- * Interrupts are enabled in the Secure World, but not
- * handled by driver
- */
- dprintk(KERN_ERR "SCXLNXCommInit(): "
- "soft_interrupt argument not provided\n");
- nError = -EINVAL;
- goto error;
- }
- }
-
- /*
- * Successful completion.
- */
-
- /* yield for the first time */
- (void)tf_schedule_secure_world(pComm, false);
-
- dprintk(KERN_INFO "SCXLNXCommInit(): Success\n");
- return S_SUCCESS;
-
-error:
- /*
- * Error handling.
- */
- dprintk(KERN_INFO "SCXLNXCommInit(): Failure (error %d)\n",
- nError);
- SCXLNXCommTerminate(pComm);
- return nError;
-}
-
-
-/*
- * Attempt to terminate the communication with the Secure World.
- * The L1 shared buffer is freed.
- * Calling this routine terminates definitaly the communication
- * with the Secure World : there is no way to inform the Secure World of a new
- * L1 shared buffer to be used once it has been initialized.
- */
-void SCXLNXCommTerminate(struct SCXLNX_COMM *pComm)
-{
- dprintk(KERN_INFO "SCXLNXCommTerminate()\n");
-
- set_bit(SCXLNX_COMM_FLAG_TERMINATING, &(pComm->nFlags));
-
- if ((test_bit(SCXLNX_COMM_FLAG_W3B_ALLOCATED,
- &(pComm->nFlags))) != 0) {
- dprintk(KERN_INFO "SCXLNXCommTerminate(): "
- "Freeing the W3B buffer...\n");
- SCXLNXCommFreeW3B(pComm);
- }
-
- if ((test_bit(SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED,
- &(pComm->nFlags))) != 0) {
- __clear_page_locked(virt_to_page(pComm->pBuffer));
- internal_free_page((unsigned long) pComm->pBuffer);
- }
-
- if ((test_bit(SCXLNX_COMM_FLAG_IRQ_REQUESTED,
- &(pComm->nFlags))) != 0) {
- dprintk(KERN_INFO "SCXLNXCommTerminate(): "
- "Unregistering softint (IRQ %d)\n",
- pComm->nSoftIntIrq);
- free_irq(pComm->nSoftIntIrq, pComm);
- }
-}
diff --git a/security/tf_driver/scxlnx_conn.c b/security/tf_driver/scxlnx_conn.c
deleted file mode 100644
index cac8e0e795e2..000000000000
--- a/security/tf_driver/scxlnx_conn.c
+++ /dev/null
@@ -1,1530 +0,0 @@
-/*
- * Copyright (c) 2006-2010 Trusted Logic S.A.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
- */
-
-#include <asm/atomic.h>
-#include <linux/uaccess.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/types.h>
-
-#include "s_version.h"
-
-#include "scx_protocol.h"
-#include "scxlnx_defs.h"
-#include "scxlnx_util.h"
-#include "scxlnx_comm.h"
-#include "scxlnx_conn.h"
-
-#ifdef CONFIG_TF_ZEBRA
-#include "scx_public_crypto.h"
-#endif
-
-/*----------------------------------------------------------------------------
- * Management of the shared memory blocks.
- *
- * Shared memory blocks are the blocks registered through
- * the commands REGISTER_SHARED_MEMORY and POWER_MANAGEMENT
- *----------------------------------------------------------------------------*/
-
-/**
- * Unmaps a shared memory
- **/
-static void SCXLNXConnUnmapShmem(
- struct SCXLNX_CONNECTION *pConn,
- struct SCXLNX_SHMEM_DESC *pShmemDesc,
- u32 nFullCleanup)
-{
- /* check pShmemDesc contains a descriptor */
- if (pShmemDesc == NULL)
- return;
-
- dprintk(KERN_DEBUG "SCXLNXConnUnmapShmem(%p)\n", pShmemDesc);
-
-retry:
- mutex_lock(&(pConn->sharedMemoriesMutex));
- if (atomic_read(&pShmemDesc->nRefCnt) > 1) {
- /*
- * Shared mem still in use, wait for other operations completion
- * before actually unmapping it.
- */
- dprintk(KERN_INFO "Descriptor in use\n");
- mutex_unlock(&(pConn->sharedMemoriesMutex));
- schedule();
- goto retry;
- }
-
- SCXLNXCommReleaseSharedMemory(
- &(pConn->sAllocationContext),
- pShmemDesc,
- nFullCleanup);
-
- list_del(&(pShmemDesc->list));
-
- if ((pShmemDesc->nType == SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM) ||
- (nFullCleanup != 0)) {
- internal_kfree(pShmemDesc);
-
- atomic_dec(&(pConn->nShmemAllocated));
- } else {
- /*
- * This is a preallocated shared memory, add to free list
- * Since the device context is unmapped last, it is
- * always the first element of the free list if no
- * device context has been created
- */
- pShmemDesc->hIdentifier = 0;
- list_add(&(pShmemDesc->list), &(pConn->sFreeSharedMemoryList));
- }
-
- mutex_unlock(&(pConn->sharedMemoriesMutex));
-}
-
-
-/**
- * Find the first available slot for a new block of shared memory
- * and map the user buffer.
- * Update the pDescriptors to L1 descriptors
- * Update the pBufferStartOffset and pBufferSize fields
- * pShmemDesc is updated to the mapped shared memory descriptor
- **/
-static int SCXLNXConnMapShmem(
- struct SCXLNX_CONNECTION *pConn,
- u32 nBufferVAddr,
- /* flags for read-write access rights on the memory */
- u32 nFlags,
- bool bInUserSpace,
- u32 pDescriptors[SCX_MAX_COARSE_PAGES],
- u32 *pBufferStartOffset,
- u32 *pBufferSize,
- struct SCXLNX_SHMEM_DESC **ppShmemDesc,
- u32 *pnDescriptorCount)
-{
- struct SCXLNX_SHMEM_DESC *pShmemDesc = NULL;
- int nError;
-
- dprintk(KERN_INFO "SCXLNXConnMapShmem(%p, %p, flags = 0x%08x)\n",
- pConn,
- (void *) nBufferVAddr,
- nFlags);
-
- mutex_lock(&(pConn->sharedMemoriesMutex));
-
- /*
- * Check the list of free shared memory
- * is not empty
- */
- if (list_empty(&(pConn->sFreeSharedMemoryList))) {
- if (atomic_read(&(pConn->nShmemAllocated)) ==
- SCXLNX_SHMEM_MAX_COUNT) {
- printk(KERN_ERR "SCXLNXConnMapShmem(%p):"
- " maximum shared memories already registered\n",
- pConn);
- nError = -ENOMEM;
- goto error;
- }
-
- atomic_inc(&(pConn->nShmemAllocated));
-
- /* no descriptor available, allocate a new one */
-
- pShmemDesc = (struct SCXLNX_SHMEM_DESC *) internal_kmalloc(
- sizeof(*pShmemDesc), GFP_KERNEL);
- if (pShmemDesc == NULL) {
- printk(KERN_ERR "SCXLNXConnMapShmem(%p):"
- " failed to allocate descriptor\n",
- pConn);
- nError = -ENOMEM;
- goto error;
- }
-
- /* Initialize the structure */
- pShmemDesc->nType = SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM;
- atomic_set(&pShmemDesc->nRefCnt, 1);
- INIT_LIST_HEAD(&(pShmemDesc->list));
- } else {
- /* take the first free shared memory descriptor */
- pShmemDesc = list_entry(pConn->sFreeSharedMemoryList.next,
- struct SCXLNX_SHMEM_DESC, list);
- list_del(&(pShmemDesc->list));
- }
-
- /* Add the descriptor to the used list */
- list_add(&(pShmemDesc->list), &(pConn->sUsedSharedMemoryList));
-
- nError = SCXLNXCommFillDescriptorTable(
- &(pConn->sAllocationContext),
- pShmemDesc,
- nBufferVAddr,
- pConn->ppVmas,
- pDescriptors,
- pBufferSize,
- pBufferStartOffset,
- bInUserSpace,
- nFlags,
- pnDescriptorCount);
-
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXConnMapShmem(%p):"
- " SCXLNXCommFillDescriptorTable failed with error "
- "code %d!\n",
- pConn,
- nError);
- goto error;
- }
- pShmemDesc->pBuffer = (u8 *) nBufferVAddr;
-
- /*
- * Successful completion.
- */
- *ppShmemDesc = pShmemDesc;
- mutex_unlock(&(pConn->sharedMemoriesMutex));
- dprintk(KERN_DEBUG "SCXLNXConnMapShmem: success\n");
- return 0;
-
-
- /*
- * Error handling.
- */
-error:
- mutex_unlock(&(pConn->sharedMemoriesMutex));
- dprintk(KERN_ERR "SCXLNXConnMapShmem: failure with error code %d\n",
- nError);
-
- SCXLNXConnUnmapShmem(
- pConn,
- pShmemDesc,
- 0);
-
- return nError;
-}
-
-
-
-/* This function is a copy of the find_vma() function
-in linux kernel 2.6.15 version with some fixes :
- - memory block may end on vm_end
- - check the full memory block is in the memory area
- - guarantee NULL is returned if no memory area is found */
-struct vm_area_struct *SCXLNXConnFindVma(struct mm_struct *mm,
- unsigned long addr, unsigned long size)
-{
- struct vm_area_struct *vma = NULL;
-
- dprintk(KERN_INFO
- "SCXLNXConnFindVma addr=0x%lX size=0x%lX\n", addr, size);
-
- if (mm) {
- /* Check the cache first. */
- /* (Cache hit rate is typically around 35%.) */
- vma = mm->mmap_cache;
- if (!(vma && vma->vm_end >= (addr+size) &&
- vma->vm_start <= addr)) {
- struct rb_node *rb_node;
-
- rb_node = mm->mm_rb.rb_node;
- vma = NULL;
-
- while (rb_node) {
- struct vm_area_struct *vma_tmp;
-
- vma_tmp = rb_entry(rb_node,
- struct vm_area_struct, vm_rb);
-
- dprintk(KERN_INFO
- "vma_tmp->vm_start=0x%lX"
- "vma_tmp->vm_end=0x%lX\n",
- vma_tmp->vm_start,
- vma_tmp->vm_end);
-
- if (vma_tmp->vm_end >= (addr+size)) {
- vma = vma_tmp;
- if (vma_tmp->vm_start <= addr)
- break;
-
- rb_node = rb_node->rb_left;
- } else {
- rb_node = rb_node->rb_right;
- }
- }
-
- if (vma)
- mm->mmap_cache = vma;
- if (rb_node == NULL)
- vma = NULL;
- }
- }
- return vma;
-}
-
-static int SCXLNXConnValidateSharedMemoryBlockAndFlags(
- void *pSharedMemory,
- u32 nSharedMemorySize,
- u32 nFlags)
-{
- struct vm_area_struct *vma;
- unsigned long nSharedMemory = (unsigned long) pSharedMemory;
- u32 nChunk;
-
- if (nSharedMemorySize == 0)
- /* This is always valid */
- return 0;
-
- if ((nSharedMemory + nSharedMemorySize) < nSharedMemory)
- /* Overflow */
- return -EINVAL;
-
- down_read(&current->mm->mmap_sem);
-
- /*
- * When looking for a memory address, split buffer into chunks of
- * size=PAGE_SIZE.
- */
- nChunk = PAGE_SIZE - (nSharedMemory & (PAGE_SIZE-1));
- if (nChunk > nSharedMemorySize)
- nChunk = nSharedMemorySize;
-
- do {
- vma = SCXLNXConnFindVma(current->mm, nSharedMemory, nChunk);
-
- if (vma == NULL)
- goto error;
-
- if (nFlags & SCX_SHMEM_TYPE_READ)
- if (!(vma->vm_flags & VM_READ))
- goto error;
- if (nFlags & SCX_SHMEM_TYPE_WRITE)
- if (!(vma->vm_flags & VM_WRITE))
- goto error;
-
- nSharedMemorySize -= nChunk;
- nSharedMemory += nChunk;
- nChunk = (nSharedMemorySize <= PAGE_SIZE ?
- nSharedMemorySize : PAGE_SIZE);
- } while (nSharedMemorySize != 0);
-
- up_read(&current->mm->mmap_sem);
- return 0;
-
-error:
- up_read(&current->mm->mmap_sem);
- dprintk(KERN_ERR "SCXLNXConnValidateSharedMemoryBlockAndFlags: "
- "return error\n");
- return -EFAULT;
-}
-
-
-static int SCXLNXConnMapTempShMem(struct SCXLNX_CONNECTION *pConn,
- struct SCX_COMMAND_PARAM_TEMP_MEMREF *pTempMemRef,
- u32 nParamType,
- struct SCXLNX_SHMEM_DESC **ppShmemDesc)
-{
- u32 nFlags;
- u32 nError = S_SUCCESS;
-
- dprintk(KERN_INFO "SCXLNXConnMapTempShMem(%p, "
- "0x%08x[size=0x%08x], offset=0x%08x)\n",
- pConn,
- pTempMemRef->nDescriptor,
- pTempMemRef->nSize,
- pTempMemRef->nOffset);
-
- switch (nParamType) {
- case SCX_PARAM_TYPE_MEMREF_TEMP_INPUT:
- nFlags = SCX_SHMEM_TYPE_READ;
- break;
- case SCX_PARAM_TYPE_MEMREF_TEMP_OUTPUT:
- nFlags = SCX_SHMEM_TYPE_WRITE;
- break;
- case SCX_PARAM_TYPE_MEMREF_TEMP_INOUT:
- nFlags = SCX_SHMEM_TYPE_WRITE | SCX_SHMEM_TYPE_READ;
- break;
- default:
- nError = -EINVAL;
- goto error;
- }
-
- if (pTempMemRef->nDescriptor == 0) {
- /* NULL tmpref */
- pTempMemRef->nOffset = 0;
- *ppShmemDesc = NULL;
- } else if ((pTempMemRef->nDescriptor != 0) &&
- (pTempMemRef->nSize == 0)) {
- /* Empty tmpref */
- pTempMemRef->nOffset = pTempMemRef->nDescriptor;
- pTempMemRef->nDescriptor = 0;
- pTempMemRef->nSize = 0;
- *ppShmemDesc = NULL;
- } else {
- /* Map the temp shmem block */
-
- u32 nSharedMemDescriptors[SCX_MAX_COARSE_PAGES];
- u32 nDescriptorCount;
-
- nError = SCXLNXConnValidateSharedMemoryBlockAndFlags(
- (void *) pTempMemRef->nDescriptor,
- pTempMemRef->nSize,
- nFlags);
- if (nError != 0)
- goto error;
-
- nError = SCXLNXConnMapShmem(
- pConn,
- pTempMemRef->nDescriptor,
- nFlags,
- true,
- nSharedMemDescriptors,
- &(pTempMemRef->nOffset),
- &(pTempMemRef->nSize),
- ppShmemDesc,
- &nDescriptorCount);
- pTempMemRef->nDescriptor = nSharedMemDescriptors[0];
- }
-
-error:
- return nError;
-}
-
-/*
- * Clean up a list of shared memory descriptors.
- */
-static void SCXLNXSharedMemoryCleanupList(
- struct SCXLNX_CONNECTION *pConn,
- struct list_head *pList)
-{
- while (!list_empty(pList)) {
- struct SCXLNX_SHMEM_DESC *pShmemDesc;
-
- pShmemDesc = list_entry(pList->next, struct SCXLNX_SHMEM_DESC,
- list);
-
- SCXLNXConnUnmapShmem(pConn, pShmemDesc, 1);
- }
-}
-
-
-/*
- * Clean up the shared memory information in the connection.
- * Releases all allocated pages.
- */
-void SCXLNXConnCleanupSharedMemory(struct SCXLNX_CONNECTION *pConn)
-{
- /* clean up the list of used and free descriptors.
- * done outside the mutex, because SCXLNXConnUnmapShmem already
- * mutex()ed
- */
- SCXLNXSharedMemoryCleanupList(pConn,
- &pConn->sUsedSharedMemoryList);
- SCXLNXSharedMemoryCleanupList(pConn,
- &pConn->sFreeSharedMemoryList);
-
- mutex_lock(&(pConn->sharedMemoriesMutex));
-
- /* Free the Vmas page */
- if (pConn->ppVmas) {
- internal_free_page((unsigned long) pConn->ppVmas);
- pConn->ppVmas = NULL;
- }
-
- SCXLNXReleaseCoarsePageTableAllocator(
- &(pConn->sAllocationContext));
-
- mutex_unlock(&(pConn->sharedMemoriesMutex));
-}
-
-
-/*
- * Initialize the shared memory in a connection.
- * Allocates the minimum memory to be provided
- * for shared memory management
- */
-int SCXLNXConnInitSharedMemory(struct SCXLNX_CONNECTION *pConn)
-{
- int nError;
- int nSharedMemoryDescriptorIndex;
- int nCoarsePageIndex;
-
- /*
- * We only need to initialize special elements and attempt to allocate
- * the minimum shared memory descriptors we want to support
- */
-
- mutex_init(&(pConn->sharedMemoriesMutex));
- INIT_LIST_HEAD(&(pConn->sFreeSharedMemoryList));
- INIT_LIST_HEAD(&(pConn->sUsedSharedMemoryList));
- atomic_set(&(pConn->nShmemAllocated), 0);
-
- SCXLNXInitializeCoarsePageTableAllocator(
- &(pConn->sAllocationContext));
-
-
- /*
- * Preallocate 3 pages to increase the chances that a connection
- * succeeds in allocating shared mem
- */
- for (nSharedMemoryDescriptorIndex = 0;
- nSharedMemoryDescriptorIndex < 3;
- nSharedMemoryDescriptorIndex++) {
- struct SCXLNX_SHMEM_DESC *pShmemDesc =
- (struct SCXLNX_SHMEM_DESC *) internal_kmalloc(
- sizeof(*pShmemDesc), GFP_KERNEL);
-
- if (pShmemDesc == NULL) {
- printk(KERN_ERR "SCXLNXConnInitSharedMemory(%p):"
- " failed to pre allocate descriptor %d\n",
- pConn,
- nSharedMemoryDescriptorIndex);
- nError = -ENOMEM;
- goto error;
- }
-
- for (nCoarsePageIndex = 0;
- nCoarsePageIndex < SCX_MAX_COARSE_PAGES;
- nCoarsePageIndex++) {
- struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable;
-
- pCoarsePageTable = SCXLNXAllocateCoarsePageTable(
- &(pConn->sAllocationContext),
- SCXLNX_PAGE_DESCRIPTOR_TYPE_PREALLOCATED);
-
- if (pCoarsePageTable == NULL) {
- printk(KERN_ERR "SCXLNXConnInitSharedMemory(%p)"
- ": descriptor %d coarse page %d - "
- "SCXLNXConnAllocateCoarsePageTable() "
- "failed\n",
- pConn,
- nSharedMemoryDescriptorIndex,
- nCoarsePageIndex);
- nError = -ENOMEM;
- goto error;
- }
-
- pShmemDesc->pCoarsePageTable[nCoarsePageIndex] =
- pCoarsePageTable;
- }
- pShmemDesc->nNumberOfCoarsePageTables = 0;
-
- pShmemDesc->nType = SCXLNX_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM;
- atomic_set(&pShmemDesc->nRefCnt, 1);
-
- /*
- * add this preallocated descriptor to the list of free
- * descriptors Keep the device context specific one at the
- * beginning of the list
- */
- INIT_LIST_HEAD(&(pShmemDesc->list));
- list_add_tail(&(pShmemDesc->list),
- &(pConn->sFreeSharedMemoryList));
- }
-
- /* allocate memory for the vmas structure */
- pConn->ppVmas =
- (struct vm_area_struct **) internal_get_zeroed_page(GFP_KERNEL);
- if (pConn->ppVmas == NULL) {
- printk(KERN_ERR "SCXLNXConnInitSharedMemory(%p):"
- " ppVmas - failed to get_zeroed_page\n",
- pConn);
- nError = -ENOMEM;
- goto error;
- }
-
- return 0;
-
-error:
- SCXLNXConnCleanupSharedMemory(pConn);
- return nError;
-}
-
-/*----------------------------------------------------------------------------
- * Connection operations to the Secure World
- *----------------------------------------------------------------------------*/
-
-int SCXLNXConnCreateDeviceContext(
- struct SCXLNX_CONNECTION *pConn)
-{
- union SCX_COMMAND_MESSAGE sMessage;
- union SCX_ANSWER_MESSAGE sAnswer;
- int nError = 0;
-
- dprintk(KERN_INFO "SCXLNXConnCreateDeviceContext(%p)\n",
- pConn);
-
- sMessage.sCreateDeviceContextMessage.nMessageType =
- SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT;
- sMessage.sCreateDeviceContextMessage.nMessageSize =
- (sizeof(struct SCX_COMMAND_CREATE_DEVICE_CONTEXT)
- - sizeof(struct SCX_COMMAND_HEADER))/sizeof(u32);
- sMessage.sCreateDeviceContextMessage.nOperationID = (u32) &sAnswer;
- sMessage.sCreateDeviceContextMessage.nDeviceContextID = (u32) pConn;
-
- nError = SCXLNXCommSendReceive(
- &pConn->pDevice->sm,
- &sMessage,
- &sAnswer,
- pConn,
- true);
-
- if ((nError != 0) ||
- (sAnswer.sCreateDeviceContextAnswer.nErrorCode != S_SUCCESS))
- goto error;
-
- /*
- * CREATE_DEVICE_CONTEXT succeeded,
- * store device context handler and update connection status
- */
- pConn->hDeviceContext =
- sAnswer.sCreateDeviceContextAnswer.hDeviceContext;
- spin_lock(&(pConn->stateLock));
- pConn->nState = SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT;
- spin_unlock(&(pConn->stateLock));
-
- /* successful completion */
- dprintk(KERN_INFO "SCXLNXConnCreateDeviceContext(%p):"
- " hDeviceContext=0x%08x\n",
- pConn,
- sAnswer.sCreateDeviceContextAnswer.hDeviceContext);
- return 0;
-
-error:
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXConnCreateDeviceContext failed with "
- "error %d\n", nError);
- } else {
- /*
- * We sent a DeviceCreateContext. The state is now
- * SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT It has to be
- * reset if we ever want to send a DeviceCreateContext again
- */
- spin_lock(&(pConn->stateLock));
- pConn->nState = SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT;
- spin_unlock(&(pConn->stateLock));
- dprintk(KERN_ERR "SCXLNXConnCreateDeviceContext failed with "
- "nErrorCode 0x%08X\n",
- sAnswer.sCreateDeviceContextAnswer.nErrorCode);
- if (sAnswer.sCreateDeviceContextAnswer.nErrorCode ==
- S_ERROR_OUT_OF_MEMORY)
- nError = -ENOMEM;
- else
- nError = -EFAULT;
- }
-
- return nError;
-}
-
-/* Check that the current application belongs to the
- * requested GID */
-static bool SCXLNXConnCheckGID(gid_t nRequestedGID)
-{
- if (nRequestedGID == current_egid()) {
- return true;
- } else {
- u32 nSize;
- u32 i;
- /* Look in the supplementary GIDs */
- get_group_info(GROUP_INFO);
- nSize = GROUP_INFO->ngroups;
- for (i = 0; i < nSize; i++)
- if (nRequestedGID == GROUP_AT(GROUP_INFO , i))
- return true;
- }
- return false;
-}
-
-/*
- * Opens a client session to the Secure World
- */
-int SCXLNXConnOpenClientSession(
- struct SCXLNX_CONNECTION *pConn,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer)
-{
- int nError = 0;
- struct SCXLNX_SHMEM_DESC *pShmemDesc[4] = {NULL};
- u32 i;
-
- dprintk(KERN_INFO "SCXLNXConnOpenClientSession(%p)\n", pConn);
-
- /*
- * Initialize the message size with no login data. This will be later
- * adjusted the the cases below
- */
- pMessage->sOpenClientSessionMessage.nMessageSize =
- (sizeof(struct SCX_COMMAND_OPEN_CLIENT_SESSION) - 20
- - sizeof(struct SCX_COMMAND_HEADER))/4;
-
- switch (pMessage->sOpenClientSessionMessage.nLoginType) {
- case SCX_LOGIN_PUBLIC:
- /* Nothing to do */
- break;
-
- case SCX_LOGIN_USER:
- /*
- * Send the EUID of the calling application in the login data.
- * Update message size.
- */
- *(u32 *) &pMessage->sOpenClientSessionMessage.sLoginData =
- current_euid();
-#ifndef CONFIG_ANDROID
- pMessage->sOpenClientSessionMessage.nLoginType =
- (u32) SCX_LOGIN_USER_LINUX_EUID;
-#else
- pMessage->sOpenClientSessionMessage.nLoginType =
- (u32) SCX_LOGIN_USER_ANDROID_EUID;
-#endif
-
- /* Added one word */
- pMessage->sOpenClientSessionMessage.nMessageSize += 1;
- break;
-
- case SCX_LOGIN_GROUP: {
- /* Check requested GID */
- gid_t nRequestedGID =
- *(u32 *) pMessage->sOpenClientSessionMessage.sLoginData;
-
- if (!SCXLNXConnCheckGID(nRequestedGID)) {
- dprintk(KERN_ERR "SCXLNXConnOpenClientSession(%p) "
- "SCX_LOGIN_GROUP: requested GID (0x%x) does "
- "not match real eGID (0x%x)"
- "or any of the supplementary GIDs\n",
- pConn, nRequestedGID, current_egid());
- nError = -EACCES;
- goto error;
- }
-#ifndef CONFIG_ANDROID
- pMessage->sOpenClientSessionMessage.nLoginType =
- SCX_LOGIN_GROUP_LINUX_GID;
-#else
- pMessage->sOpenClientSessionMessage.nLoginType =
- SCX_LOGIN_GROUP_ANDROID_GID;
-#endif
-
- pMessage->sOpenClientSessionMessage.nMessageSize += 1; /* GID */
- break;
- }
-
-#ifndef CONFIG_ANDROID
- case SCX_LOGIN_APPLICATION: {
- /*
- * Compute SHA-1 hash of the application fully-qualified path
- * name. Truncate the hash to 16 bytes and send it as login
- * data. Update message size.
- */
- u8 pSHA1Hash[SHA1_DIGEST_SIZE];
-
- nError = SCXLNXConnHashApplicationPathAndData(pSHA1Hash,
- NULL, 0);
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
- "error in SCXLNXConnHashApplicationPath"
- "AndData\n");
- goto error;
- }
- memcpy(&pMessage->sOpenClientSessionMessage.sLoginData,
- pSHA1Hash, 16);
- pMessage->sOpenClientSessionMessage.nLoginType =
- SCX_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH;
- /* 16 bytes */
- pMessage->sOpenClientSessionMessage.nMessageSize += 4;
- break;
- }
-#else
- case SCX_LOGIN_APPLICATION:
- /*
- * Send the real UID of the calling application in the login
- * data. Update message size.
- */
- *(u32 *) &pMessage->sOpenClientSessionMessage.sLoginData =
- current_uid();
-
- pMessage->sOpenClientSessionMessage.nLoginType =
- (u32) SCX_LOGIN_APPLICATION_ANDROID_UID;
-
- /* Added one word */
- pMessage->sOpenClientSessionMessage.nMessageSize += 1;
- break;
-#endif
-
-#ifndef CONFIG_ANDROID
- case SCX_LOGIN_APPLICATION_USER: {
- /*
- * Compute SHA-1 hash of the concatenation of the application
- * fully-qualified path name and the EUID of the calling
- * application. Truncate the hash to 16 bytes and send it as
- * login data. Update message size.
- */
- u8 pSHA1Hash[SHA1_DIGEST_SIZE];
-
- nError = SCXLNXConnHashApplicationPathAndData(pSHA1Hash,
- (u8 *) &(current_euid()), sizeof(current_euid()));
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
- "error in SCXLNXConnHashApplicationPath"
- "AndData\n");
- goto error;
- }
- memcpy(&pMessage->sOpenClientSessionMessage.sLoginData,
- pSHA1Hash, 16);
- pMessage->sOpenClientSessionMessage.nLoginType =
- SCX_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH;
-
- /* 16 bytes */
- pMessage->sOpenClientSessionMessage.nMessageSize += 4;
-
- break;
- }
-#else
- case SCX_LOGIN_APPLICATION_USER:
- /*
- * Send the real UID and the EUID of the calling application in
- * the login data. Update message size.
- */
- *(u32 *) &pMessage->sOpenClientSessionMessage.sLoginData =
- current_uid();
- *(u32 *) &pMessage->sOpenClientSessionMessage.sLoginData[4] =
- current_euid();
-
- pMessage->sOpenClientSessionMessage.nLoginType =
- SCX_LOGIN_APPLICATION_USER_ANDROID_UID_EUID;
-
- /* Added two words */
- pMessage->sOpenClientSessionMessage.nMessageSize += 2;
- break;
-#endif
-
-#ifndef CONFIG_ANDROID
- case SCX_LOGIN_APPLICATION_GROUP: {
- /*
- * Check requested GID. Compute SHA-1 hash of the concatenation
- * of the application fully-qualified path name and the
- * requested GID. Update message size
- */
- gid_t nRequestedGID;
- u8 pSHA1Hash[SHA1_DIGEST_SIZE];
-
- nRequestedGID = *(u32 *) &pMessage->sOpenClientSessionMessage.
- sLoginData;
-
- if (!SCXLNXConnCheckGID(nRequestedGID)) {
- dprintk(KERN_ERR "SCXLNXConnOpenClientSession(%p) "
- "SCX_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
- "does not match real eGID (0x%x)"
- "or any of the supplementary GIDs\n",
- pConn, nRequestedGID, current_egid());
- nError = -EACCES;
- goto error;
- }
-
- nError = SCXLNXConnHashApplicationPathAndData(pSHA1Hash,
- &nRequestedGID, sizeof(u32));
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
- "error in SCXLNXConnHashApplicationPath"
- "AndData\n");
- goto error;
- }
-
- memcpy(&pMessage->sOpenClientSessionMessage.sLoginData,
- pSHA1Hash, 16);
- pMessage->sOpenClientSessionMessage.nLoginType =
- SCX_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH;
-
- /* 16 bytes */
- pMessage->sOpenClientSessionMessage.nMessageSize += 4;
- break;
- }
-#else
- case SCX_LOGIN_APPLICATION_GROUP: {
- /*
- * Check requested GID. Send the real UID and the requested GID
- * in the login data. Update message size.
- */
- gid_t nRequestedGID;
-
- nRequestedGID = *(u32 *) &pMessage->sOpenClientSessionMessage.
- sLoginData;
-
- if (!SCXLNXConnCheckGID(nRequestedGID)) {
- dprintk(KERN_ERR "SCXLNXConnOpenClientSession(%p) "
- "SCX_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
- "does not match real eGID (0x%x)"
- "or any of the supplementary GIDs\n",
- pConn, nRequestedGID, current_egid());
- nError = -EACCES;
- goto error;
- }
-
- *(u32 *) &pMessage->sOpenClientSessionMessage.sLoginData =
- current_uid();
- *(u32 *) &pMessage->sOpenClientSessionMessage.sLoginData[4] =
- nRequestedGID;
-
- pMessage->sOpenClientSessionMessage.nLoginType =
- SCX_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID;
-
- /* Added two words */
- pMessage->sOpenClientSessionMessage.nMessageSize += 2;
-
- break;
- }
-#endif
-
- case SCX_LOGIN_PRIVILEGED:
- /*
- * Check that calling application either hash EUID=0 or has
- * EGID=0
- */
- if (current_euid() != 0 && current_egid() != 0) {
- dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
- " user %d, group %d not allowed to open "
- "session with SCX_LOGIN_PRIVILEGED\n",
- current_euid(), current_egid());
- nError = -EACCES;
- goto error;
- }
- pMessage->sOpenClientSessionMessage.nLoginType =
- SCX_LOGIN_PRIVILEGED;
- break;
-
- case SCX_LOGIN_AUTHENTICATION: {
- /*
- * Compute SHA-1 hash of the application binary
- * Send this hash as the login data (20 bytes)
- */
-
- u8 *pHash;
- pHash = &(pMessage->sOpenClientSessionMessage.sLoginData[0]);
-
- nError = SCXLNXConnGetCurrentProcessHash(pHash);
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
- "error in SCXLNXConnGetCurrentProcessHash\n");
- goto error;
- }
- pMessage->sOpenClientSessionMessage.nLoginType =
- SCX_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH;
-
- /* 20 bytes */
- pMessage->sOpenClientSessionMessage.nMessageSize += 5;
- break;
- }
-
- default:
- dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
- "unknown nLoginType(%08X)\n",
- pMessage->sOpenClientSessionMessage.nLoginType);
- nError = -EOPNOTSUPP;
- goto error;
- }
-
- /* Map the temporary memory references */
- for (i = 0; i < 4; i++) {
- int nParamType;
- nParamType = SCX_GET_PARAM_TYPE(
- pMessage->sOpenClientSessionMessage.nParamTypes, i);
- if ((nParamType & (SCX_PARAM_TYPE_MEMREF_FLAG |
- SCX_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
- == SCX_PARAM_TYPE_MEMREF_FLAG) {
- /* Map temp mem ref */
- nError = SCXLNXConnMapTempShMem(pConn,
- &pMessage->sOpenClientSessionMessage.
- sParams[i].sTempMemref,
- nParamType,
- &pShmemDesc[i]);
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
- "unable to map temporary memory block "
- "(%08X)\n", nError);
- goto error;
- }
- }
- }
-
- /* Fill the handle of the Device Context */
- pMessage->sOpenClientSessionMessage.hDeviceContext =
- pConn->hDeviceContext;
-
- nError = SCXLNXCommSendReceive(
- &pConn->pDevice->sm,
- pMessage,
- pAnswer,
- pConn,
- true);
-
-error:
- /* Unmap the temporary memory references */
- for (i = 0; i < 4; i++)
- if (pShmemDesc[i] != NULL)
- SCXLNXConnUnmapShmem(pConn, pShmemDesc[i], 0);
-
- if (nError != 0)
- dprintk(KERN_ERR "SCXLNXConnOpenClientSession returns %d\n",
- nError);
- else
- dprintk(KERN_ERR "SCXLNXConnOpenClientSession returns "
- "nErrorCode 0x%08X\n",
- pAnswer->sOpenClientSessionAnswer.nErrorCode);
-
- return nError;
-}
-
-
-/*
- * Closes a client session from the Secure World
- */
-int SCXLNXConnCloseClientSession(
- struct SCXLNX_CONNECTION *pConn,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer)
-{
- int nError = 0;
-
- dprintk(KERN_DEBUG "SCXLNXConnCloseClientSession(%p)\n", pConn);
-
- pMessage->sCloseClientSessionMessage.nMessageSize =
- (sizeof(struct SCX_COMMAND_CLOSE_CLIENT_SESSION) -
- sizeof(struct SCX_COMMAND_HEADER)) / 4;
- pMessage->sCloseClientSessionMessage.hDeviceContext =
- pConn->hDeviceContext;
-
- nError = SCXLNXCommSendReceive(
- &pConn->pDevice->sm,
- pMessage,
- pAnswer,
- pConn,
- true);
-
- if (nError != 0)
- dprintk(KERN_ERR "SCXLNXConnCloseClientSession returns %d\n",
- nError);
- else
- dprintk(KERN_ERR "SCXLNXConnCloseClientSession returns "
- "nError 0x%08X\n",
- pAnswer->sCloseClientSessionAnswer.nErrorCode);
-
- return nError;
-}
-
-
-/*
- * Registers a shared memory to the Secure World
- */
-int SCXLNXConnRegisterSharedMemory(
- struct SCXLNX_CONNECTION *pConn,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer)
-{
- int nError = 0;
- struct SCXLNX_SHMEM_DESC *pShmemDesc = NULL;
-
- dprintk(KERN_INFO "SCXLNXConnRegisterSharedMemory(%p) "
- "%p[0x%08X][0x%08x]\n",
- pConn,
- (void *) pMessage->sRegisterSharedMemoryMessage.
- nSharedMemDescriptors[0],
- pMessage->sRegisterSharedMemoryMessage.nSharedMemSize,
- (u32)pMessage->sRegisterSharedMemoryMessage.nMemoryFlags);
-
- nError = SCXLNXConnValidateSharedMemoryBlockAndFlags(
- (void *) pMessage->sRegisterSharedMemoryMessage.
- nSharedMemDescriptors[0],
- pMessage->sRegisterSharedMemoryMessage.nSharedMemSize,
- (u32)pMessage->sRegisterSharedMemoryMessage.nMemoryFlags);
- if (nError != 0)
- goto error;
-
- /* Initialize nMessageSize with no descriptors */
- pMessage->sRegisterSharedMemoryMessage.nMessageSize
- = (sizeof(struct SCX_COMMAND_REGISTER_SHARED_MEMORY) -
- sizeof(struct SCX_COMMAND_HEADER)) / 4;
-
- /* Map the shmem block and update the message */
- if (pMessage->sRegisterSharedMemoryMessage.nSharedMemSize == 0) {
- /* Empty shared mem */
- pMessage->sRegisterSharedMemoryMessage.nSharedMemStartOffset =
- pMessage->sRegisterSharedMemoryMessage.
- nSharedMemDescriptors[0];
- } else {
- u32 nDescriptorCount;
- nError = SCXLNXConnMapShmem(
- pConn,
- pMessage->sRegisterSharedMemoryMessage.
- nSharedMemDescriptors[0],
- pMessage->sRegisterSharedMemoryMessage.nMemoryFlags,
- true,
- pMessage->sRegisterSharedMemoryMessage.
- nSharedMemDescriptors,
- &(pMessage->sRegisterSharedMemoryMessage.
- nSharedMemStartOffset),
- &(pMessage->sRegisterSharedMemoryMessage.
- nSharedMemSize),
- &pShmemDesc,
- &nDescriptorCount);
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXConnRegisterSharedMemory: "
- "unable to map shared memory block\n");
- goto error;
- }
- pMessage->sRegisterSharedMemoryMessage.nMessageSize +=
- nDescriptorCount;
- }
-
- /*
- * write the correct device context handle and the address of the shared
- * memory descriptor in the message
- */
- pMessage->sRegisterSharedMemoryMessage.hDeviceContext =
- pConn->hDeviceContext;
- pMessage->sRegisterSharedMemoryMessage.nBlockID = (u32) pShmemDesc;
-
- /* Send the updated message */
- nError = SCXLNXCommSendReceive(
- &pConn->pDevice->sm,
- pMessage,
- pAnswer,
- pConn,
- true);
-
- if ((nError != 0) ||
- (pAnswer->sRegisterSharedMemoryAnswer.nErrorCode
- != S_SUCCESS)) {
- dprintk(KERN_ERR "SCXLNXConnRegisterSharedMemory: "
- "operation failed. Unmap block\n");
- goto error;
- }
-
- /* Saves the block handle returned by the secure world */
- if (pShmemDesc != NULL)
- pShmemDesc->hIdentifier =
- pAnswer->sRegisterSharedMemoryAnswer.hBlock;
-
- /* successful completion */
- dprintk(KERN_INFO "SCXLNXConnRegisterSharedMemory(%p):"
- " nBlockID=0x%08x hBlock=0x%08x\n",
- pConn, pMessage->sRegisterSharedMemoryMessage.nBlockID,
- pAnswer->sRegisterSharedMemoryAnswer.hBlock);
- return 0;
-
- /* error completion */
-error:
- SCXLNXConnUnmapShmem(
- pConn,
- pShmemDesc,
- 0);
-
- if (nError != 0)
- dprintk(KERN_ERR "SCXLNXConnRegisterSharedMemory returns %d\n",
- nError);
- else
- dprintk(KERN_ERR "SCXLNXConnRegisterSharedMemory returns "
- "nErrorCode 0x%08X\n",
- pAnswer->sRegisterSharedMemoryAnswer.nErrorCode);
-
- return nError;
-}
-
-
-/*
- * Releases a shared memory from the Secure World
- */
-int SCXLNXConnReleaseSharedMemory(
- struct SCXLNX_CONNECTION *pConn,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer)
-{
- int nError = 0;
-
- dprintk(KERN_DEBUG "SCXLNXConnReleaseSharedMemory(%p)\n", pConn);
-
- pMessage->sReleaseSharedMemoryMessage.nMessageSize =
- (sizeof(struct SCX_COMMAND_RELEASE_SHARED_MEMORY) -
- sizeof(struct SCX_COMMAND_HEADER)) / 4;
- pMessage->sReleaseSharedMemoryMessage.hDeviceContext =
- pConn->hDeviceContext;
-
- nError = SCXLNXCommSendReceive(
- &pConn->pDevice->sm,
- pMessage,
- pAnswer,
- pConn,
- true);
-
- if ((nError != 0) ||
- (pAnswer->sReleaseSharedMemoryAnswer.nErrorCode != S_SUCCESS))
- goto error;
-
- /* Use nBlockID to get back the pointer to pShmemDesc */
- SCXLNXConnUnmapShmem(
- pConn,
- (struct SCXLNX_SHMEM_DESC *)
- pAnswer->sReleaseSharedMemoryAnswer.nBlockID,
- 0);
-
- /* successful completion */
- dprintk(KERN_INFO "SCXLNXConnReleaseSharedMemory(%p):"
- " nBlockID=0x%08x hBlock=0x%08x\n",
- pConn, pAnswer->sReleaseSharedMemoryAnswer.nBlockID,
- pMessage->sReleaseSharedMemoryMessage.hBlock);
- return 0;
-
-
-error:
- if (nError != 0)
- dprintk(KERN_ERR "SCXLNXConnReleaseSharedMemory returns %d\n",
- nError);
- else
- dprintk(KERN_ERR "SCXLNXConnReleaseSharedMemory returns "
- "nChannelStatus 0x%08X\n",
- pAnswer->sReleaseSharedMemoryAnswer.nErrorCode);
-
- return nError;
-
-}
-
-
-/*
- * Invokes a client command to the Secure World
- */
-int SCXLNXConnInvokeClientCommand(
- struct SCXLNX_CONNECTION *pConn,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer)
-{
- int nError = 0;
- struct SCXLNX_SHMEM_DESC *pShmemDesc[4] = {NULL};
- int i;
-
- dprintk(KERN_INFO "SCXLNXConnInvokeClientCommand(%p)\n", pConn);
-
- pMessage->sReleaseSharedMemoryMessage.nMessageSize =
- (sizeof(struct SCX_COMMAND_INVOKE_CLIENT_COMMAND) -
- sizeof(struct SCX_COMMAND_HEADER)) / 4;
-
-#ifdef CONFIG_TF_ZEBRA
- nError = SCXPublicCryptoTryShortcutedUpdate(pConn,
- (struct SCX_COMMAND_INVOKE_CLIENT_COMMAND *) pMessage,
- (struct SCX_ANSWER_INVOKE_CLIENT_COMMAND *) pAnswer);
- if (nError == 0)
- return nError;
-#endif
-
- /* Map the tmprefs */
- for (i = 0; i < 4; i++) {
- int nParamType = SCX_GET_PARAM_TYPE(
- pMessage->sInvokeClientCommandMessage.nParamTypes, i);
-
- if ((nParamType & (SCX_PARAM_TYPE_MEMREF_FLAG |
- SCX_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
- == SCX_PARAM_TYPE_MEMREF_FLAG) {
- /* A temporary memref: map it */
- nError = SCXLNXConnMapTempShMem(pConn,
- &pMessage->sInvokeClientCommandMessage.
- sParams[i].sTempMemref,
- nParamType, &pShmemDesc[i]);
- if (nError != 0) {
- dprintk(KERN_ERR
- "SCXLNXConnInvokeClientCommand: "
- "unable to map temporary memory "
- "block\n (%08X)", nError);
- goto error;
- }
- }
- }
-
- pMessage->sInvokeClientCommandMessage.hDeviceContext =
- pConn->hDeviceContext;
-
- nError = SCXLNXCommSendReceive(&pConn->pDevice->sm, pMessage,
- pAnswer, pConn, true);
-
-error:
- /* Unmap de temp mem refs */
- for (i = 0; i < 4; i++) {
- if (pShmemDesc[i] != NULL) {
- dprintk(KERN_INFO "SCXLNXConnInvokeClientCommand: "
- "UnMapTempMemRef %d\n ", i);
-
- SCXLNXConnUnmapShmem(pConn, pShmemDesc[i], 0);
- }
- }
-
- if (nError != 0)
- dprintk(KERN_ERR "SCXLNXConnInvokeClientCommand returns %d\n",
- nError);
- else
- dprintk(KERN_ERR "SCXLNXConnInvokeClientCommand returns "
- "nErrorCode 0x%08X\n",
- pAnswer->sInvokeClientCommandAnswer.nErrorCode);
-
- return nError;
-}
-
-
-/*
- * Cancels a client command from the Secure World
- */
-int SCXLNXConnCancelClientCommand(
- struct SCXLNX_CONNECTION *pConn,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer)
-{
- int nError = 0;
-
- dprintk(KERN_DEBUG "SCXLNXConnCancelClientCommand(%p)\n", pConn);
-
- pMessage->sCancelClientOperationMessage.hDeviceContext =
- pConn->hDeviceContext;
- pMessage->sCancelClientOperationMessage.nMessageSize =
- (sizeof(struct SCX_COMMAND_CANCEL_CLIENT_OPERATION) -
- sizeof(struct SCX_COMMAND_HEADER)) / 4;
-
- nError = SCXLNXCommSendReceive(
- &pConn->pDevice->sm,
- pMessage,
- pAnswer,
- pConn,
- true);
-
- if ((nError != 0) ||
- (pAnswer->sCancelClientOperationAnswer.nErrorCode != S_SUCCESS))
- goto error;
-
-
- /* successful completion */
- return 0;
-
-error:
- if (nError != 0)
- dprintk(KERN_ERR "SCXLNXConnCancelClientCommand returns %d\n",
- nError);
- else
- dprintk(KERN_ERR "SCXLNXConnCancelClientCommand returns "
- "nChannelStatus 0x%08X\n",
- pAnswer->sCancelClientOperationAnswer.nErrorCode);
-
- return nError;
-}
-
-
-
-/*
- * Destroys a device context from the Secure World
- */
-int SCXLNXConnDestroyDeviceContext(
- struct SCXLNX_CONNECTION *pConn)
-{
- int nError;
- /*
- * AFY: better use the specialized SCX_COMMAND_DESTROY_DEVICE_CONTEXT
- * structure: this will save stack
- */
- union SCX_COMMAND_MESSAGE sMessage;
- union SCX_ANSWER_MESSAGE sAnswer;
-
- dprintk(KERN_INFO "SCXLNXConnDestroyDeviceContext(%p)\n", pConn);
-
- BUG_ON(pConn == NULL);
-
- sMessage.sHeader.nMessageType = SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
- sMessage.sHeader.nMessageSize =
- (sizeof(struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT) -
- sizeof(struct SCX_COMMAND_HEADER))/sizeof(u32);
-
- /*
- * fill in the device context handler
- * it is guarantied that the first shared memory descriptor describes
- * the device context
- */
- sMessage.sDestroyDeviceContextMessage.hDeviceContext =
- pConn->hDeviceContext;
-
- nError = SCXLNXCommSendReceive(
- &pConn->pDevice->sm,
- &sMessage,
- &sAnswer,
- pConn,
- false);
-
- if ((nError != 0) ||
- (sAnswer.sDestroyDeviceContextAnswer.nErrorCode != S_SUCCESS))
- goto error;
-
- spin_lock(&(pConn->stateLock));
- pConn->nState = SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT;
- spin_unlock(&(pConn->stateLock));
-
- /* successful completion */
- dprintk(KERN_INFO "SCXLNXConnDestroyDeviceContext(%p)\n",
- pConn);
- return 0;
-
-error:
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXConnDestroyDeviceContext failed with "
- "error %d\n", nError);
- } else {
- dprintk(KERN_ERR "SCXLNXConnDestroyDeviceContext failed with "
- "nErrorCode 0x%08X\n",
- sAnswer.sDestroyDeviceContextAnswer.nErrorCode);
- if (sAnswer.sDestroyDeviceContextAnswer.nErrorCode ==
- S_ERROR_OUT_OF_MEMORY)
- nError = -ENOMEM;
- else
- nError = -EFAULT;
- }
-
- return nError;
-}
-
-
-/*----------------------------------------------------------------------------
- * Connection initialization and cleanup operations
- *----------------------------------------------------------------------------*/
-
-/*
- * Opens a connection to the specified device.
- *
- * The placeholder referenced by ppConn is set to the address of the
- * new connection; it is set to NULL upon failure.
- *
- * Returns zero upon successful completion, or an appropriate error code upon
- * failure.
- */
-int SCXLNXConnOpen(struct SCXLNX_DEVICE *pDevice,
- struct file *file,
- struct SCXLNX_CONNECTION **ppConn)
-{
- int nError;
- struct SCXLNX_CONNECTION *pConn = NULL;
-
- dprintk(KERN_INFO "SCXLNXConnOpen(%p, %p)\n", file, ppConn);
-
- /*
- * Allocate and initialize the connection.
- * kmalloc only allocates sizeof(*pConn) virtual memory
- */
- pConn = (struct SCXLNX_CONNECTION *) internal_kmalloc(sizeof(*pConn),
- GFP_KERNEL);
- if (pConn == NULL) {
- printk(KERN_ERR "SCXLNXConnOpen(): "
- "Out of memory for connection!\n");
- nError = -ENOMEM;
- goto error;
- }
-
- memset(pConn, 0, sizeof(*pConn));
-
- INIT_LIST_HEAD(&(pConn->list));
- pConn->nState = SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT;
- pConn->pDevice = pDevice;
- spin_lock_init(&(pConn->stateLock));
- atomic_set(&(pConn->nPendingOpCounter), 0);
-
- /*
- * Initialize the shared memory
- */
- nError = SCXLNXConnInitSharedMemory(pConn);
- if (nError != 0)
- goto error;
-
-#ifdef CONFIG_TF_ZEBRA
- /*
- * Initialize CUS specifics
- */
- SCXPublicCryptoInitDeviceContext(pConn);
-#endif
-
- /*
- * Successful completion.
- */
-
- *ppConn = pConn;
-
- dprintk(KERN_INFO "SCXLNXConnOpen(): Success (pConn=%p)\n", pConn);
- return 0;
-
- /*
- * Error handling.
- */
-
-error:
- dprintk(KERN_ERR "SCXLNXConnOpen(): Failure (error %d)\n", nError);
- /* Deallocate the descriptor pages if necessary */
- internal_kfree(pConn);
- *ppConn = NULL;
- return nError;
-}
-
-
-/*
- * Closes the specified connection.
- *
- * Upon return, the connection referenced by pConn has been destroyed and cannot
- * be used anymore.
- *
- * This function does nothing if pConn is set to NULL.
- */
-void SCXLNXConnClose(struct SCXLNX_CONNECTION *pConn)
-{
- int nError;
- enum SCXLNX_CONN_STATE nState;
-
- dprintk(KERN_DEBUG "SCXLNXConnClose(%p)\n", pConn);
-
- if (pConn == NULL)
- return;
-
- /*
- * Assumption: Linux guarantees that no other operation is in progress
- * and that no other operation will be started when close is called
- */
- BUG_ON(atomic_read(&(pConn->nPendingOpCounter)) != 0);
-
- /*
- * Exchange a Destroy Device Context message if needed.
- */
- spin_lock(&(pConn->stateLock));
- nState = pConn->nState;
- spin_unlock(&(pConn->stateLock));
- if (nState == SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT) {
- /*
- * A DestroyDeviceContext operation was not performed. Do it
- * now.
- */
- nError = SCXLNXConnDestroyDeviceContext(pConn);
- if (nError != 0)
- /* avoid cleanup if destroy device context fails */
- goto error;
- }
-
- /*
- * Clean up the shared memory
- */
- SCXLNXConnCleanupSharedMemory(pConn);
-
- internal_kfree(pConn);
-
- return;
-
-error:
- dprintk(KERN_DEBUG "SCXLNXConnClose(%p) failed with error code %d\n",
- pConn, nError);
-}
-
diff --git a/security/tf_driver/scxlnx_conn.h b/security/tf_driver/scxlnx_conn.h
deleted file mode 100644
index f080f4ef8027..000000000000
--- a/security/tf_driver/scxlnx_conn.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2006-2010 Trusted Logic S.A.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
- */
-
-#ifndef __SCXLNX_CONN_H__
-#define __SCXLNX_CONN_H__
-
-#include "scxlnx_defs.h"
-
-/*
- * Returns a pointer to the connection referenced by the
- * specified file.
- */
-static inline struct SCXLNX_CONNECTION *SCXLNXConnFromFile(
- struct file *file)
-{
- return file->private_data;
-}
-
-/*----------------------------------------------------------------------------
- * Connection operations to the Secure World
- *----------------------------------------------------------------------------*/
-
-int SCXLNXConnCreateDeviceContext(
- struct SCXLNX_CONNECTION *pConn);
-
-int SCXLNXConnDestroyDeviceContext(
- struct SCXLNX_CONNECTION *pConn);
-
-int SCXLNXConnOpenClientSession(
- struct SCXLNX_CONNECTION *pConn,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer);
-
-int SCXLNXConnCloseClientSession(
- struct SCXLNX_CONNECTION *pConn,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer);
-
-int SCXLNXConnRegisterSharedMemory(
- struct SCXLNX_CONNECTION *pConn,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer);
-
-int SCXLNXConnReleaseSharedMemory(
- struct SCXLNX_CONNECTION *pConn,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer);
-
-int SCXLNXConnInvokeClientCommand(
- struct SCXLNX_CONNECTION *pConn,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer);
-
-int SCXLNXConnCancelClientCommand(
- struct SCXLNX_CONNECTION *pConn,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer);
-
-int SCXLNXConnCheckMessageValidity(
- struct SCXLNX_CONNECTION *pConn,
- union SCX_COMMAND_MESSAGE *pMessage);
-
-/*----------------------------------------------------------------------------
- * Connection initialization and cleanup operations
- *----------------------------------------------------------------------------*/
-
-int SCXLNXConnOpen(struct SCXLNX_DEVICE *pDevice,
- struct file *file,
- struct SCXLNX_CONNECTION **ppConn);
-
-void SCXLNXConnClose(
- struct SCXLNX_CONNECTION *pConn);
-
-
-#endif /* !defined(__SCXLNX_CONN_H__) */
diff --git a/security/tf_driver/scxlnx_device.c b/security/tf_driver/scxlnx_device.c
deleted file mode 100644
index 4c9386714586..000000000000
--- a/security/tf_driver/scxlnx_device.c
+++ /dev/null
@@ -1,697 +0,0 @@
-/*
- * Copyright (c) 2006-2010 Trusted Logic S.A.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
- */
-
-#include <asm/atomic.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/page-flags.h>
-#include <linux/pm.h>
-#include <linux/sysdev.h>
-#include <linux/vmalloc.h>
-#include <linux/signal.h>
-#ifdef CONFIG_ANDROID
-#include <linux/device.h>
-#endif
-
-#include "scx_protocol.h"
-#include "scxlnx_defs.h"
-#include "scxlnx_util.h"
-#include "scxlnx_conn.h"
-#include "scxlnx_comm.h"
-#ifdef CONFIG_TF_ZEBRA
-#include <plat/cpu.h>
-#include "scxlnx_zebra.h"
-#endif
-
-#include "s_version.h"
-
-/*----------------------------------------------------------------------------
- * Forward Declarations
- *----------------------------------------------------------------------------*/
-
-/*
- * Creates and registers the device to be managed by the specified driver.
- *
- * Returns zero upon successful completion, or an appropriate error code upon
- * failure.
- */
-static int SCXLNXDeviceRegister(void);
-
-
-/*
- * Implements the device Open callback.
- */
-static int SCXLNXDeviceOpen(
- struct inode *inode,
- struct file *file);
-
-
-/*
- * Implements the device Release callback.
- */
-static int SCXLNXDeviceRelease(
- struct inode *inode,
- struct file *file);
-
-
-/*
- * Implements the device ioctl callback.
- */
-static long SCXLNXDeviceIoctl(
- struct file *file,
- unsigned int ioctl_num,
- unsigned long ioctl_param);
-
-
-/*
- * Implements the device shutdown callback.
- */
-static int SCXLNXDeviceShutdown(
- struct sys_device *sysdev);
-
-
-/*
- * Implements the device suspend callback.
- */
-static int SCXLNXDeviceSuspend(
- struct sys_device *sysdev,
- pm_message_t state);
-
-
-/*
- * Implements the device resume callback.
- */
-static int SCXLNXDeviceResume(
- struct sys_device *sysdev);
-
-
-/*---------------------------------------------------------------------------
- * Module Parameters
- *---------------------------------------------------------------------------*/
-
-/*
- * The device major number used to register a unique character device driver.
- * Let the default value be 122
- */
-static int device_major_number = 122;
-
-module_param(device_major_number, int, 0000);
-MODULE_PARM_DESC(device_major_number,
- "The device major number used to register a unique character "
- "device driver");
-
-#ifdef CONFIG_TF_TRUSTZONE
-/**
- * The softint interrupt line used by the Secure World.
- */
-static int soft_interrupt = -1;
-
-module_param(soft_interrupt, int, 0000);
-MODULE_PARM_DESC(soft_interrupt,
- "The softint interrupt line used by the Secure world");
-#endif
-
-#ifdef CONFIG_ANDROID
-static struct class *tf_class;
-#endif
-
-/*----------------------------------------------------------------------------
- * Global Variables
- *----------------------------------------------------------------------------*/
-
-/*
- * tf_driver character device definitions.
- * read and write methods are not defined
- * and will return an error if used by user space
- */
-static const struct file_operations g_SCXLNXDeviceFileOps = {
- .owner = THIS_MODULE,
- .open = SCXLNXDeviceOpen,
- .release = SCXLNXDeviceRelease,
- .unlocked_ioctl = SCXLNXDeviceIoctl,
- .llseek = no_llseek,
-};
-
-
-static struct sysdev_class g_SCXLNXDeviceSysClass = {
- .name = SCXLNX_DEVICE_BASE_NAME,
- .shutdown = SCXLNXDeviceShutdown,
- .suspend = SCXLNXDeviceSuspend,
- .resume = SCXLNXDeviceResume,
-};
-
-/* The single device supported by this driver */
-static struct SCXLNX_DEVICE g_SCXLNXDevice = {0, };
-
-/*----------------------------------------------------------------------------
- * Implementations
- *----------------------------------------------------------------------------*/
-
-struct SCXLNX_DEVICE *SCXLNXGetDevice(void)
-{
- return &g_SCXLNXDevice;
-}
-
-/*
- * displays the driver stats
- */
-static ssize_t kobject_show(struct kobject *pkobject,
- struct attribute *pattributes, char *buf)
-{
- struct SCXLNX_DEVICE_STATS *pDeviceStats = &g_SCXLNXDevice.sDeviceStats;
- u32 nStatPagesAllocated;
- u32 nStatPagesLocked;
- u32 nStatMemoriesAllocated;
-
- nStatMemoriesAllocated =
- atomic_read(&(pDeviceStats->stat_memories_allocated));
- nStatPagesAllocated =
- atomic_read(&(pDeviceStats->stat_pages_allocated));
- nStatPagesLocked = atomic_read(&(pDeviceStats->stat_pages_locked));
-
- /*
- * AFY: could we add the number of context switches (call to the SMI
- * instruction)
- */
-
- return snprintf(buf, PAGE_SIZE,
- "stat.memories.allocated: %d\n"
- "stat.pages.allocated: %d\n"
- "stat.pages.locked: %d\n",
- nStatMemoriesAllocated,
- nStatPagesAllocated,
- nStatPagesLocked);
-}
-
-static const struct sysfs_ops kobj_sysfs_operations = {
- .show = kobject_show,
-};
-
-/*----------------------------------------------------------------------------*/
-
-/*
- * First routine called when the kernel module is loaded
- */
-static int __init SCXLNXDeviceRegister(void)
-{
- int nError;
- struct SCXLNX_DEVICE *pDevice = &g_SCXLNXDevice;
- struct SCXLNX_DEVICE_STATS *pDeviceStats = &pDevice->sDeviceStats;
-
- dprintk(KERN_INFO "SCXLNXDeviceRegister()\n");
-
-#ifdef CONFIG_TF_ZEBRA
- nError = SCXLNXCtrlDeviceInit();
- if (nError <= 0)
- return nError;
-#endif
-
- /*
- * Initialize the device
- */
- pDevice->nDevNum = MKDEV(device_major_number,
- SCXLNX_DEVICE_MINOR_NUMBER);
- cdev_init(&pDevice->cdev, &g_SCXLNXDeviceFileOps);
- pDevice->cdev.owner = THIS_MODULE;
-
- pDevice->sysdev.id = 0;
- pDevice->sysdev.cls = &g_SCXLNXDeviceSysClass;
-
- INIT_LIST_HEAD(&pDevice->conns);
- spin_lock_init(&pDevice->connsLock);
-
- /* register the sysfs object driver stats */
- pDeviceStats->kobj_type.sysfs_ops = &kobj_sysfs_operations;
-
- pDeviceStats->kobj_stat_attribute.name = "info";
- pDeviceStats->kobj_stat_attribute.mode = S_IRUGO;
- pDeviceStats->kobj_attribute_list[0] =
- &pDeviceStats->kobj_stat_attribute;
-
- pDeviceStats->kobj_type.default_attrs =
- pDeviceStats->kobj_attribute_list,
- kobject_init_and_add(&(pDeviceStats->kobj),
- &(pDeviceStats->kobj_type), NULL, "%s",
- SCXLNX_DEVICE_BASE_NAME);
-
- /*
- * Register the system device.
- */
-
- nError = sysdev_class_register(&g_SCXLNXDeviceSysClass);
- if (nError != 0) {
- printk(KERN_ERR "SCXLNXDeviceRegister():"
- " sysdev_class_register failed (error %d)!\n",
- nError);
- goto sysdev_class_register_failed;
- }
-
- nError = sysdev_register(&pDevice->sysdev);
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXDeviceRegister(): "
- "sysdev_register failed (error %d)!\n",
- nError);
- goto sysdev_register_failed;
- }
-
- /*
- * Register the char device.
- */
- printk(KERN_INFO "Registering char device %s (%u:%u)\n",
- SCXLNX_DEVICE_BASE_NAME,
- MAJOR(pDevice->nDevNum),
- MINOR(pDevice->nDevNum));
- nError = register_chrdev_region(pDevice->nDevNum, 1,
- SCXLNX_DEVICE_BASE_NAME);
- if (nError != 0) {
- printk(KERN_ERR "SCXLNXDeviceRegister():"
- " register_chrdev_region failed (error %d)!\n",
- nError);
- goto register_chrdev_region_failed;
- }
-
- nError = cdev_add(&pDevice->cdev, pDevice->nDevNum, 1);
- if (nError != 0) {
- printk(KERN_ERR "SCXLNXDeviceRegister(): "
- "cdev_add failed (error %d)!\n",
- nError);
- goto cdev_add_failed;
- }
-
- /*
- * Initialize the communication with the Secure World.
- */
-#ifdef CONFIG_TF_TRUSTZONE
- pDevice->sm.nSoftIntIrq = soft_interrupt;
-#endif
- nError = SCXLNXCommInit(&g_SCXLNXDevice.sm);
- if (nError != S_SUCCESS) {
- dprintk(KERN_ERR "SCXLNXDeviceRegister(): "
- "SCXLNXCommInit failed (error %d)!\n",
- nError);
- goto init_failed;
- }
-
-#ifdef CONFIG_ANDROID
- tf_class = class_create(THIS_MODULE, SCXLNX_DEVICE_BASE_NAME);
- device_create(tf_class, NULL,
- pDevice->nDevNum,
- NULL, SCXLNX_DEVICE_BASE_NAME);
-#endif
-
-#ifdef CONFIG_TF_ZEBRA
- /*
- * Initializes the /dev/tf_ctrl device node.
- */
- nError = SCXLNXCtrlDeviceRegister();
- if (nError)
- goto init_failed;
-#endif
-
-#ifdef CONFIG_BENCH_SECURE_CYCLE
- runBogoMIPS();
- addressCacheProperty((unsigned long) &SCXLNXDeviceRegister);
-#endif
- /*
- * Successful completion.
- */
-
- dprintk(KERN_INFO "SCXLNXDeviceRegister(): Success\n");
- return 0;
-
- /*
- * Error: undo all operations in the reverse order
- */
-init_failed:
- cdev_del(&pDevice->cdev);
-cdev_add_failed:
- unregister_chrdev_region(pDevice->nDevNum, 1);
-register_chrdev_region_failed:
- sysdev_unregister(&(pDevice->sysdev));
-sysdev_register_failed:
- sysdev_class_unregister(&g_SCXLNXDeviceSysClass);
-sysdev_class_register_failed:
- kobject_del(&g_SCXLNXDevice.sDeviceStats.kobj);
-
- dprintk(KERN_INFO "SCXLNXDeviceRegister(): Failure (error %d)\n",
- nError);
- return nError;
-}
-
-/*----------------------------------------------------------------------------*/
-
-static int SCXLNXDeviceOpen(struct inode *inode, struct file *file)
-{
- int nError;
- struct SCXLNX_DEVICE *pDevice = &g_SCXLNXDevice;
- struct SCXLNX_CONNECTION *pConn = NULL;
-
- dprintk(KERN_INFO "SCXLNXDeviceOpen(%u:%u, %p)\n",
- imajor(inode), iminor(inode), file);
-
- /* Dummy lseek for non-seekable driver */
- nError = nonseekable_open(inode, file);
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXDeviceOpen(%p): "
- "nonseekable_open failed (error %d)!\n",
- file, nError);
- goto error;
- }
-
-#ifndef CONFIG_ANDROID
- /*
- * Check file flags. We only autthorize the O_RDWR access
- */
- if (file->f_flags != O_RDWR) {
- dprintk(KERN_ERR "SCXLNXDeviceOpen(%p): "
- "Invalid access mode %u\n",
- file, file->f_flags);
- nError = -EACCES;
- goto error;
- }
-#endif
-
- /*
- * Open a new connection.
- */
-
- nError = SCXLNXConnOpen(pDevice, file, &pConn);
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXDeviceOpen(%p): "
- "SCXLNXConnOpen failed (error %d)!\n",
- file, nError);
- goto error;
- }
-
- /*
- * Attach the connection to the device.
- */
- spin_lock(&(pDevice->connsLock));
- list_add(&(pConn->list), &(pDevice->conns));
- spin_unlock(&(pDevice->connsLock));
-
- file->private_data = pConn;
-
- /*
- * Send the CreateDeviceContext command to the secure
- */
- nError = SCXLNXConnCreateDeviceContext(pConn);
- if (nError != 0) {
- dprintk(KERN_ERR "SCXLNXDeviceOpen(%p): "
- "SCXLNXConnCreateDeviceContext failed (error %d)!\n",
- file, nError);
- goto error1;
- }
-
- /*
- * Successful completion.
- */
-
- dprintk(KERN_INFO "SCXLNXDeviceOpen(%p): Success (pConn=%p)\n",
- file, pConn);
- return 0;
-
- /*
- * Error handling.
- */
-
-error1:
- SCXLNXConnClose(pConn);
-error:
- dprintk(KERN_INFO "SCXLNXDeviceOpen(%p): Failure (error %d)\n",
- file, nError);
- return nError;
-}
-
-/*----------------------------------------------------------------------------*/
-
-static int SCXLNXDeviceRelease(struct inode *inode, struct file *file)
-{
- struct SCXLNX_CONNECTION *pConn;
-
- dprintk(KERN_INFO "SCXLNXDeviceRelease(%u:%u, %p)\n",
- imajor(inode), iminor(inode), file);
-
- pConn = SCXLNXConnFromFile(file);
- spin_lock(&g_SCXLNXDevice.connsLock);
- list_del(&pConn->list);
- spin_unlock(&g_SCXLNXDevice.connsLock);
- SCXLNXConnClose(pConn);
-
- dprintk(KERN_INFO "SCXLNXDeviceRelease(%p): Success\n", file);
- return 0;
-}
-
-/*----------------------------------------------------------------------------*/
-
-static long SCXLNXDeviceIoctl(struct file *file, unsigned int ioctl_num,
- unsigned long ioctl_param)
-{
- int nResult = S_SUCCESS;
- struct SCXLNX_CONNECTION *pConn;
- union SCX_COMMAND_MESSAGE sMessage;
- struct SCX_COMMAND_HEADER sCommandHeader;
- union SCX_ANSWER_MESSAGE sAnswer;
- u32 nCommandSize;
- u32 nAnswerSize;
- void *pUserAnswer;
-
- dprintk(KERN_INFO "SCXLNXDeviceIoctl(%p, %u, %p)\n",
- file, ioctl_num, (void *) ioctl_param);
-
- switch (ioctl_num) {
- case IOCTL_SCX_GET_VERSION:
- /* ioctl is asking for the driver interface version */
- nResult = SCX_DRIVER_INTERFACE_VERSION;
- goto exit;
-
- case IOCTL_SCX_EXCHANGE:
- /*
- * ioctl is asking to perform a message exchange with the Secure
- * Module
- */
-
- /*
- * Make a local copy of the data from the user application
- * This routine checks the data is readable
- *
- * Get the header first.
- */
- if (copy_from_user(&sCommandHeader,
- (struct SCX_COMMAND_HEADER *)ioctl_param,
- sizeof(struct SCX_COMMAND_HEADER))) {
- dprintk(KERN_ERR "SCXLNXDeviceIoctl(%p): "
- "Cannot access ioctl parameter %p\n",
- file, (void *) ioctl_param);
- nResult = -EFAULT;
- goto exit;
- }
-
- /* size in words of u32 */
- nCommandSize = sCommandHeader.nMessageSize +
- sizeof(struct SCX_COMMAND_HEADER)/sizeof(u32);
- if (nCommandSize > sizeof(sMessage)/sizeof(u32)) {
- dprintk(KERN_ERR "SCXLNXDeviceIoctl(%p): "
- "Buffer overflow: too many bytes to copy %d\n",
- file, nCommandSize);
- nResult = -EFAULT;
- goto exit;
- }
-
- if (copy_from_user(&sMessage,
- (union SCX_COMMAND_MESSAGE *)ioctl_param,
- nCommandSize * sizeof(u32))) {
- dprintk(KERN_ERR "SCXLNXDeviceIoctl(%p): "
- "Cannot access ioctl parameter %p\n",
- file, (void *) ioctl_param);
- nResult = -EFAULT;
- goto exit;
- }
-
- pConn = SCXLNXConnFromFile(file);
- BUG_ON(pConn == NULL);
-
- /*
- * The answer memory space address is in the nOperationID field
- */
- pUserAnswer = (void *) sMessage.sHeader.nOperationID;
-
- atomic_inc(&(pConn->nPendingOpCounter));
-
- dprintk(KERN_WARNING "SCXLNXDeviceIoctl(%p): "
- "Sending message type 0x%08x\n",
- file, sMessage.sHeader.nMessageType);
-
- switch (sMessage.sHeader.nMessageType) {
- case SCX_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
- nResult = SCXLNXConnOpenClientSession(pConn,
- &sMessage, &sAnswer);
- break;
-
- case SCX_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
- nResult = SCXLNXConnCloseClientSession(pConn,
- &sMessage, &sAnswer);
- break;
-
- case SCX_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
- nResult = SCXLNXConnRegisterSharedMemory(pConn,
- &sMessage, &sAnswer);
- break;
-
- case SCX_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
- nResult = SCXLNXConnReleaseSharedMemory(pConn,
- &sMessage, &sAnswer);
- break;
-
- case SCX_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
- nResult = SCXLNXConnInvokeClientCommand(pConn,
- &sMessage, &sAnswer);
- break;
-
- case SCX_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
- nResult = SCXLNXConnCancelClientCommand(pConn,
- &sMessage, &sAnswer);
- break;
-
- default:
- dprintk(KERN_ERR "SCXLNXDeviceIoctlExchange(%p): "
- "Incorrect message type (0x%08x)!\n",
- pConn, sMessage.sHeader.nMessageType);
- nResult = -EOPNOTSUPP;
- break;
- }
-
- atomic_dec(&(pConn->nPendingOpCounter));
-
- if (nResult != 0) {
- dprintk(KERN_WARNING "SCXLNXDeviceIoctl(%p): "
- "Operation returning error code 0x%08x)!\n",
- file, nResult);
- goto exit;
- }
-
- /*
- * Copy the answer back to the user space application.
- * The driver does not check this field, only copy back to user
- * space the data handed over by Secure World
- */
- nAnswerSize = sAnswer.sHeader.nMessageSize +
- sizeof(struct SCX_ANSWER_HEADER)/sizeof(u32);
- if (copy_to_user(pUserAnswer,
- &sAnswer, nAnswerSize * sizeof(u32))) {
- dprintk(KERN_WARNING "SCXLNXDeviceIoctl(%p): "
- "Failed to copy back the full command "
- "answer to %p\n", file, pUserAnswer);
- nResult = -EFAULT;
- goto exit;
- }
-
- /* successful completion */
- dprintk(KERN_INFO "SCXLNXDeviceIoctl(%p): Success\n", file);
- break;
-
- case IOCTL_SCX_GET_DESCRIPTION: {
- /* ioctl asking for the version information buffer */
- struct SCX_VERSION_INFORMATION_BUFFER *pInfoBuffer;
-
- dprintk(KERN_INFO "IOCTL_SCX_GET_DESCRIPTION:(%p, %u, %p)\n",
- file, ioctl_num, (void *) ioctl_param);
-
- pInfoBuffer =
- ((struct SCX_VERSION_INFORMATION_BUFFER *) ioctl_param);
-
- dprintk(KERN_INFO "IOCTL_SCX_GET_DESCRIPTION1: "
- "sDriverDescription=\"%64s\"\n", S_VERSION_STRING);
-
- if (copy_to_user(pInfoBuffer->sDriverDescription,
- S_VERSION_STRING,
- strlen(S_VERSION_STRING) + 1)) {
- dprintk(KERN_ERR "SCXLNXDeviceIoctl(%p): "
- "Fail to copy back the driver description "
- "to %p\n",
- file, pInfoBuffer->sDriverDescription);
- nResult = -EFAULT;
- goto exit;
- }
-
- dprintk(KERN_INFO "IOCTL_SCX_GET_DESCRIPTION2: "
- "sSecureWorldDescription=\"%64s\"\n",
- SCXLNXCommGetDescription(&g_SCXLNXDevice.sm));
-
- if (copy_to_user(pInfoBuffer->sSecureWorldDescription,
- SCXLNXCommGetDescription(&g_SCXLNXDevice.sm),
- SCX_DESCRIPTION_BUFFER_LENGTH)) {
- dprintk(KERN_WARNING "SCXLNXDeviceIoctl(%p): "
- "Failed to copy back the secure world "
- "description to %p\n",
- file, pInfoBuffer->sSecureWorldDescription);
- nResult = -EFAULT;
- goto exit;
- }
- break;
- }
-
- default:
- dprintk(KERN_ERR "SCXLNXDeviceIoctl(%p): "
- "Unknown IOCTL code 0x%08x!\n",
- file, ioctl_num);
- nResult = -EOPNOTSUPP;
- goto exit;
- }
-
-exit:
- return nResult;
-}
-
-/*----------------------------------------------------------------------------*/
-
-static int SCXLNXDeviceShutdown(struct sys_device *sysdev)
-{
-
- return SCXLNXCommPowerManagement(&g_SCXLNXDevice.sm,
- SCXLNX_POWER_OPERATION_SHUTDOWN);
-}
-
-/*----------------------------------------------------------------------------*/
-
-static int SCXLNXDeviceSuspend(struct sys_device *sysdev, pm_message_t state)
-{
- printk(KERN_INFO "SCXLNXDeviceSuspend: Enter\n");
- return SCXLNXCommPowerManagement(&g_SCXLNXDevice.sm,
- SCXLNX_POWER_OPERATION_HIBERNATE);
-}
-
-
-/*----------------------------------------------------------------------------*/
-
-static int SCXLNXDeviceResume(struct sys_device *sysdev)
-{
- return SCXLNXCommPowerManagement(&g_SCXLNXDevice.sm,
- SCXLNX_POWER_OPERATION_RESUME);
-}
-
-
-/*----------------------------------------------------------------------------*/
-
-module_init(SCXLNXDeviceRegister);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Trusted Logic S.A.");
diff --git a/security/tf_driver/scxlnx_util.c b/security/tf_driver/scxlnx_util.c
deleted file mode 100644
index df928a4ec2c5..000000000000
--- a/security/tf_driver/scxlnx_util.c
+++ /dev/null
@@ -1,1141 +0,0 @@
-/*
- * Copyright (c) 2006-2010 Trusted Logic S.A.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
- */
-#include <linux/mman.h>
-#include "scxlnx_util.h"
-
-/*----------------------------------------------------------------------------
- * Debug printing routines
- *----------------------------------------------------------------------------*/
-#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
-
-void addressCacheProperty(unsigned long va)
-{
- unsigned long pa;
- unsigned long inner;
- unsigned long outer;
-
- asm volatile ("mcr p15, 0, %0, c7, c8, 0" : : "r" (va));
- asm volatile ("mrc p15, 0, %0, c7, c4, 0" : "=r" (pa));
-
- dprintk(KERN_INFO "VA:%x, PA:%x\n",
- (unsigned int) va,
- (unsigned int) pa);
-
- if (pa & 1) {
- dprintk(KERN_INFO "Prop Error\n");
- return;
- }
-
- outer = (pa >> 2) & 3;
- dprintk(KERN_INFO "\touter : %x", (unsigned int) outer);
-
- switch (outer) {
- case 3:
- dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
- break;
- case 2:
- dprintk(KERN_INFO "Write-Through, no Write-Allocate.\n");
- break;
- case 1:
- dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
- break;
- case 0:
- dprintk(KERN_INFO "Non-cacheable.\n");
- break;
- }
-
- inner = (pa >> 4) & 7;
- dprintk(KERN_INFO "\tinner : %x", (unsigned int)inner);
-
- switch (inner) {
- case 7:
- dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
- break;
- case 6:
- dprintk(KERN_INFO "Write-Through.\n");
- break;
- case 5:
- dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
- break;
- case 3:
- dprintk(KERN_INFO "Device.\n");
- break;
- case 1:
- dprintk(KERN_INFO "Strongly-ordered.\n");
- break;
- case 0:
- dprintk(KERN_INFO "Non-cacheable.\n");
- break;
- }
-
- if (pa & 0x00000002)
- dprintk(KERN_INFO "SuperSection.\n");
- if (pa & 0x00000080)
- dprintk(KERN_INFO "Memory is shareable.\n");
- else
- dprintk(KERN_INFO "Memory is non-shareable.\n");
-
- if (pa & 0x00000200)
- dprintk(KERN_INFO "Non-secure.\n");
-}
-
-#ifdef CONFIG_BENCH_SECURE_CYCLE
-
-#define LOOP_SIZE (100000)
-
-void runBogoMIPS(void)
-{
- uint32_t nCycles;
- void *pAddress = &runBogoMIPS;
-
- dprintk(KERN_INFO "BogoMIPS:\n");
-
- setupCounters();
- nCycles = runCodeSpeed(LOOP_SIZE);
- dprintk(KERN_INFO "%u cycles with code access\n", nCycles);
- nCycles = runDataSpeed(LOOP_SIZE, (unsigned long)pAddress);
- dprintk(KERN_INFO "%u cycles to access %x\n", nCycles,
- (unsigned int) pAddress);
-}
-
-#endif /* CONFIG_BENCH_SECURE_CYCLE */
-
-/*
- * Dump the L1 shared buffer.
- */
-void SCXLNXDumpL1SharedBuffer(struct SCHANNEL_C1S_BUFFER *pBuf)
-{
- dprintk(KERN_INFO
- "buffer@%p:\n"
- " nConfigFlags_S=%08X\n"
- " sVersionDescription=%64s\n"
- " nStatus_S=%08X\n"
- " nSyncSerial_N=%08X\n"
- " nSyncSerial_S=%08X\n"
- " sTime_N[0]=%016llX\n"
- " sTime_N[1]=%016llX\n"
- " sTimeout_S[0]=%016llX\n"
- " sTimeout_S[1]=%016llX\n"
- " nFirstCommand=%08X\n"
- " nFirstFreeCommand=%08X\n"
- " nFirstAnswer=%08X\n"
- " nFirstFreeAnswer=%08X\n\n",
- pBuf,
- pBuf->nConfigFlags_S,
- pBuf->sVersionDescription,
- pBuf->nStatus_S,
- pBuf->nSyncSerial_N,
- pBuf->nSyncSerial_S,
- pBuf->sTime_N[0],
- pBuf->sTime_N[1],
- pBuf->sTimeout_S[0],
- pBuf->sTimeout_S[1],
- pBuf->nFirstCommand,
- pBuf->nFirstFreeCommand,
- pBuf->nFirstAnswer,
- pBuf->nFirstFreeAnswer);
-}
-
-
-/*
- * Dump the specified SChannel message using dprintk.
- */
-void SCXLNXDumpMessage(union SCX_COMMAND_MESSAGE *pMessage)
-{
- u32 i;
-
- dprintk(KERN_INFO "message@%p:\n", pMessage);
-
- switch (pMessage->sHeader.nMessageType) {
- case SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT\n"
- " nOperationID = 0x%08X\n"
- " nDeviceContextID = 0x%08X\n",
- pMessage->sHeader.nMessageSize,
- pMessage->sHeader.nMessageType,
- pMessage->sHeader.nOperationID,
- pMessage->sCreateDeviceContextMessage.nDeviceContextID
- );
- break;
-
- case SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT\n"
- " nOperationID = 0x%08X\n"
- " hDeviceContext = 0x%08X\n",
- pMessage->sHeader.nMessageSize,
- pMessage->sHeader.nMessageType,
- pMessage->sHeader.nOperationID,
- pMessage->sDestroyDeviceContextMessage.hDeviceContext);
- break;
-
- case SCX_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_MESSAGE_TYPE_OPEN_CLIENT_SESSION\n"
- " nParamTypes = 0x%04X\n"
- " nOperationID = 0x%08X\n"
- " hDeviceContext = 0x%08X\n"
- " nCancellationID = 0x%08X\n"
- " sTimeout = 0x%016llX\n"
- " sDestinationUUID = "
- "%08X-%04X-%04X-%02X%02X-"
- "%02X%02X%02X%02X%02X%02X\n",
- pMessage->sHeader.nMessageSize,
- pMessage->sHeader.nMessageType,
- pMessage->sOpenClientSessionMessage.nParamTypes,
- pMessage->sHeader.nOperationID,
- pMessage->sOpenClientSessionMessage.hDeviceContext,
- pMessage->sOpenClientSessionMessage.nCancellationID,
- pMessage->sOpenClientSessionMessage.sTimeout,
- pMessage->sOpenClientSessionMessage.sDestinationUUID.
- time_low,
- pMessage->sOpenClientSessionMessage.sDestinationUUID.
- time_mid,
- pMessage->sOpenClientSessionMessage.sDestinationUUID.
- time_hi_and_version,
- pMessage->sOpenClientSessionMessage.sDestinationUUID.
- clock_seq_and_node[0],
- pMessage->sOpenClientSessionMessage.sDestinationUUID.
- clock_seq_and_node[1],
- pMessage->sOpenClientSessionMessage.sDestinationUUID.
- clock_seq_and_node[2],
- pMessage->sOpenClientSessionMessage.sDestinationUUID.
- clock_seq_and_node[3],
- pMessage->sOpenClientSessionMessage.sDestinationUUID.
- clock_seq_and_node[4],
- pMessage->sOpenClientSessionMessage.sDestinationUUID.
- clock_seq_and_node[5],
- pMessage->sOpenClientSessionMessage.sDestinationUUID.
- clock_seq_and_node[6],
- pMessage->sOpenClientSessionMessage.sDestinationUUID.
- clock_seq_and_node[7]
- );
-
- for (i = 0; i < 4; i++) {
- uint32_t *pParam = (uint32_t *) &pMessage->
- sOpenClientSessionMessage.sParams[i];
- dprintk(KERN_INFO " sParams[%d] = "
- "0x%08X:0x%08X:0x%08X\n",
- i, pParam[0], pParam[1], pParam[2]);
- }
-
- switch (SCX_LOGIN_GET_MAIN_TYPE(
- pMessage->sOpenClientSessionMessage.nLoginType)) {
- case SCX_LOGIN_PUBLIC:
- dprintk(
- KERN_INFO " nLoginType = "
- "SCX_LOGIN_PUBLIC\n");
- break;
- case SCX_LOGIN_USER:
- dprintk(
- KERN_INFO " nLoginType = "
- "SCX_LOGIN_USER\n");
- break;
- case SCX_LOGIN_GROUP:
- dprintk(
- KERN_INFO " nLoginType = "
- "SCX_LOGIN_GROUP\n");
- break;
- case SCX_LOGIN_APPLICATION:
- dprintk(
- KERN_INFO " nLoginType = "
- "SCX_LOGIN_APPLICATION\n");
- break;
- case SCX_LOGIN_APPLICATION_USER:
- dprintk(
- KERN_INFO " nLoginType = "
- "SCX_LOGIN_APPLICATION_USER\n");
- break;
- case SCX_LOGIN_APPLICATION_GROUP:
- dprintk(
- KERN_INFO " nLoginType = "
- "SCX_LOGIN_APPLICATION_GROUP\n");
- break;
- case SCX_LOGIN_AUTHENTICATION:
- dprintk(
- KERN_INFO " nLoginType = "
- "SCX_LOGIN_AUTHENTICATION\n");
- break;
- case SCX_LOGIN_PRIVILEGED:
- dprintk(
- KERN_INFO " nLoginType = "
- "SCX_LOGIN_PRIVILEGED\n");
- break;
- default:
- dprintk(
- KERN_ERR " nLoginType = "
- "0x%08X (Unknown login type)\n",
- pMessage->sOpenClientSessionMessage.nLoginType);
- break;
- }
-
- dprintk(
- KERN_INFO " sLoginData = ");
- for (i = 0; i < 20; i++)
- dprintk(
- KERN_INFO "%d",
- pMessage->sOpenClientSessionMessage.
- sLoginData[i]);
- dprintk("\n");
- break;
-
- case SCX_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_MESSAGE_TYPE_CLOSE_CLIENT_SESSION\n"
- " nOperationID = 0x%08X\n"
- " hDeviceContext = 0x%08X\n"
- " hClientSession = 0x%08X\n",
- pMessage->sHeader.nMessageSize,
- pMessage->sHeader.nMessageType,
- pMessage->sHeader.nOperationID,
- pMessage->sCloseClientSessionMessage.hDeviceContext,
- pMessage->sCloseClientSessionMessage.hClientSession
- );
- break;
-
- case SCX_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_MESSAGE_TYPE_REGISTER_SHARED_MEMORY\n"
- " nMemoryFlags = 0x%04X\n"
- " nOperationID = 0x%08X\n"
- " hDeviceContext = 0x%08X\n"
- " nBlockID = 0x%08X\n"
- " nSharedMemSize = 0x%08X\n"
- " nSharedMemStartOffset = 0x%08X\n"
- " nSharedMemDescriptors[0] = 0x%08X\n"
- " nSharedMemDescriptors[1] = 0x%08X\n"
- " nSharedMemDescriptors[2] = 0x%08X\n"
- " nSharedMemDescriptors[3] = 0x%08X\n"
- " nSharedMemDescriptors[4] = 0x%08X\n"
- " nSharedMemDescriptors[5] = 0x%08X\n"
- " nSharedMemDescriptors[6] = 0x%08X\n"
- " nSharedMemDescriptors[7] = 0x%08X\n",
- pMessage->sHeader.nMessageSize,
- pMessage->sHeader.nMessageType,
- pMessage->sRegisterSharedMemoryMessage.nMemoryFlags,
- pMessage->sHeader.nOperationID,
- pMessage->sRegisterSharedMemoryMessage.hDeviceContext,
- pMessage->sRegisterSharedMemoryMessage.nBlockID,
- pMessage->sRegisterSharedMemoryMessage.nSharedMemSize,
- pMessage->sRegisterSharedMemoryMessage.
- nSharedMemStartOffset,
- pMessage->sRegisterSharedMemoryMessage.
- nSharedMemDescriptors[0],
- pMessage->sRegisterSharedMemoryMessage.
- nSharedMemDescriptors[1],
- pMessage->sRegisterSharedMemoryMessage.
- nSharedMemDescriptors[2],
- pMessage->sRegisterSharedMemoryMessage.
- nSharedMemDescriptors[3],
- pMessage->sRegisterSharedMemoryMessage.
- nSharedMemDescriptors[4],
- pMessage->sRegisterSharedMemoryMessage.
- nSharedMemDescriptors[5],
- pMessage->sRegisterSharedMemoryMessage.
- nSharedMemDescriptors[6],
- pMessage->sRegisterSharedMemoryMessage.
- nSharedMemDescriptors[7]);
- break;
-
- case SCX_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_MESSAGE_TYPE_RELEASE_SHARED_MEMORY\n"
- " nOperationID = 0x%08X\n"
- " hDeviceContext = 0x%08X\n"
- " hBlock = 0x%08X\n",
- pMessage->sHeader.nMessageSize,
- pMessage->sHeader.nMessageType,
- pMessage->sHeader.nOperationID,
- pMessage->sReleaseSharedMemoryMessage.hDeviceContext,
- pMessage->sReleaseSharedMemoryMessage.hBlock);
- break;
-
- case SCX_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND\n"
- " nParamTypes = 0x%04X\n"
- " nOperationID = 0x%08X\n"
- " hDeviceContext = 0x%08X\n"
- " hClientSession = 0x%08X\n"
- " sTimeout = 0x%016llX\n"
- " nCancellationID = 0x%08X\n"
- " nClientCommandIdentifier = 0x%08X\n",
- pMessage->sHeader.nMessageSize,
- pMessage->sHeader.nMessageType,
- pMessage->sInvokeClientCommandMessage.nParamTypes,
- pMessage->sHeader.nOperationID,
- pMessage->sInvokeClientCommandMessage.hDeviceContext,
- pMessage->sInvokeClientCommandMessage.hClientSession,
- pMessage->sInvokeClientCommandMessage.sTimeout,
- pMessage->sInvokeClientCommandMessage.nCancellationID,
- pMessage->sInvokeClientCommandMessage.
- nClientCommandIdentifier
- );
-
- for (i = 0; i < 4; i++) {
- uint32_t *pParam = (uint32_t *) &pMessage->
- sOpenClientSessionMessage.sParams[i];
- dprintk(KERN_INFO " sParams[%d] = "
- "0x%08X:0x%08X:0x%08X\n", i,
- pParam[0], pParam[1], pParam[2]);
- }
- break;
-
- case SCX_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND\n"
- " nOperationID = 0x%08X\n"
- " hDeviceContext = 0x%08X\n"
- " hClientSession = 0x%08X\n",
- pMessage->sHeader.nMessageSize,
- pMessage->sHeader.nMessageType,
- pMessage->sHeader.nOperationID,
- pMessage->sCancelClientOperationMessage.hDeviceContext,
- pMessage->sCancelClientOperationMessage.hClientSession);
- break;
-
- case SCX_MESSAGE_TYPE_MANAGEMENT:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_MESSAGE_TYPE_MANAGEMENT\n"
- " nOperationID = 0x%08X\n"
- " nCommand = 0x%08X\n"
- " nW3BSize = 0x%08X\n"
- " nW3BStartOffset = 0x%08X\n",
- pMessage->sHeader.nMessageSize,
- pMessage->sHeader.nMessageType,
- pMessage->sHeader.nOperationID,
- pMessage->sManagementMessage.nCommand,
- pMessage->sManagementMessage.nW3BSize,
- pMessage->sManagementMessage.nW3BStartOffset);
- break;
-
- default:
- dprintk(
- KERN_ERR " nMessageType = 0x%08X "
- "(Unknown message type)\n",
- pMessage->sHeader.nMessageType);
- break;
- }
-}
-
-
-/*
- * Dump the specified SChannel answer using dprintk.
- */
-void SCXLNXDumpAnswer(union SCX_ANSWER_MESSAGE *pAnswer)
-{
- u32 i;
- dprintk(
- KERN_INFO "answer@%p:\n",
- pAnswer);
-
- switch (pAnswer->sHeader.nMessageType) {
- case SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_ANSWER_CREATE_DEVICE_CONTEXT\n"
- " nOperationID = 0x%08X\n"
- " nErrorCode = 0x%08X\n"
- " hDeviceContext = 0x%08X\n",
- pAnswer->sHeader.nMessageSize,
- pAnswer->sHeader.nMessageType,
- pAnswer->sHeader.nOperationID,
- pAnswer->sCreateDeviceContextAnswer.nErrorCode,
- pAnswer->sCreateDeviceContextAnswer.hDeviceContext);
- break;
-
- case SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "ANSWER_DESTROY_DEVICE_CONTEXT\n"
- " nOperationID = 0x%08X\n"
- " nErrorCode = 0x%08X\n"
- " nDeviceContextID = 0x%08X\n",
- pAnswer->sHeader.nMessageSize,
- pAnswer->sHeader.nMessageType,
- pAnswer->sHeader.nOperationID,
- pAnswer->sDestroyDeviceContextAnswer.nErrorCode,
- pAnswer->sDestroyDeviceContextAnswer.nDeviceContextID);
- break;
-
-
- case SCX_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_ANSWER_OPEN_CLIENT_SESSION\n"
- " nReturnOrigin = 0x%02X\n"
- " nOperationID = 0x%08X\n"
- " nErrorCode = 0x%08X\n"
- " hClientSession = 0x%08X\n",
- pAnswer->sHeader.nMessageSize,
- pAnswer->sHeader.nMessageType,
- pAnswer->sOpenClientSessionAnswer.nReturnOrigin,
- pAnswer->sHeader.nOperationID,
- pAnswer->sOpenClientSessionAnswer.nErrorCode,
- pAnswer->sOpenClientSessionAnswer.hClientSession);
- for (i = 0; i < 4; i++) {
- dprintk(KERN_INFO " sAnswers[%d]=0x%08X:0x%08X\n",
- i,
- pAnswer->sOpenClientSessionAnswer.sAnswers[i].
- sValue.a,
- pAnswer->sOpenClientSessionAnswer.sAnswers[i].
- sValue.b);
- }
- break;
-
- case SCX_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "ANSWER_CLOSE_CLIENT_SESSION\n"
- " nOperationID = 0x%08X\n"
- " nErrorCode = 0x%08X\n",
- pAnswer->sHeader.nMessageSize,
- pAnswer->sHeader.nMessageType,
- pAnswer->sHeader.nOperationID,
- pAnswer->sCloseClientSessionAnswer.nErrorCode);
- break;
-
- case SCX_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_ANSWER_REGISTER_SHARED_MEMORY\n"
- " nOperationID = 0x%08X\n"
- " nErrorCode = 0x%08X\n"
- " hBlock = 0x%08X\n",
- pAnswer->sHeader.nMessageSize,
- pAnswer->sHeader.nMessageType,
- pAnswer->sHeader.nOperationID,
- pAnswer->sRegisterSharedMemoryAnswer.nErrorCode,
- pAnswer->sRegisterSharedMemoryAnswer.hBlock);
- break;
-
- case SCX_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "ANSWER_RELEASE_SHARED_MEMORY\n"
- " nOperationID = 0x%08X\n"
- " nErrorCode = 0x%08X\n"
- " nBlockID = 0x%08X\n",
- pAnswer->sHeader.nMessageSize,
- pAnswer->sHeader.nMessageType,
- pAnswer->sHeader.nOperationID,
- pAnswer->sReleaseSharedMemoryAnswer.nErrorCode,
- pAnswer->sReleaseSharedMemoryAnswer.nBlockID);
- break;
-
- case SCX_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_ANSWER_INVOKE_CLIENT_COMMAND\n"
- " nReturnOrigin = 0x%02X\n"
- " nOperationID = 0x%08X\n"
- " nErrorCode = 0x%08X\n",
- pAnswer->sHeader.nMessageSize,
- pAnswer->sHeader.nMessageType,
- pAnswer->sInvokeClientCommandAnswer.nReturnOrigin,
- pAnswer->sHeader.nOperationID,
- pAnswer->sInvokeClientCommandAnswer.nErrorCode
- );
- for (i = 0; i < 4; i++) {
- dprintk(KERN_INFO " sAnswers[%d]=0x%08X:0x%08X\n",
- i,
- pAnswer->sInvokeClientCommandAnswer.sAnswers[i].
- sValue.a,
- pAnswer->sInvokeClientCommandAnswer.sAnswers[i].
- sValue.b);
- }
- break;
-
- case SCX_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_ANSWER_CANCEL_CLIENT_COMMAND\n"
- " nOperationID = 0x%08X\n"
- " nErrorCode = 0x%08X\n",
- pAnswer->sHeader.nMessageSize,
- pAnswer->sHeader.nMessageType,
- pAnswer->sHeader.nOperationID,
- pAnswer->sCancelClientOperationAnswer.nErrorCode);
- break;
-
- case SCX_MESSAGE_TYPE_MANAGEMENT:
- dprintk(KERN_INFO
- " nMessageSize = 0x%02X\n"
- " nMessageType = 0x%02X "
- "SCX_MESSAGE_TYPE_MANAGEMENT\n"
- " nOperationID = 0x%08X\n"
- " nErrorCode = 0x%08X\n",
- pAnswer->sHeader.nMessageSize,
- pAnswer->sHeader.nMessageType,
- pAnswer->sHeader.nOperationID,
- pAnswer->sHeader.nErrorCode);
- break;
-
- default:
- dprintk(
- KERN_ERR " nMessageType = 0x%02X "
- "(Unknown message type)\n",
- pAnswer->sHeader.nMessageType);
- break;
-
- }
-}
-
-#endif /* defined(TF_DRIVER_DEBUG_SUPPORT) */
-
-/*----------------------------------------------------------------------------
- * SHA-1 implementation
- * This is taken from the Linux kernel source crypto/sha1.c
- *----------------------------------------------------------------------------*/
-
-struct sha1_ctx {
- u64 count;
- u32 state[5];
- u8 buffer[64];
-};
-
-static inline u32 rol(u32 value, u32 bits)
-{
- return ((value) << (bits)) | ((value) >> (32 - (bits)));
-}
-
-/* blk0() and blk() perform the initial expand. */
-/* I got the idea of expanding during the round function from SSLeay */
-#define blk0(i) block32[i]
-
-#define blk(i) (block32[i & 15] = rol( \
- block32[(i + 13) & 15] ^ block32[(i + 8) & 15] ^ \
- block32[(i + 2) & 15] ^ block32[i & 15], 1))
-
-/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
-#define R0(v, w, x, y, z, i) do { \
- z += ((w & (x ^ y)) ^ y) + blk0(i) + 0x5A827999 + rol(v, 5); \
- w = rol(w, 30); } while (0)
-
-#define R1(v, w, x, y, z, i) do { \
- z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
- w = rol(w, 30); } while (0)
-
-#define R2(v, w, x, y, z, i) do { \
- z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \
- w = rol(w, 30); } while (0)
-
-#define R3(v, w, x, y, z, i) do { \
- z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
- w = rol(w, 30); } while (0)
-
-#define R4(v, w, x, y, z, i) do { \
- z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
- w = rol(w, 30); } while (0)
-
-
-/* Hash a single 512-bit block. This is the core of the algorithm. */
-static void sha1_transform(u32 *state, const u8 *in)
-{
- u32 a, b, c, d, e;
- u32 block32[16];
-
- /* convert/copy data to workspace */
- for (a = 0; a < sizeof(block32)/sizeof(u32); a++)
- block32[a] = ((u32) in[4 * a]) << 24 |
- ((u32) in[4 * a + 1]) << 16 |
- ((u32) in[4 * a + 2]) << 8 |
- ((u32) in[4 * a + 3]);
-
- /* Copy context->state[] to working vars */
- a = state[0];
- b = state[1];
- c = state[2];
- d = state[3];
- e = state[4];
-
- /* 4 rounds of 20 operations each. Loop unrolled. */
- R0(a, b, c, d, e, 0); R0(e, a, b, c, d, 1);
- R0(d, e, a, b, c, 2); R0(c, d, e, a, b, 3);
- R0(b, c, d, e, a, 4); R0(a, b, c, d, e, 5);
- R0(e, a, b, c, d, 6); R0(d, e, a, b, c, 7);
- R0(c, d, e, a, b, 8); R0(b, c, d, e, a, 9);
- R0(a, b, c, d, e, 10); R0(e, a, b, c, d, 11);
- R0(d, e, a, b, c, 12); R0(c, d, e, a, b, 13);
- R0(b, c, d, e, a, 14); R0(a, b, c, d, e, 15);
-
- R1(e, a, b, c, d, 16); R1(d, e, a, b, c, 17);
- R1(c, d, e, a, b, 18); R1(b, c, d, e, a, 19);
-
- R2(a, b, c, d, e, 20); R2(e, a, b, c, d, 21);
- R2(d, e, a, b, c, 22); R2(c, d, e, a, b, 23);
- R2(b, c, d, e, a, 24); R2(a, b, c, d, e, 25);
- R2(e, a, b, c, d, 26); R2(d, e, a, b, c, 27);
- R2(c, d, e, a, b, 28); R2(b, c, d, e, a, 29);
- R2(a, b, c, d, e, 30); R2(e, a, b, c, d, 31);
- R2(d, e, a, b, c, 32); R2(c, d, e, a, b, 33);
- R2(b, c, d, e, a, 34); R2(a, b, c, d, e, 35);
- R2(e, a, b, c, d, 36); R2(d, e, a, b, c, 37);
- R2(c, d, e, a, b, 38); R2(b, c, d, e, a, 39);
-
- R3(a, b, c, d, e, 40); R3(e, a, b, c, d, 41);
- R3(d, e, a, b, c, 42); R3(c, d, e, a, b, 43);
- R3(b, c, d, e, a, 44); R3(a, b, c, d, e, 45);
- R3(e, a, b, c, d, 46); R3(d, e, a, b, c, 47);
- R3(c, d, e, a, b, 48); R3(b, c, d, e, a, 49);
- R3(a, b, c, d, e, 50); R3(e, a, b, c, d, 51);
- R3(d, e, a, b, c, 52); R3(c, d, e, a, b, 53);
- R3(b, c, d, e, a, 54); R3(a, b, c, d, e, 55);
- R3(e, a, b, c, d, 56); R3(d, e, a, b, c, 57);
- R3(c, d, e, a, b, 58); R3(b, c, d, e, a, 59);
-
- R4(a, b, c, d, e, 60); R4(e, a, b, c, d, 61);
- R4(d, e, a, b, c, 62); R4(c, d, e, a, b, 63);
- R4(b, c, d, e, a, 64); R4(a, b, c, d, e, 65);
- R4(e, a, b, c, d, 66); R4(d, e, a, b, c, 67);
- R4(c, d, e, a, b, 68); R4(b, c, d, e, a, 69);
- R4(a, b, c, d, e, 70); R4(e, a, b, c, d, 71);
- R4(d, e, a, b, c, 72); R4(c, d, e, a, b, 73);
- R4(b, c, d, e, a, 74); R4(a, b, c, d, e, 75);
- R4(e, a, b, c, d, 76); R4(d, e, a, b, c, 77);
- R4(c, d, e, a, b, 78); R4(b, c, d, e, a, 79);
-
- /* Add the working vars back into context.state[] */
- state[0] += a;
- state[1] += b;
- state[2] += c;
- state[3] += d;
- state[4] += e;
- /* Wipe variables */
- a = b = c = d = e = 0;
- memset(block32, 0x00, sizeof(block32));
-}
-
-
-static void sha1_init(void *ctx)
-{
- struct sha1_ctx *sctx = ctx;
- static const struct sha1_ctx initstate = {
- 0,
- { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 },
- { 0, }
- };
-
- *sctx = initstate;
-}
-
-
-static void sha1_update(void *ctx, const u8 *data, unsigned int len)
-{
- struct sha1_ctx *sctx = ctx;
- unsigned int i, j;
-
- j = (sctx->count >> 3) & 0x3f;
- sctx->count += len << 3;
-
- if ((j + len) > 63) {
- memcpy(&sctx->buffer[j], data, (i = 64 - j));
- sha1_transform(sctx->state, sctx->buffer);
- for ( ; i + 63 < len; i += 64)
- sha1_transform(sctx->state, &data[i]);
- j = 0;
- } else
- i = 0;
- memcpy(&sctx->buffer[j], &data[i], len - i);
-}
-
-
-/* Add padding and return the message digest. */
-static void sha1_final(void *ctx, u8 *out)
-{
- struct sha1_ctx *sctx = ctx;
- u32 i, j, index, padlen;
- u64 t;
- u8 bits[8] = { 0, };
- static const u8 padding[64] = { 0x80, };
-
- t = sctx->count;
- bits[7] = 0xff & t; t >>= 8;
- bits[6] = 0xff & t; t >>= 8;
- bits[5] = 0xff & t; t >>= 8;
- bits[4] = 0xff & t; t >>= 8;
- bits[3] = 0xff & t; t >>= 8;
- bits[2] = 0xff & t; t >>= 8;
- bits[1] = 0xff & t; t >>= 8;
- bits[0] = 0xff & t;
-
- /* Pad out to 56 mod 64 */
- index = (sctx->count >> 3) & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- sha1_update(sctx, padding, padlen);
-
- /* Append length */
- sha1_update(sctx, bits, sizeof(bits));
-
- /* Store state in digest */
- for (i = j = 0; i < 5; i++, j += 4) {
- u32 t2 = sctx->state[i];
- out[j+3] = t2 & 0xff; t2 >>= 8;
- out[j+2] = t2 & 0xff; t2 >>= 8;
- out[j+1] = t2 & 0xff; t2 >>= 8;
- out[j] = t2 & 0xff;
- }
-
- /* Wipe context */
- memset(sctx, 0, sizeof(*sctx));
-}
-
-
-
-
-/*----------------------------------------------------------------------------
- * Process identification
- *----------------------------------------------------------------------------*/
-
-/* This function generates a processes hash table for authentication */
-int SCXLNXConnGetCurrentProcessHash(void *pHash)
-{
- int nResult = 0;
- void *buffer;
- struct mm_struct *mm;
- struct vm_area_struct *vma;
-
- buffer = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (buffer == NULL) {
- dprintk(
- KERN_ERR "SCXLNXConnGetCurrentProcessHash:"
- KERN_ERR " Out of memory for buffer!\n");
- return -ENOMEM;
- }
-
- mm = current->mm;
-
- down_read(&(mm->mmap_sem));
- for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
- if ((vma->vm_flags & VM_EXECUTABLE) != 0 && vma->vm_file
- != NULL) {
- struct dentry *dentry;
- unsigned long start;
- unsigned long cur;
- unsigned long end;
- struct sha1_ctx sha1Context;
-
- dentry = dget(vma->vm_file->f_dentry);
-
- dprintk(
- KERN_DEBUG "SCXLNXConnGetCurrentProcessHash: "
- "Found executable VMA for inode %lu "
- "(%lu bytes).\n",
- dentry->d_inode->i_ino,
- (unsigned long) (dentry->d_inode->
- i_size));
-
- start = do_mmap(vma->vm_file, 0,
- dentry->d_inode->i_size,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE, 0);
- if (start < 0) {
- dprintk(
- KERN_ERR "SCXLNXConnGetCurrentProcess"
- "Hash: do_mmap failed (error %d)!\n",
- (int) start);
- dput(dentry);
- nResult = -EFAULT;
- goto vma_out;
- }
-
- end = start + dentry->d_inode->i_size;
-
- sha1_init(&sha1Context);
- cur = start;
- while (cur < end) {
- unsigned long chunk;
-
- chunk = end - cur;
- if (chunk > PAGE_SIZE)
- chunk = PAGE_SIZE;
- if (copy_from_user(buffer, (const void *) cur,
- chunk) != 0) {
- dprintk(
- KERN_ERR "SCXLNXConnGetCurrent"
- "ProcessHash: copy_from_user "
- "failed!\n");
- nResult = -EINVAL;
- (void) do_munmap(mm, start,
- dentry->d_inode->i_size);
- dput(dentry);
- goto vma_out;
- }
- sha1_update(&sha1Context, buffer, chunk);
- cur += chunk;
- }
- sha1_final(&sha1Context, pHash);
- nResult = 0;
-
- (void) do_munmap(mm, start, dentry->d_inode->i_size);
- dput(dentry);
- break;
- }
- }
-vma_out:
- up_read(&(mm->mmap_sem));
-
- internal_kfree(buffer);
-
- if (nResult == -ENOENT)
- dprintk(
- KERN_ERR "SCXLNXConnGetCurrentProcessHash: "
- "No executable VMA found for process!\n");
- return nResult;
-}
-
-
-/* This function hashes the path of the current application.
- * If pData = NULL ,nothing else is added to the hash
- else add pData to the hash
- */
-int SCXLNXConnHashApplicationPathAndData(char *pBuffer, void *pData,
- u32 nDataLen)
-{
- int nResult = -ENOENT;
- char *buffer = NULL;
- struct mm_struct *mm;
- struct vm_area_struct *vma;
-
- buffer = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (buffer == NULL) {
- nResult = -ENOMEM;
- goto end;
- }
-
- mm = current->mm;
-
- down_read(&(mm->mmap_sem));
- for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
- if ((vma->vm_flags & VM_EXECUTABLE) != 0
- && vma->vm_file != NULL) {
- struct path *path;
- char *endpath;
- size_t pathlen;
- struct sha1_ctx sha1Context;
- u8 pHashData[SHA1_DIGEST_SIZE];
-
- path = &vma->vm_file->f_path;
-
- endpath = d_path(path, buffer, PAGE_SIZE);
- if (IS_ERR(path)) {
- nResult = PTR_ERR(endpath);
- up_read(&(mm->mmap_sem));
- goto end;
- }
- pathlen = (buffer + PAGE_SIZE) - endpath;
-
-#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
- {
- char *pChar;
- dprintk(KERN_DEBUG "current process path = ");
- for (pChar = endpath;
- pChar < buffer + PAGE_SIZE;
- pChar++)
- dprintk("%c", *pChar);
-
- dprintk(", uid=%d, euid=%d\n", current_uid(),
- current_euid());
- }
-#endif /*defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
-
- sha1_init(&sha1Context);
- sha1_update(&sha1Context, endpath, pathlen);
- if (pData != NULL) {
- dprintk(KERN_INFO "SCXLNXConnHashApplication"
- "PathAndData: Hashing additional"
- "data\n");
- sha1_update(&sha1Context, pData, nDataLen);
- }
- sha1_final(&sha1Context, pHashData);
- memcpy(pBuffer, pHashData, sizeof(pHashData));
-
- nResult = 0;
-
- break;
- }
- }
- up_read(&(mm->mmap_sem));
-
- end:
- if (buffer != NULL)
- internal_kfree(buffer);
-
- return nResult;
-}
-
-void *internal_kmalloc(size_t nSize, int nPriority)
-{
- void *pResult;
- struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
-
- pResult = kmalloc(nSize, nPriority);
-
- if (pResult != NULL)
- atomic_inc(
- &pDevice->sDeviceStats.stat_memories_allocated);
-
- return pResult;
-}
-
-void internal_kfree(void *pMemory)
-{
- struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
-
- if (pMemory != NULL)
- atomic_dec(
- &pDevice->sDeviceStats.stat_memories_allocated);
- return kfree(pMemory);
-}
-
-void internal_vunmap(void *pMemory)
-{
- struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
-
- if (pMemory != NULL)
- atomic_dec(
- &pDevice->sDeviceStats.stat_memories_allocated);
-
- vunmap((void *) (((unsigned int)pMemory) & 0xFFFFF000));
-}
-
-void *internal_vmalloc(size_t nSize)
-{
- void *pResult;
- struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
-
- pResult = vmalloc(nSize);
-
- if (pResult != NULL)
- atomic_inc(
- &pDevice->sDeviceStats.stat_memories_allocated);
-
- return pResult;
-}
-
-void internal_vfree(void *pMemory)
-{
- struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
-
- if (pMemory != NULL)
- atomic_dec(
- &pDevice->sDeviceStats.stat_memories_allocated);
- return vfree(pMemory);
-}
-
-unsigned long internal_get_zeroed_page(int nPriority)
-{
- unsigned long nResult;
- struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
-
- nResult = get_zeroed_page(nPriority);
-
- if (nResult != 0)
- atomic_inc(&pDevice->sDeviceStats.
- stat_pages_allocated);
-
- return nResult;
-}
-
-void internal_free_page(unsigned long pPage)
-{
- struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
-
- if (pPage != 0)
- atomic_dec(
- &pDevice->sDeviceStats.stat_pages_allocated);
- return free_page(pPage);
-}
-
-int internal_get_user_pages(
- struct task_struct *tsk,
- struct mm_struct *mm,
- unsigned long start,
- int len,
- int write,
- int force,
- struct page **pages,
- struct vm_area_struct **vmas)
-{
- int nResult;
- struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
-
- nResult = get_user_pages(
- tsk,
- mm,
- start,
- len,
- write,
- force,
- pages,
- vmas);
-
- if (nResult > 0)
- atomic_add(nResult,
- &pDevice->sDeviceStats.stat_pages_locked);
-
- return nResult;
-}
-
-void internal_get_page(struct page *page)
-{
- struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
-
- atomic_inc(&pDevice->sDeviceStats.stat_pages_locked);
-
- get_page(page);
-}
-
-void internal_page_cache_release(struct page *page)
-{
- struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
-
- atomic_dec(&pDevice->sDeviceStats.stat_pages_locked);
-
- page_cache_release(page);
-}
-
-
diff --git a/security/tf_driver/tf_comm.c b/security/tf_driver/tf_comm.c
new file mode 100644
index 000000000000..49608fa988b9
--- /dev/null
+++ b/security/tf_driver/tf_comm.c
@@ -0,0 +1,1766 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <linux/version.h>
+#include <asm/cputype.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
+#include <linux/freezer.h>
+
+#include "tf_defs.h"
+#include "tf_comm.h"
+#include "tf_protocol.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+
+#ifdef CONFIG_TF_ZEBRA
+#include "tf_zebra.h"
+#endif
+
+/*---------------------------------------------------------------------------
+ * Internal Constants
+ *---------------------------------------------------------------------------*/
+
+/*
+ * shared memories descriptor constants
+ */
+#define DESCRIPTOR_B_MASK (1 << 2)
+#define DESCRIPTOR_C_MASK (1 << 3)
+#define DESCRIPTOR_S_MASK (1 << 10)
+
+#define L1_COARSE_DESCRIPTOR_BASE (0x00000001)
+#define L1_COARSE_DESCRIPTOR_ADDR_MASK (0xFFFFFC00)
+#define L1_COARSE_DESCRIPTOR_V13_12_SHIFT (5)
+
+#define L2_PAGE_DESCRIPTOR_BASE (0x00000003)
+#define L2_PAGE_DESCRIPTOR_AP_APX_READ (0x220)
+#define L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE (0x30)
+
+#define L2_INIT_DESCRIPTOR_BASE (0x00000003)
+#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4)
+
+/*
+ * Reject an attempt to share a strongly-Ordered or Device memory
+ * Strongly-Ordered: TEX=0b000, C=0, B=0
+ * Shared Device: TEX=0b000, C=0, B=1
+ * Non-Shared Device: TEX=0b010, C=0, B=0
+ */
+#define L2_TEX_C_B_MASK \
+ ((1<<8) | (1<<7) | (1<<6) | (1<<3) | (1<<2))
+#define L2_TEX_C_B_STRONGLY_ORDERED \
+ ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (0<<2))
+#define L2_TEX_C_B_SHARED_DEVICE \
+ ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (1<<2))
+#define L2_TEX_C_B_NON_SHARED_DEVICE \
+ ((0<<8) | (1<<7) | (0<<6) | (0<<3) | (0<<2))
+
+#define CACHE_S(x) ((x) & (1 << 24))
+#define CACHE_DSIZE(x) (((x) >> 12) & 4095)
+
+#define TIME_IMMEDIATE ((u64) 0x0000000000000000ULL)
+#define TIME_INFINITE ((u64) 0xFFFFFFFFFFFFFFFFULL)
+
+/*---------------------------------------------------------------------------
+ * atomic operation definitions
+ *---------------------------------------------------------------------------*/
+
+/*
+ * Atomically updates the sync_serial_n and time_n register
+ * sync_serial_n and time_n modifications are thread safe
+ */
+void tf_set_current_time(struct tf_comm *comm)
+{
+ u32 new_sync_serial;
+ struct timeval now;
+ u64 time64;
+
+ /*
+ * lock the structure while updating the L1 shared memory fields
+ */
+ spin_lock(&comm->lock);
+
+ /* read sync_serial_n and change the TimeSlot bit field */
+ new_sync_serial =
+ tf_read_reg32(&comm->l1_buffer->sync_serial_n) + 1;
+
+ do_gettimeofday(&now);
+ time64 = now.tv_sec;
+ time64 = (time64 * 1000) + (now.tv_usec / 1000);
+
+	/* Write the new time64 and new_sync_serial into shared memory */
+ tf_write_reg64(&comm->l1_buffer->time_n[new_sync_serial &
+ TF_SYNC_SERIAL_TIMESLOT_N], time64);
+ tf_write_reg32(&comm->l1_buffer->sync_serial_n,
+ new_sync_serial);
+
+ spin_unlock(&comm->lock);
+}
+
+/*
+ * Performs the specific read timeout operation
+ * The difficulty here is to read atomically 2 u32
+ * values from the L1 shared buffer.
+ * This is guaranteed by reading before and after the operation
+ * the timeslot given by the Secure World
+ */
+static inline void tf_read_timeout(struct tf_comm *comm, u64 *time)
+{
+ u32 sync_serial_s_initial = 0;
+ u32 sync_serial_s_final = 1;
+ u64 time64;
+
+ spin_lock(&comm->lock);
+
+ while (sync_serial_s_initial != sync_serial_s_final) {
+ sync_serial_s_initial = tf_read_reg32(
+ &comm->l1_buffer->sync_serial_s);
+ time64 = tf_read_reg64(
+ &comm->l1_buffer->timeout_s[sync_serial_s_initial&1]);
+
+ sync_serial_s_final = tf_read_reg32(
+ &comm->l1_buffer->sync_serial_s);
+ }
+
+ spin_unlock(&comm->lock);
+
+ *time = time64;
+}
+
+/*----------------------------------------------------------------------------
+ * SIGKILL signal handling
+ *----------------------------------------------------------------------------*/
+
+static bool sigkill_pending(void)
+{
+ if (signal_pending(current)) {
+ dprintk(KERN_INFO "A signal is pending\n");
+ if (sigismember(&current->pending.signal, SIGKILL)) {
+ dprintk(KERN_INFO "A SIGKILL is pending\n");
+ return true;
+ } else if (sigismember(
+ &current->signal->shared_pending.signal, SIGKILL)) {
+ dprintk(KERN_INFO "A SIGKILL is pending (shared)\n");
+ return true;
+ }
+ }
+ return false;
+}
+
+/*----------------------------------------------------------------------------
+ * Shared memory related operations
+ *----------------------------------------------------------------------------*/
+
+struct tf_coarse_page_table *tf_alloc_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ u32 type)
+{
+ struct tf_coarse_page_table *coarse_pg_table = NULL;
+
+ spin_lock(&(alloc_context->lock));
+
+ if (!(list_empty(&(alloc_context->free_coarse_page_tables)))) {
+ /*
+ * The free list can provide us a coarse page table
+ * descriptor
+ */
+ coarse_pg_table = list_first_entry(
+ &alloc_context->free_coarse_page_tables,
+ struct tf_coarse_page_table, list);
+ list_del(&(coarse_pg_table->list));
+
+ coarse_pg_table->parent->ref_count++;
+ } else {
+ /* no array of coarse page tables, create a new one */
+ struct tf_coarse_page_table_array *array;
+ void *page;
+ int i;
+
+ spin_unlock(&(alloc_context->lock));
+
+ /* first allocate a new page descriptor */
+ array = internal_kmalloc(sizeof(*array), GFP_KERNEL);
+ if (array == NULL) {
+ dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
+ " failed to allocate a table array\n",
+ alloc_context);
+ return NULL;
+ }
+
+ array->type = type;
+ INIT_LIST_HEAD(&(array->list));
+
+ /* now allocate the actual page the page descriptor describes */
+ page = (void *) internal_get_zeroed_page(GFP_KERNEL);
+ if (page == NULL) {
+ dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
+ " failed allocate a page\n",
+ alloc_context);
+ internal_kfree(array);
+ return NULL;
+ }
+
+ spin_lock(&(alloc_context->lock));
+
+ /* initialize the coarse page table descriptors */
+ for (i = 0; i < 4; i++) {
+ INIT_LIST_HEAD(&(array->coarse_page_tables[i].list));
+ array->coarse_page_tables[i].descriptors =
+ page + (i * SIZE_1KB);
+ array->coarse_page_tables[i].parent = array;
+
+ if (i == 0) {
+ /*
+ * the first element is kept for the current
+ * coarse page table allocation
+ */
+ coarse_pg_table =
+ &(array->coarse_page_tables[i]);
+ array->ref_count++;
+ } else {
+ /*
+ * The other elements are added to the free list
+ */
+ list_add(&(array->coarse_page_tables[i].list),
+ &(alloc_context->
+ free_coarse_page_tables));
+ }
+ }
+
+ list_add(&(array->list),
+ &(alloc_context->coarse_page_table_arrays));
+ }
+ spin_unlock(&(alloc_context->lock));
+
+ return coarse_pg_table;
+}
+
+
+void tf_free_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_coarse_page_table *coarse_pg_table,
+ int force)
+{
+ struct tf_coarse_page_table_array *array;
+
+ spin_lock(&(alloc_context->lock));
+
+ array = coarse_pg_table->parent;
+
+ (array->ref_count)--;
+
+ if (array->ref_count == 0) {
+ /*
+ * no coarse page table descriptor is used
+ * check if we should free the whole page
+ */
+
+ if ((array->type == TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED)
+ && (force == 0))
+ /*
+ * This is a preallocated page,
+ * add the page back to the free list
+ */
+ list_add(&(coarse_pg_table->list),
+ &(alloc_context->free_coarse_page_tables));
+ else {
+ /*
+ * None of the page's coarse page table descriptors
+ * are in use, free the whole page
+ */
+ int i;
+ u32 *descriptors;
+
+ /*
+ * remove the page's associated coarse page table
+ * descriptors from the free list
+ */
+ for (i = 0; i < 4; i++)
+ if (&(array->coarse_page_tables[i]) !=
+ coarse_pg_table)
+ list_del(&(array->
+ coarse_page_tables[i].list));
+
+ descriptors =
+ array->coarse_page_tables[0].descriptors;
+ array->coarse_page_tables[0].descriptors = NULL;
+
+ /* remove the coarse page table from the array */
+ list_del(&(array->list));
+
+ spin_unlock(&(alloc_context->lock));
+ /*
+ * Free the page.
+ * The address of the page is contained in the first
+ * element
+ */
+ internal_free_page((unsigned long) descriptors);
+			/* finally free the array */
+ internal_kfree(array);
+
+ spin_lock(&(alloc_context->lock));
+ }
+ } else {
+ /*
+ * Some coarse page table descriptors are in use.
+ * Add the descriptor to the free list
+ */
+ list_add(&(coarse_pg_table->list),
+ &(alloc_context->free_coarse_page_tables));
+ }
+
+ spin_unlock(&(alloc_context->lock));
+}
+
+
+void tf_init_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context)
+{
+ spin_lock_init(&(alloc_context->lock));
+ INIT_LIST_HEAD(&(alloc_context->coarse_page_table_arrays));
+ INIT_LIST_HEAD(&(alloc_context->free_coarse_page_tables));
+}
+
+void tf_release_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context)
+{
+ spin_lock(&(alloc_context->lock));
+
+ /* now clean up the list of page descriptors */
+ while (!list_empty(&(alloc_context->coarse_page_table_arrays))) {
+ struct tf_coarse_page_table_array *page_desc;
+ u32 *descriptors;
+
+ page_desc = list_first_entry(
+ &alloc_context->coarse_page_table_arrays,
+ struct tf_coarse_page_table_array, list);
+
+ descriptors = page_desc->coarse_page_tables[0].descriptors;
+ list_del(&(page_desc->list));
+
+ spin_unlock(&(alloc_context->lock));
+
+ if (descriptors != NULL)
+ internal_free_page((unsigned long)descriptors);
+
+ internal_kfree(page_desc);
+
+ spin_lock(&(alloc_context->lock));
+ }
+
+ spin_unlock(&(alloc_context->lock));
+}
+
+/*
+ * Returns the L1 coarse page descriptor for
+ * a coarse page table located at address coarse_pg_table_descriptors
+ */
+u32 tf_get_l1_coarse_descriptor(
+ u32 coarse_pg_table_descriptors[256])
+{
+ u32 descriptor = L1_COARSE_DESCRIPTOR_BASE;
+ unsigned int info = read_cpuid(CPUID_CACHETYPE);
+
+ descriptor |= (virt_to_phys((void *) coarse_pg_table_descriptors)
+ & L1_COARSE_DESCRIPTOR_ADDR_MASK);
+
+ if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11))) {
+ dprintk(KERN_DEBUG "tf_get_l1_coarse_descriptor "
+ "V31-12 added to descriptor\n");
+ /* the 16k alignment restriction applies */
+ descriptor |= (DESCRIPTOR_V13_12_GET(
+ (u32)coarse_pg_table_descriptors) <<
+ L1_COARSE_DESCRIPTOR_V13_12_SHIFT);
+ }
+
+ return descriptor;
+}
+
+
+#define dprintk_desc(...)
+/*
+ * Returns the L2 descriptor for the specified user page.
+ */
+u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep;
+ u32 *hwpte;
+ u32 tex = 0;
+ u32 descriptor = 0;
+
+ dprintk_desc(KERN_INFO "VirtAddr = %x\n", vaddr);
+ pgd = pgd_offset(mm, vaddr);
+ dprintk_desc(KERN_INFO "pgd = %x, value=%x\n", (unsigned int) pgd,
+ (unsigned int) *pgd);
+ if (pgd_none(*pgd))
+ goto error;
+ pud = pud_offset(pgd, vaddr);
+ dprintk_desc(KERN_INFO "pud = %x, value=%x\n", (unsigned int) pud,
+ (unsigned int) *pud);
+ if (pud_none(*pud))
+ goto error;
+ pmd = pmd_offset(pud, vaddr);
+ dprintk_desc(KERN_INFO "pmd = %x, value=%x\n", (unsigned int) pmd,
+ (unsigned int) *pmd);
+ if (pmd_none(*pmd))
+ goto error;
+
+ if (PMD_TYPE_SECT&(*pmd)) {
+ /* We have a section */
+ dprintk_desc(KERN_INFO "Section descr=%x\n",
+ (unsigned int)*pmd);
+ if ((*pmd) & PMD_SECT_BUFFERABLE)
+ descriptor |= DESCRIPTOR_B_MASK;
+ if ((*pmd) & PMD_SECT_CACHEABLE)
+ descriptor |= DESCRIPTOR_C_MASK;
+ if ((*pmd) & PMD_SECT_S)
+ descriptor |= DESCRIPTOR_S_MASK;
+ tex = ((*pmd) >> 12) & 7;
+ } else {
+ /* We have a table */
+ ptep = pte_offset_map(pmd, vaddr);
+ if (pte_present(*ptep)) {
+ dprintk_desc(KERN_INFO "L2 descr=%x\n",
+ (unsigned int) *ptep);
+ if ((*ptep) & L_PTE_MT_BUFFERABLE)
+ descriptor |= DESCRIPTOR_B_MASK;
+ if ((*ptep) & L_PTE_MT_WRITETHROUGH)
+ descriptor |= DESCRIPTOR_C_MASK;
+ if ((*ptep) & L_PTE_MT_DEV_SHARED)
+ descriptor |= DESCRIPTOR_S_MASK;
+
+ /*
+ * Linux's pte doesn't keep track of TEX value.
+ * Have to jump to hwpte see include/asm/pgtable.h
+ */
+ hwpte = (u32 *) (((u32) ptep) - 0x800);
+ if (((*hwpte) & L2_DESCRIPTOR_ADDR_MASK) !=
+ ((*ptep) & L2_DESCRIPTOR_ADDR_MASK))
+ goto error;
+ dprintk_desc(KERN_INFO "hw descr=%x\n", *hwpte);
+ tex = ((*hwpte) >> 6) & 7;
+ pte_unmap(ptep);
+ } else {
+ pte_unmap(ptep);
+ goto error;
+ }
+ }
+
+ descriptor |= (tex << 6);
+
+ return descriptor;
+
+error:
+ dprintk(KERN_ERR "Error occured in %s\n", __func__);
+ return 0;
+}
+
+
+/*
+ * Changes an L2 page descriptor back to a pointer to a physical page
+ */
+inline struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor)
+{
+ return pte_page(l2_page_descriptor & L2_DESCRIPTOR_ADDR_MASK);
+}
+
+
+/*
+ * Computes the L2 page descriptor for the specified page. The page must be
+ * mapped in the kernel address space.
+ */
+static void tf_get_l2_page_descriptor(
+ u32 *l2_page_descriptor,
+ u32 flags, struct mm_struct *mm)
+{
+ unsigned long page_vaddr;
+ u32 descriptor;
+ struct page *page;
+ bool unmap_page = false;
+
+#if 0
+ dprintk(KERN_INFO
+ "tf_get_l2_page_descriptor():"
+ "*l2_page_descriptor=%x\n",
+ *l2_page_descriptor);
+#endif
+
+ if (*l2_page_descriptor == L2_DESCRIPTOR_FAULT)
+ return;
+
+ page = (struct page *) (*l2_page_descriptor);
+
+ page_vaddr = (unsigned long) page_address(page);
+ if (page_vaddr == 0) {
+ dprintk(KERN_INFO "page_address returned 0\n");
+ /* Should we use kmap_atomic(page, KM_USER0) instead ? */
+ page_vaddr = (unsigned long) kmap(page);
+ if (page_vaddr == 0) {
+ *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
+ dprintk(KERN_ERR "kmap returned 0\n");
+ return;
+ }
+ unmap_page = true;
+ }
+
+ descriptor = tf_get_l2_descriptor_common(page_vaddr, mm);
+ if (descriptor == 0) {
+ *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
+ return;
+ }
+ descriptor |= L2_PAGE_DESCRIPTOR_BASE;
+
+ descriptor |= (page_to_phys(page) & L2_DESCRIPTOR_ADDR_MASK);
+
+ if (!(flags & TF_SHMEM_TYPE_WRITE))
+ /* only read access */
+ descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ;
+ else
+ /* read and write access */
+ descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE;
+
+ if (unmap_page)
+ kunmap(page);
+
+ *l2_page_descriptor = descriptor;
+}
+
+
+/*
+ * Unlocks the physical memory pages and frees the coarse page tables
+ * that need to be released.
+ */
+void tf_cleanup_shared_memory(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup)
+{
+ u32 coarse_page_index;
+
+ dprintk(KERN_INFO "tf_cleanup_shared_memory(%p)\n",
+ shmem_desc);
+
+#ifdef DEBUG_COARSE_TABLES
+ printk(KERN_DEBUG "tf_cleanup_shared_memory "
+ "- number of coarse page tables=%d\n",
+ shmem_desc->coarse_pg_table_count);
+
+ for (coarse_page_index = 0;
+ coarse_page_index < shmem_desc->coarse_pg_table_count;
+ coarse_page_index++) {
+ u32 j;
+
+ printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
+ shmem_desc->coarse_pg_table[coarse_page_index],
+ shmem_desc->coarse_pg_table[coarse_page_index]->
+ descriptors,
+ coarse_page_index);
+ if (shmem_desc->coarse_pg_table[coarse_page_index] != NULL) {
+ for (j = 0;
+ j < TF_DESCRIPTOR_TABLE_CAPACITY;
+ j += 8) {
+ int k;
+ printk(KERN_DEBUG " ");
+ for (k = j; k < j + 8; k++)
+ printk(KERN_DEBUG "%p ",
+ shmem_desc->coarse_pg_table[
+ coarse_page_index]->
+ descriptors);
+ printk(KERN_DEBUG "\n");
+ }
+ }
+ }
+ printk(KERN_DEBUG "tf_cleanup_shared_memory() - done\n\n");
+#endif
+
+ /* Parse the coarse page descriptors */
+ for (coarse_page_index = 0;
+ coarse_page_index < shmem_desc->coarse_pg_table_count;
+ coarse_page_index++) {
+ u32 j;
+ u32 found = 0;
+
+ /* parse the page descriptors of the coarse page */
+ for (j = 0; j < TF_DESCRIPTOR_TABLE_CAPACITY; j++) {
+ u32 l2_page_descriptor = (u32) (shmem_desc->
+ coarse_pg_table[coarse_page_index]->
+ descriptors[j]);
+
+ if (l2_page_descriptor != L2_DESCRIPTOR_FAULT) {
+ struct page *page =
+ tf_l2_page_descriptor_to_page(
+ l2_page_descriptor);
+
+ if (!PageReserved(page))
+ SetPageDirty(page);
+ internal_page_cache_release(page);
+
+ found = 1;
+ } else if (found == 1) {
+ break;
+ }
+ }
+
+ /*
+ * Only free the coarse pages of descriptors not preallocated
+ */
+ if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
+ (full_cleanup != 0))
+ tf_free_coarse_page_table(alloc_context,
+ shmem_desc->coarse_pg_table[coarse_page_index],
+ 0);
+ }
+
+ shmem_desc->coarse_pg_table_count = 0;
+ dprintk(KERN_INFO "tf_cleanup_shared_memory(%p) done\n",
+ shmem_desc);
+}
+
+/*
+ * Make sure the coarse pages are allocated. If not allocated, do it.
+ * Locks down the physical memory pages.
+ * Verifies the memory attributes depending on flags.
+ */
+int tf_fill_descriptor_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 buffer,
+ struct vm_area_struct **vmas,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 buffer_size,
+ u32 *buffer_start_offset,
+ bool in_user_space,
+ u32 flags,
+ u32 *descriptor_count)
+{
+ u32 coarse_page_index;
+ u32 coarse_page_count;
+ u32 page_count;
+ u32 page_shift = 0;
+ int ret = 0;
+ unsigned int info = read_cpuid(CPUID_CACHETYPE);
+
+ dprintk(KERN_INFO "tf_fill_descriptor_table"
+ "(%p, buffer=0x%08X, size=0x%08X, user=%01x "
+ "flags = 0x%08x)\n",
+ shmem_desc,
+ buffer,
+ buffer_size,
+ in_user_space,
+ flags);
+
+ /*
+ * Compute the number of pages
+ * Compute the number of coarse pages
+ * Compute the page offset
+ */
+ page_count = ((buffer & ~PAGE_MASK) +
+ buffer_size + ~PAGE_MASK) >> PAGE_SHIFT;
+
+ /* check whether the 16k alignment restriction applies */
+ if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11)))
+ /*
+ * The 16k alignment restriction applies.
+ * Shift data to get them 16k aligned
+ */
+ page_shift = DESCRIPTOR_V13_12_GET(buffer);
+ page_count += page_shift;
+
+
+ /*
+ * Check the number of pages fit in the coarse pages
+ */
+ if (page_count > (TF_DESCRIPTOR_TABLE_CAPACITY *
+ TF_MAX_COARSE_PAGES)) {
+ dprintk(KERN_ERR "tf_fill_descriptor_table(%p): "
+ "%u pages required to map shared memory!\n",
+ shmem_desc, page_count);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+	/* each coarse page table describes 256 pages */
+ coarse_page_count = ((page_count +
+ TF_DESCRIPTOR_TABLE_CAPACITY_MASK) >>
+ TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT);
+
+ /*
+ * Compute the buffer offset
+ */
+ *buffer_start_offset = (buffer & ~PAGE_MASK) |
+ (page_shift << PAGE_SHIFT);
+
+ /* map each coarse page */
+ for (coarse_page_index = 0;
+ coarse_page_index < coarse_page_count;
+ coarse_page_index++) {
+ u32 j;
+ struct tf_coarse_page_table *coarse_pg_table;
+
+ /* compute a virtual address with appropriate offset */
+ u32 buffer_offset_vaddr = buffer +
+ (coarse_page_index * TF_MAX_COARSE_PAGE_MAPPED_SIZE);
+ u32 pages_to_get;
+
+ /*
+ * Compute the number of pages left for this coarse page.
+ * Decrement page_count each time
+ */
+ pages_to_get = (page_count >>
+ TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT) ?
+ TF_DESCRIPTOR_TABLE_CAPACITY : page_count;
+ page_count -= pages_to_get;
+
+ /*
+ * Check if the coarse page has already been allocated
+ * If not, do it now
+ */
+ if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM)
+ || (shmem_desc->type ==
+ TF_SHMEM_TYPE_PM_HIBERNATE)) {
+ coarse_pg_table = tf_alloc_coarse_page_table(
+ alloc_context,
+ TF_PAGE_DESCRIPTOR_TYPE_NORMAL);
+
+ if (coarse_pg_table == NULL) {
+ dprintk(KERN_ERR
+ "tf_fill_descriptor_table(%p): "
+ "tf_alloc_coarse_page_table "
+ "failed for coarse page %d\n",
+ shmem_desc, coarse_page_index);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ shmem_desc->coarse_pg_table[coarse_page_index] =
+ coarse_pg_table;
+ } else {
+ coarse_pg_table =
+ shmem_desc->coarse_pg_table[coarse_page_index];
+ }
+
+ /*
+ * The page is not necessarily filled with zeroes.
+		 * Set the fault descriptors (each descriptor is 4 bytes long)
+ */
+ memset(coarse_pg_table->descriptors, 0x00,
+ TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
+
+ if (in_user_space) {
+ int pages;
+
+ /*
+			 * TRICK: use coarse_pg_table->descriptors to
+ * hold the (struct page*) items before getting their
+ * physical address
+ */
+ down_read(&(current->mm->mmap_sem));
+ pages = internal_get_user_pages(
+ current,
+ current->mm,
+ buffer_offset_vaddr,
+ /*
+ * page_shift is cleared after retrieving first
+ * coarse page
+ */
+ (pages_to_get - page_shift),
+ (flags & TF_SHMEM_TYPE_WRITE) ? 1 : 0,
+ 0,
+ (struct page **) (coarse_pg_table->descriptors
+ + page_shift),
+ vmas);
+ up_read(&(current->mm->mmap_sem));
+
+ if ((pages <= 0) ||
+ (pages != (pages_to_get - page_shift))) {
+ dprintk(KERN_ERR "tf_fill_descriptor_table:"
+ " get_user_pages got %d pages while "
+ "trying to get %d pages!\n",
+ pages, pages_to_get - page_shift);
+ ret = -EFAULT;
+ goto error;
+ }
+
+ for (j = page_shift;
+ j < page_shift + pages;
+ j++) {
+ /* Get the actual L2 descriptors */
+ tf_get_l2_page_descriptor(
+ &coarse_pg_table->descriptors[j],
+ flags,
+ current->mm);
+ /*
+ * Reject Strongly-Ordered or Device Memory
+ */
+#define IS_STRONGLY_ORDERED_OR_DEVICE_MEM(x) \
+ ((((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_STRONGLY_ORDERED) || \
+ (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_SHARED_DEVICE) || \
+ (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_NON_SHARED_DEVICE))
+
+ if (IS_STRONGLY_ORDERED_OR_DEVICE_MEM(
+ coarse_pg_table->
+ descriptors[j])) {
+ dprintk(KERN_ERR
+ "tf_fill_descriptor_table:"
+ " descriptor 0x%08X use "
+ "strongly-ordered or device "
+ "memory. Rejecting!\n",
+ coarse_pg_table->
+ descriptors[j]);
+ ret = -EFAULT;
+ goto error;
+ }
+ }
+ } else {
+ /* Kernel-space memory */
+ for (j = page_shift;
+ j < pages_to_get;
+ j++) {
+ struct page *page;
+ void *addr =
+ (void *)(buffer_offset_vaddr +
+ (j - page_shift) * PAGE_SIZE);
+ if (!is_vmalloc_addr(addr)) {
+ dprintk(KERN_ERR
+ "tf_fill_descriptor_table: "
+ "cannot handle address %p\n",
+ addr);
+ ret = -EFAULT;
+ goto error;
+ }
+ page = vmalloc_to_page(addr);
+ if (page == NULL) {
+ dprintk(KERN_ERR
+ "tf_fill_descriptor_table: "
+ "cannot map %p to page\n",
+ addr);
+ ret = -EFAULT;
+ goto error;
+ }
+ coarse_pg_table->descriptors[j] = (u32)page;
+ get_page(page);
+
+ /* change coarse page "page address" */
+ tf_get_l2_page_descriptor(
+ &coarse_pg_table->descriptors[j],
+ flags,
+ &init_mm);
+ }
+ }
+
+ dmac_flush_range((void *)coarse_pg_table->descriptors,
+ (void *)(((u32)(coarse_pg_table->descriptors)) +
+ TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32)));
+
+ outer_clean_range(
+ __pa(coarse_pg_table->descriptors),
+ __pa(coarse_pg_table->descriptors) +
+ TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
+ wmb();
+
+ /* Update the coarse page table address */
+ descriptors[coarse_page_index] =
+ tf_get_l1_coarse_descriptor(
+ coarse_pg_table->descriptors);
+
+ /*
+ * The next coarse page has no page shift, reset the
+ * page_shift
+ */
+ page_shift = 0;
+ }
+
+ *descriptor_count = coarse_page_count;
+ shmem_desc->coarse_pg_table_count = coarse_page_count;
+
+#ifdef DEBUG_COARSE_TABLES
+ printk(KERN_DEBUG "ntf_fill_descriptor_table - size=0x%08X "
+ "numberOfCoarsePages=%d\n", buffer_size,
+ shmem_desc->coarse_pg_table_count);
+ for (coarse_page_index = 0;
+ coarse_page_index < shmem_desc->coarse_pg_table_count;
+ coarse_page_index++) {
+ u32 j;
+ struct tf_coarse_page_table *coarse_page_table =
+ shmem_desc->coarse_pg_table[coarse_page_index];
+
+ printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
+ coarse_page_table,
+ coarse_page_table->descriptors,
+ coarse_page_index);
+ for (j = 0;
+ j < TF_DESCRIPTOR_TABLE_CAPACITY;
+ j += 8) {
+ int k;
+ printk(KERN_DEBUG " ");
+ for (k = j; k < j + 8; k++)
+ printk(KERN_DEBUG "0x%08X ",
+ coarse_page_table->descriptors[k]);
+ printk(KERN_DEBUG "\n");
+ }
+ }
+ printk(KERN_DEBUG "ntf_fill_descriptor_table() - done\n\n");
+#endif
+
+ return 0;
+
+error:
+ tf_cleanup_shared_memory(
+ alloc_context,
+ shmem_desc,
+ 0);
+
+ return ret;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Standard communication operations
+ *----------------------------------------------------------------------------*/
+
+u8 *tf_get_description(struct tf_comm *comm)
+{
+ if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
+ return comm->l1_buffer->version_description;
+
+ return NULL;
+}
+
+/*
+ * Returns a non-zero value if the specified S-timeout has expired, zero
+ * otherwise.
+ *
+ * The placeholder referenced to by relative_timeout_jiffies gives the relative
+ * timeout from now in jiffies. It is set to zero if the S-timeout has expired,
+ * or to MAX_SCHEDULE_TIMEOUT if the S-timeout is infinite.
+ */
+static int tf_test_s_timeout(
+ u64 timeout,
+ signed long *relative_timeout_jiffies)
+{
+ struct timeval now;
+ u64 time64;
+
+ *relative_timeout_jiffies = 0;
+
+ /* immediate timeout */
+ if (timeout == TIME_IMMEDIATE)
+ return 1;
+
+ /* infinite timeout */
+ if (timeout == TIME_INFINITE) {
+ dprintk(KERN_DEBUG "tf_test_s_timeout: "
+ "timeout is infinite\n");
+ *relative_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+ return 0;
+ }
+
+ do_gettimeofday(&now);
+ time64 = now.tv_sec;
+ /* will not overflow as operations are done on 64bit values */
+ time64 = (time64 * 1000) + (now.tv_usec / 1000);
+
+ /* timeout expired */
+ if (time64 >= timeout) {
+ dprintk(KERN_DEBUG "tf_test_s_timeout: timeout expired\n");
+ return 1;
+ }
+
+ /*
+ * finite timeout, compute relative_timeout_jiffies
+ */
+ /* will not overflow as time64 < timeout */
+ timeout -= time64;
+
+ /* guarantee *relative_timeout_jiffies is a valid timeout */
+ if ((timeout >> 32) != 0)
+ *relative_timeout_jiffies = MAX_JIFFY_OFFSET;
+ else
+ *relative_timeout_jiffies =
+ msecs_to_jiffies((unsigned int) timeout);
+
+ dprintk(KERN_DEBUG "tf_test_s_timeout: timeout is 0x%lx\n",
+ *relative_timeout_jiffies);
+ return 0;
+}
+
+static void tf_copy_answers(struct tf_comm *comm)
+{
+ u32 first_answer;
+ u32 first_free_answer;
+ struct tf_answer_struct *answerStructureTemp;
+
+ if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
+ spin_lock(&comm->lock);
+ first_free_answer = tf_read_reg32(
+ &comm->l1_buffer->first_free_answer);
+ first_answer = tf_read_reg32(
+ &comm->l1_buffer->first_answer);
+
+ while (first_answer != first_free_answer) {
+ /* answer queue not empty */
+ union tf_answer sComAnswer;
+ struct tf_answer_header header;
+
+ /*
+ * the size of the command in words of 32bit, not in
+ * bytes
+ */
+ u32 command_size;
+ u32 i;
+ u32 *temp = (uint32_t *) &header;
+
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_answers(%p): "
+ "Read answers from L1\n",
+ current->pid, comm);
+
+ /* Read the answer header */
+ for (i = 0;
+ i < sizeof(struct tf_answer_header)/sizeof(u32);
+ i++)
+ temp[i] = comm->l1_buffer->answer_queue[
+ (first_answer + i) %
+ TF_S_ANSWER_QUEUE_CAPACITY];
+
+ /* Read the answer from the L1_Buffer*/
+ command_size = header.message_size +
+ sizeof(struct tf_answer_header)/sizeof(u32);
+ temp = (uint32_t *) &sComAnswer;
+ for (i = 0; i < command_size; i++)
+ temp[i] = comm->l1_buffer->answer_queue[
+ (first_answer + i) %
+ TF_S_ANSWER_QUEUE_CAPACITY];
+
+ answerStructureTemp = (struct tf_answer_struct *)
+ sComAnswer.header.operation_id;
+
+ tf_dump_answer(&sComAnswer);
+
+ memcpy(answerStructureTemp->answer, &sComAnswer,
+ command_size * sizeof(u32));
+ answerStructureTemp->answer_copied = true;
+
+ first_answer += command_size;
+ tf_write_reg32(&comm->l1_buffer->first_answer,
+ first_answer);
+ }
+ spin_unlock(&(comm->lock));
+ }
+}
+
+static void tf_copy_command(
+ struct tf_comm *comm,
+ union tf_command *command,
+ struct tf_connection *connection,
+ enum TF_COMMAND_STATE *command_status)
+{
+ if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
+ && (command != NULL)) {
+ /*
+ * Write the message in the message queue.
+ */
+
+ if (*command_status == TF_COMMAND_STATE_PENDING) {
+ u32 command_size;
+ u32 queue_words_count;
+ u32 i;
+ u32 first_free_command;
+ u32 first_command;
+
+ spin_lock(&comm->lock);
+
+ first_command = tf_read_reg32(
+ &comm->l1_buffer->first_command);
+ first_free_command = tf_read_reg32(
+ &comm->l1_buffer->first_free_command);
+
+ queue_words_count = first_free_command - first_command;
+ command_size = command->header.message_size +
+ sizeof(struct tf_command_header)/sizeof(u32);
+ if ((queue_words_count + command_size) <
+ TF_N_MESSAGE_QUEUE_CAPACITY) {
+ /*
+ * Command queue is not full.
+ * If the Command queue is full,
+ * the command will be copied at
+ * another iteration
+ * of the current function.
+ */
+
+ /*
+ * Change the conn state
+ */
+ if (connection == NULL)
+ goto copy;
+
+ spin_lock(&(connection->state_lock));
+
+ if ((connection->state ==
+ TF_CONN_STATE_NO_DEVICE_CONTEXT)
+ &&
+ (command->header.message_type ==
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
+
+ dprintk(KERN_INFO
+ "tf_copy_command(%p):"
+ "Conn state is DEVICE_CONTEXT_SENT\n",
+ connection);
+ connection->state =
+ TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT;
+ } else if ((connection->state !=
+ TF_CONN_STATE_VALID_DEVICE_CONTEXT)
+ &&
+ (command->header.message_type !=
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
+ /* The connection
+ * is no longer valid.
+ * We may not send any command on it,
+ * not even another
+ * DESTROY_DEVICE_CONTEXT.
+ */
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_command(%p): "
+ "Connection no longer valid."
+ "ABORT\n",
+ current->pid, connection);
+ *command_status =
+ TF_COMMAND_STATE_ABORTED;
+ spin_unlock(
+ &(connection->state_lock));
+ spin_unlock(
+ &comm->lock);
+ return;
+ } else if (
+ (command->header.message_type ==
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) &&
+ (connection->state ==
+ TF_CONN_STATE_VALID_DEVICE_CONTEXT)
+ ) {
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_command(%p): "
+ "Conn state is "
+ "DESTROY_DEVICE_CONTEXT_SENT\n",
+ current->pid, connection);
+ connection->state =
+ TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT;
+ }
+ spin_unlock(&(connection->state_lock));
+copy:
+ /*
+ * Copy the command to L1 Buffer
+ */
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_command(%p): "
+ "Write Message in the queue\n",
+ current->pid, command);
+ tf_dump_command(command);
+
+ for (i = 0; i < command_size; i++)
+ comm->l1_buffer->command_queue[
+ (first_free_command + i) %
+ TF_N_MESSAGE_QUEUE_CAPACITY] =
+ ((uint32_t *) command)[i];
+
+ *command_status =
+ TF_COMMAND_STATE_SENT;
+ first_free_command += command_size;
+
+ tf_write_reg32(
+ &comm->
+ l1_buffer->first_free_command,
+ first_free_command);
+ }
+ spin_unlock(&comm->lock);
+ }
+ }
+}
+
+/*
+ * Sends the specified message through the specified communication channel.
+ *
+ * This function sends the command and waits for the answer.
+ *
+ * @comm:        communication channel (holds the L1 shared buffer and lock)
+ * @command:     command to send; may be NULL when only waiting for an answer
+ * @answerStruct: in/out; answerStruct->answer_copied is set by the answer
+ *               dispatch path (tf_copy_answers) once our answer arrives
+ * @connection:  originating connection, used for state tracking
+ * @bKillable:   non-zero if a pending SIGKILL may interrupt the wait
+ * @secure_is_idle: (CONFIG_TF_ZEBRA only) tracks whether the secure-world
+ *               clock domain is allowed to idle; updated in place
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure: -EINTR if killed before the command was sent, -EIO if killed
+ * after sending but before the answer arrived.
+ */
+static int tf_send_recv(struct tf_comm *comm,
+	union tf_command *command,
+	struct tf_answer_struct *answerStruct,
+	struct tf_connection *connection,
+	int bKillable
+	#ifdef CONFIG_TF_ZEBRA
+	, bool *secure_is_idle
+	#endif
+	)
+{
+	int result;
+	u64 timeout;
+	signed long nRelativeTimeoutJiffies;
+	bool wait_prepared = false;
+	enum TF_COMMAND_STATE command_status = TF_COMMAND_STATE_PENDING;
+	DEFINE_WAIT(wait);
+#ifdef CONFIG_FREEZER
+	unsigned long saved_flags;
+#endif
+	dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p)\n",
+		current->pid, command);
+
+#ifdef CONFIG_FREEZER
+	/*
+	 * Mark the task so the freezer does not use signals to freeze it;
+	 * original flag state is restored at exit.
+	 */
+	saved_flags = current->flags;
+	current->flags |= PF_FREEZER_NOSIG;
+#endif
+
+	/*
+	 * Read all answers from the answer queue
+	 */
+copy_answers:
+	tf_copy_answers(comm);
+
+	/* Try to enqueue (or re-check the state of) our command. */
+	tf_copy_command(comm, command, connection, &command_status);
+
+	/*
+	 * Notify all waiting threads
+	 */
+	wake_up(&(comm->wait_queue));
+
+#ifdef CONFIG_FREEZER
+	if (unlikely(freezing(current))) {
+
+#ifdef CONFIG_TF_ZEBRA
+		/* Let the secure clock domain idle before freezing. */
+		if (!(*secure_is_idle)) {
+			if (tf_schedule_secure_world(comm, true) ==
+				STATUS_PENDING)
+				goto copy_answers;
+
+			tf_l4sec_clkdm_allow_idle(true);
+			*secure_is_idle = true;
+		}
+#endif
+
+		dprintk(KERN_INFO
+			"Entering refrigerator.\n");
+		refrigerator();
+		dprintk(KERN_INFO
+			"Left refrigerator.\n");
+		goto copy_answers;
+	}
+#endif
+
+#ifndef CONFIG_PREEMPT
+	/* Voluntary preemption point on non-preemptible kernels. */
+	if (need_resched())
+		schedule();
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+	/*
+	 * Handle RPC (if any)
+	 */
+	if (tf_rpc_execute(comm) == RPC_NON_YIELD)
+		goto schedule_secure_world;
+#endif
+
+	/*
+	 * Join wait queue
+	 */
+	/*dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p): Prepare to wait\n",
+		current->pid, command);*/
+	prepare_to_wait(&comm->wait_queue, &wait,
+			bKillable ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+	wait_prepared = true;
+
+	/*
+	 * Check if our answer is available
+	 */
+	if (command_status == TF_COMMAND_STATE_ABORTED) {
+		/* Not waiting for an answer, return error code */
+		result = -EINTR;
+		dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+			"Command status is ABORTED."
+			"Exit with 0x%x\n",
+			current->pid, result);
+		goto exit;
+	}
+	if (answerStruct->answer_copied) {
+		dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
+			"Received answer (type 0x%02X)\n",
+			current->pid,
+			answerStruct->answer->header.message_type);
+		result = 0;
+		goto exit;
+	}
+
+	/*
+	 * Check if a signal is pending
+	 */
+	if (bKillable && (sigkill_pending())) {
+		if (command_status == TF_COMMAND_STATE_PENDING)
+			/*Command was not sent. */
+			result = -EINTR;
+		else
+			/* Command was sent but no answer was received yet. */
+			result = -EIO;
+
+		dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+			"Signal Pending. Return error %d\n",
+			current->pid, result);
+		goto exit;
+	}
+
+	/*
+	 * Check if secure world is schedulable. It is schedulable if at
+	 * least one of the following conditions holds:
+	 * + it is still initializing (TF_COMM_FLAG_L1_SHARED_ALLOCATED
+	 *   is not set);
+	 * + there is a command in the queue;
+	 * + the secure world timeout is zero.
+	 */
+	if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
+		u32 first_free_command;
+		u32 first_command;
+		spin_lock(&comm->lock);
+		first_command = tf_read_reg32(
+			&comm->l1_buffer->first_command);
+		first_free_command = tf_read_reg32(
+			&comm->l1_buffer->first_free_command);
+		spin_unlock(&comm->lock);
+		tf_read_timeout(comm, &timeout);
+		if ((first_free_command == first_command) &&
+			(tf_test_s_timeout(timeout,
+			&nRelativeTimeoutJiffies) == 0))
+			/*
+			 * If command queue is empty and if timeout has not
+			 * expired secure world is not schedulable
+			 */
+			goto wait;
+	}
+
+	finish_wait(&comm->wait_queue, &wait);
+	wait_prepared = false;
+
+	/*
+	 * Yield to the Secure World
+	 */
+#ifdef CONFIG_TF_ZEBRA
+schedule_secure_world:
+	/* Wake the secure clock domain before entering the secure world. */
+	if (*secure_is_idle) {
+		tf_l4sec_clkdm_wakeup(true, false);
+		*secure_is_idle = false;
+	}
+#endif
+
+	result = tf_schedule_secure_world(comm, false);
+	if (result < 0)
+		goto exit;
+	goto copy_answers;
+
+wait:
+	/*
+	 * NOTE(review): nRelativeTimeoutJiffies is only assigned on the
+	 * tf_test_s_timeout() path above; this label is only reached via
+	 * that path, so the value is always initialized here.
+	 */
+	if (bKillable && (sigkill_pending())) {
+		if (command_status == TF_COMMAND_STATE_PENDING)
+			result = -EINTR; /* Command was not sent. */
+		else
+			/* Command was sent but no answer was received yet. */
+			result = -EIO;
+
+		dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+			"Signal Pending while waiting. Return error %d\n",
+			current->pid, result);
+		goto exit;
+	}
+
+	if (nRelativeTimeoutJiffies == MAX_SCHEDULE_TIMEOUT)
+		dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
+			"prepare to sleep infinitely\n", current->pid);
+	else
+		dprintk(KERN_INFO "tf_send_recv: "
+			"prepare to sleep 0x%lx jiffies\n",
+			nRelativeTimeoutJiffies);
+
+#ifdef CONFIG_TF_ZEBRA
+	/* Allow the secure clock domain to idle while we sleep. */
+	if (!(*secure_is_idle)) {
+		if (tf_schedule_secure_world(comm, true) == STATUS_PENDING) {
+			finish_wait(&comm->wait_queue, &wait);
+			wait_prepared = false;
+			goto copy_answers;
+		}
+		tf_l4sec_clkdm_allow_idle(true);
+		*secure_is_idle = true;
+	}
+#endif
+
+	/* go to sleep */
+	if (schedule_timeout(nRelativeTimeoutJiffies) == 0)
+		dprintk(KERN_INFO
+			"tf_send_recv: timeout expired\n");
+	else
+		dprintk(KERN_INFO
+			"tf_send_recv: signal delivered\n");
+
+	finish_wait(&comm->wait_queue, &wait);
+	wait_prepared = false;
+	goto copy_answers;
+
+exit:
+	if (wait_prepared) {
+		finish_wait(&comm->wait_queue, &wait);
+		wait_prepared = false;
+	}
+
+#ifdef CONFIG_TF_ZEBRA
+	/*
+	 * On -EIO the answer is still pending, so the secure world must
+	 * stay awake; otherwise let its clock domain idle.
+	 */
+	if ((!(*secure_is_idle)) && (result != -EIO)) {
+		if (tf_schedule_secure_world(comm, true) == STATUS_PENDING)
+			goto copy_answers;
+
+		tf_l4sec_clkdm_allow_idle(true);
+		*secure_is_idle = true;
+	}
+#endif
+
+#ifdef CONFIG_FREEZER
+	/* Restore the caller's PF_FREEZER_NOSIG state. */
+	current->flags &= ~(PF_FREEZER_NOSIG);
+	current->flags |= (saved_flags & PF_FREEZER_NOSIG);
+#endif
+
+	return result;
+}
+
+/*
+ * Sends the specified message through the specified communication channel.
+ *
+ * This function sends the message and waits for the corresponding answer.
+ * It may return if a signal needs to be delivered.
+ *
+ * If the command is interrupted after being sent (or if a SIGKILL arrives
+ * after a successful CREATE_DEVICE_CONTEXT), a DESTROY_DEVICE_CONTEXT is
+ * issued to keep the secure world consistent before returning.
+ *
+ * On SMP the calling thread is pinned to CPU0 for the duration of the
+ * exchange (presumably because SMCs must run on CPU0 — TODO confirm),
+ * and its original affinity is restored on exit.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+int tf_send_receive(struct tf_comm *comm,
+	union tf_command *command,
+	union tf_answer *answer,
+	struct tf_connection *connection,
+	bool bKillable)
+{
+	int error;
+	struct tf_answer_struct answerStructure;
+#ifdef CONFIG_SMP
+	long ret_affinity;
+	cpumask_t saved_cpu_mask;
+	cpumask_t local_cpu_mask = CPU_MASK_NONE;
+#endif
+#ifdef CONFIG_TF_ZEBRA
+	bool secure_is_idle = true;
+#endif
+
+	answerStructure.answer = answer;
+	answerStructure.answer_copied = false;
+
+	/*
+	 * The answer structure's address doubles as the operation id, so
+	 * the answer dispatcher can route the reply back to us.
+	 * NOTE(review): the (u32) cast assumes 32-bit kernel pointers.
+	 */
+	if (command != NULL)
+		command->header.operation_id = (u32) &answerStructure;
+
+	dprintk(KERN_INFO "tf_send_receive\n");
+
+#ifdef CONFIG_TF_ZEBRA
+	if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
+		dprintk(KERN_ERR "tf_send_receive(%p): "
+			"Secure world not started\n", comm);
+
+		return -EFAULT;
+	}
+#endif
+
+	if (test_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags)) != 0) {
+		dprintk(KERN_DEBUG
+			"tf_send_receive: Flag Terminating is set\n");
+		return 0;
+	}
+
+#ifdef CONFIG_SMP
+	/* Pin the current thread to CPU0; restored at the exit label. */
+	cpu_set(0, local_cpu_mask);
+	sched_getaffinity(0, &saved_cpu_mask);
+	ret_affinity = sched_setaffinity(0, &local_cpu_mask);
+	if (ret_affinity != 0)
+		dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity);
+#endif
+
+
+	/*
+	 * Send the command
+	 */
+	error = tf_send_recv(comm,
+		command, &answerStructure, connection, bKillable
+		#ifdef CONFIG_TF_ZEBRA
+		, &secure_is_idle
+		#endif
+		);
+
+	/*
+	 * Non-killable exchange completed, but the process got a SIGKILL:
+	 * if we just created a device context, destroy it again since the
+	 * process is dying anyway.
+	 */
+	if (!bKillable && sigkill_pending()) {
+		if ((command->header.message_type ==
+			TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) &&
+			(answer->create_device_context.error_code ==
+				S_SUCCESS)) {
+
+			/*
+			 * CREATE_DEVICE_CONTEXT was interrupted.
+			 */
+			dprintk(KERN_INFO "tf_send_receive: "
+				"sending DESTROY_DEVICE_CONTEXT\n");
+			answerStructure.answer = answer;
+			answerStructure.answer_copied = false;
+
+			command->header.message_type =
+				TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+			command->header.message_size =
+				(sizeof(struct
+					tf_command_destroy_device_context) -
+				 sizeof(struct tf_command_header))/sizeof(u32);
+			command->header.operation_id =
+				(u32) &answerStructure;
+			command->destroy_device_context.device_context =
+				answer->create_device_context.
+					device_context;
+
+			goto destroy_context;
+		}
+	}
+
+	if (error == 0) {
+		/*
+		 * tf_send_recv returned Success.
+		 * Track the connection's device-context state transitions.
+		 */
+		if (command->header.message_type ==
+		TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) {
+			spin_lock(&(connection->state_lock));
+			connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
+			spin_unlock(&(connection->state_lock));
+		} else if (command->header.message_type ==
+		TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
+			spin_lock(&(connection->state_lock));
+			connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+			spin_unlock(&(connection->state_lock));
+		}
+	} else if (error == -EINTR) {
+		/*
+		 * No command was sent, return failure.
+		 */
+		dprintk(KERN_ERR
+			"tf_send_receive: "
+			"tf_send_recv failed (error %d) !\n",
+			error);
+	} else if (error == -EIO) {
+		/*
+		 * A command was sent but its answer is still pending.
+		 */
+
+		/* means bKillable is true */
+		dprintk(KERN_ERR
+			"tf_send_receive: "
+			"tf_send_recv interrupted (error %d)."
+			"Send DESTROY_DEVICE_CONTEXT.\n", error);
+
+		/* Send the DESTROY_DEVICE_CONTEXT. */
+		answerStructure.answer = answer;
+		answerStructure.answer_copied = false;
+
+		command->header.message_type =
+			TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+		command->header.message_size =
+			(sizeof(struct tf_command_destroy_device_context) -
+			 sizeof(struct tf_command_header))/sizeof(u32);
+		command->header.operation_id =
+			(u32) &answerStructure;
+		command->destroy_device_context.device_context =
+			connection->device_context;
+
+		error = tf_send_recv(comm,
+			command, &answerStructure, connection, false
+			#ifdef CONFIG_TF_ZEBRA
+			, &secure_is_idle
+			#endif
+			);
+		if (error == -EINTR) {
+			/*
+			 * Another thread already sent
+			 * DESTROY_DEVICE_CONTEXT.
+			 * We must still wait for the answer
+			 * to the original command.
+			 */
+			command = NULL;
+			goto destroy_context;
+		} else {
+			/* An answer was received.
+			 * Check if it is the answer
+			 * to the DESTROY_DEVICE_CONTEXT.
+			 */
+			spin_lock(&comm->lock);
+			if (answer->header.message_type !=
+			TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
+				answerStructure.answer_copied = false;
+			}
+			spin_unlock(&comm->lock);
+			if (!answerStructure.answer_copied) {
+				/* Answer to DESTROY_DEVICE_CONTEXT
+				 * was not yet received.
+				 * Wait for the answer.
+				 */
+				dprintk(KERN_INFO
+					"[pid=%d] tf_send_receive:"
+					"Answer to DESTROY_DEVICE_CONTEXT"
+					"not yet received.Retry\n",
+					current->pid);
+				command = NULL;
+				goto destroy_context;
+			}
+		}
+	}
+
+	dprintk(KERN_INFO "tf_send_receive(): Message answer ready\n");
+	goto exit;
+
+destroy_context:
+	/*
+	 * command may be NULL here, in which case tf_send_recv only waits
+	 * for the already-pending answer.
+	 */
+	error = tf_send_recv(comm,
+		command, &answerStructure, connection, false
+		#ifdef CONFIG_TF_ZEBRA
+		, &secure_is_idle
+		#endif
+		);
+
+	/*
+	 * tf_send_recv cannot return an error because
+	 * it's not killable and not within a connection
+	 */
+	BUG_ON(error != 0);
+
+	/* Reset the state, so a new CREATE DEVICE CONTEXT can be sent */
+	spin_lock(&(connection->state_lock));
+	connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+	spin_unlock(&(connection->state_lock));
+
+exit:
+
+#ifdef CONFIG_SMP
+	/* Restore the thread's original CPU affinity. */
+	ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
+	if (ret_affinity != 0)
+		dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity);
+#endif
+	return error;
+}
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+
+/*
+ * Handles all the power management calls.
+ * The operation is the type of power management
+ * operation to be performed.
+ *
+ * This routine will only return if a failure occurred or if
+ * the required power management is of type "resume".
+ * "Hibernate" and "Shutdown" should lock when doing the
+ * corresponding SMC to the Secure World.
+ *
+ * Returns 0 on success, -ENOTTY if the operation is not allowed in the
+ * current secure-world power state, or the error propagated from the
+ * underlying tf_pm_* helper.
+ */
+int tf_power_management(struct tf_comm *comm,
+	enum TF_POWER_OPERATION operation)
+{
+	u32 status;
+	int error = 0;
+
+	dprintk(KERN_INFO "tf_power_management(%d)\n", operation);
+
+#ifdef CONFIG_TF_ZEBRA
+	/* Nothing to do if the secure world was never started. */
+	if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
+		dprintk(KERN_INFO "tf_power_management(%p): "
+			"succeeded (not started)\n", comm);
+
+		return 0;
+	}
+#endif
+
+	/* Extract the secure world's current power state from status_s. */
+	status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
+		& TF_STATUS_POWER_STATE_MASK)
+		>> TF_STATUS_POWER_STATE_SHIFT);
+
+	switch (operation) {
+	case TF_POWER_OPERATION_SHUTDOWN:
+		/* Shutdown is only legal from the ACTIVE power state. */
+		switch (status) {
+		case TF_POWER_MODE_ACTIVE:
+			error = tf_pm_shutdown(comm);
+
+			if (error) {
+				dprintk(KERN_ERR "tf_power_management(): "
+					"Failed with error code 0x%08x\n",
+					error);
+				goto error;
+			}
+			break;
+
+		default:
+			goto not_allowed;
+		}
+		break;
+
+	case TF_POWER_OPERATION_HIBERNATE:
+		/* Hibernate is only legal from the ACTIVE power state. */
+		switch (status) {
+		case TF_POWER_MODE_ACTIVE:
+			error = tf_pm_hibernate(comm);
+
+			if (error) {
+				dprintk(KERN_ERR "tf_power_management(): "
+					"Failed with error code 0x%08x\n",
+					error);
+				goto error;
+			}
+			break;
+
+		default:
+			goto not_allowed;
+		}
+		break;
+
+	case TF_POWER_OPERATION_RESUME:
+		/* Resume is attempted regardless of the reported state. */
+		error = tf_pm_resume(comm);
+
+		if (error != 0) {
+			dprintk(KERN_ERR "tf_power_management(): "
+				"Failed with error code 0x%08x\n",
+				error);
+			goto error;
+		}
+		break;
+	}
+
+	dprintk(KERN_INFO "tf_power_management(): succeeded\n");
+	return 0;
+
+not_allowed:
+	dprintk(KERN_ERR "tf_power_management(): "
+		"Power command not allowed in current "
+		"Secure World state %d\n", status);
+	error = -ENOTTY;
+error:
+	return error;
+}
+
diff --git a/security/tf_driver/scxlnx_comm.h b/security/tf_driver/tf_comm.h
index 24512a7bdd23..48bd93450a06 100644
--- a/security/tf_driver/scxlnx_comm.h
+++ b/security/tf_driver/tf_comm.h
@@ -1,5 +1,5 @@
-/*
- * Copyright (c) 2006-2010 Trusted Logic S.A.
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -17,45 +17,45 @@
* MA 02111-1307 USA
*/
-#ifndef __SCXLNX_COMM_H__
-#define __SCXLNX_COMM_H__
+#ifndef __TF_COMM_H__
+#define __TF_COMM_H__
-#include "scxlnx_defs.h"
-#include "scx_protocol.h"
+#include "tf_defs.h"
+#include "tf_protocol.h"
/*----------------------------------------------------------------------------
* Misc
*----------------------------------------------------------------------------*/
-void SCXLNXCommSetCurrentTime(struct SCXLNX_COMM *pComm);
+void tf_set_current_time(struct tf_comm *comm);
/*
* Atomic accesses to 32-bit variables in the L1 Shared buffer
*/
-static inline u32 SCXLNXCommReadReg32(const u32 *pCommBuffer)
+static inline u32 tf_read_reg32(const u32 *comm_buffer)
{
u32 result;
- __asm__ __volatile__("@ SCXLNXCommReadReg32\n"
+ __asm__ __volatile__("@ tf_read_reg32\n"
"ldrex %0, [%1]\n"
: "=&r" (result)
- : "r" (pCommBuffer)
+ : "r" (comm_buffer)
);
return result;
}
-static inline void SCXLNXCommWriteReg32(void *pCommBuffer, u32 nValue)
+static inline void tf_write_reg32(void *comm_buffer, u32 value)
{
u32 tmp;
- __asm__ __volatile__("@ SCXLNXCommWriteReg32\n"
+ __asm__ __volatile__("@ tf_write_reg32\n"
"1: ldrex %0, [%2]\n"
" strex %0, %1, [%2]\n"
" teq %0, #0\n"
" bne 1b"
: "=&r" (tmp)
- : "r" (nValue), "r" (pCommBuffer)
+ : "r" (value), "r" (comm_buffer)
: "cc"
);
}
@@ -63,30 +63,30 @@ static inline void SCXLNXCommWriteReg32(void *pCommBuffer, u32 nValue)
/*
* Atomic accesses to 64-bit variables in the L1 Shared buffer
*/
-static inline u64 SCXLNXCommReadReg64(void *pCommBuffer)
+static inline u64 tf_read_reg64(void *comm_buffer)
{
u64 result;
- __asm__ __volatile__("@ SCXLNXCommReadReg64\n"
+ __asm__ __volatile__("@ tf_read_reg64\n"
"ldrexd %0, [%1]\n"
: "=&r" (result)
- : "r" (pCommBuffer)
+ : "r" (comm_buffer)
);
return result;
}
-static inline void SCXLNXCommWriteReg64(void *pCommBuffer, u64 nValue)
+static inline void tf_write_reg64(void *comm_buffer, u64 value)
{
u64 tmp;
- __asm__ __volatile__("@ SCXLNXCommWriteReg64\n"
+ __asm__ __volatile__("@ tf_write_reg64\n"
"1: ldrexd %0, [%2]\n"
" strexd %0, %1, [%2]\n"
" teq %0, #0\n"
" bne 1b"
: "=&r" (tmp)
- : "r" (nValue), "r" (pCommBuffer)
+ : "r" (value), "r" (comm_buffer)
: "cc"
);
}
@@ -100,7 +100,7 @@ static inline void SCXLNXCommWriteReg64(void *pCommBuffer, u64 nValue)
#define RPC_YIELD 0x01 /* Yield RPC */
#define RPC_NON_YIELD 0x02 /* non-Yield RPC */
-int SCXLNXCommExecuteRPCCommand(struct SCXLNX_COMM *pComm);
+int tf_rpc_execute(struct tf_comm *comm);
/*----------------------------------------------------------------------------
* Shared memory related operations
@@ -114,41 +114,41 @@ int SCXLNXCommExecuteRPCCommand(struct SCXLNX_COMM *pComm);
#define DESCRIPTOR_V13_12_MASK (0x3 << PAGE_SHIFT)
#define DESCRIPTOR_V13_12_GET(a) ((a & DESCRIPTOR_V13_12_MASK) >> PAGE_SHIFT)
-struct SCXLNX_COARSE_PAGE_TABLE *SCXLNXAllocateCoarsePageTable(
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
- u32 nType);
+struct tf_coarse_page_table *tf_alloc_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ u32 type);
-void SCXLNXFreeCoarsePageTable(
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
- struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable,
- int nForce);
+void tf_free_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_coarse_page_table *coarse_pg_table,
+ int force);
-void SCXLNXInitializeCoarsePageTableAllocator(
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext);
+void tf_init_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context);
-void SCXLNXReleaseCoarsePageTableAllocator(
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext);
+void tf_release_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context);
-struct page *SCXLNXCommL2PageDescriptorToPage(u32 nL2PageDescriptor);
+struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor);
-u32 SCXLNXCommGetL2DescriptorCommon(u32 nVirtAddr, struct mm_struct *mm);
+u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm);
-void SCXLNXCommReleaseSharedMemory(
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
- struct SCXLNX_SHMEM_DESC *pShmemDesc,
- u32 nFullCleanup);
+void tf_cleanup_shared_memory(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup);
-int SCXLNXCommFillDescriptorTable(
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
- struct SCXLNX_SHMEM_DESC *pShmemDesc,
- u32 nBufferVAddr,
- struct vm_area_struct **ppVmas,
- u32 pDescriptors[SCX_MAX_COARSE_PAGES],
- u32 *pBufferSize,
- u32 *pBufferStartOffset,
- bool bInUserSpace,
- u32 nFlags,
- u32 *pnDescriptorCount);
+int tf_fill_descriptor_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 buffer,
+ struct vm_area_struct **vmas,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 buffer_size,
+ u32 *buffer_start_offset,
+ bool in_user_space,
+ u32 flags,
+ u32 *descriptor_count);
/*----------------------------------------------------------------------------
* Standard communication operations
@@ -156,13 +156,13 @@ int SCXLNXCommFillDescriptorTable(
#define STATUS_PENDING 0x00000001
-int tf_schedule_secure_world(struct SCXLNX_COMM *pComm, bool prepare_exit);
+int tf_schedule_secure_world(struct tf_comm *comm, bool prepare_exit);
-int SCXLNXCommSendReceive(
- struct SCXLNX_COMM *pComm,
- union SCX_COMMAND_MESSAGE *pMessage,
- union SCX_ANSWER_MESSAGE *pAnswer,
- struct SCXLNX_CONNECTION *pConn,
+int tf_send_receive(
+ struct tf_comm *comm,
+ union tf_command *command,
+ union tf_answer *answer,
+ struct tf_connection *connection,
bool bKillable);
@@ -172,33 +172,33 @@ int SCXLNXCommSendReceive(
* and is valid only once the communication has
* been initialized
**/
-u8 *SCXLNXCommGetDescription(struct SCXLNX_COMM *pComm);
+u8 *tf_get_description(struct tf_comm *comm);
/*----------------------------------------------------------------------------
* Power management
*----------------------------------------------------------------------------*/
-enum SCXLNX_POWER_OPERATION {
- SCXLNX_POWER_OPERATION_HIBERNATE = 1,
- SCXLNX_POWER_OPERATION_SHUTDOWN = 2,
- SCXLNX_POWER_OPERATION_RESUME = 3,
+enum TF_POWER_OPERATION {
+ TF_POWER_OPERATION_HIBERNATE = 1,
+ TF_POWER_OPERATION_SHUTDOWN = 2,
+ TF_POWER_OPERATION_RESUME = 3,
};
-int SCXLNXCommHibernate(struct SCXLNX_COMM *pComm);
-int SCXLNXCommResume(struct SCXLNX_COMM *pComm);
-int SCXLNXCommShutdown(struct SCXLNX_COMM *pComm);
+int tf_pm_hibernate(struct tf_comm *comm);
+int tf_pm_resume(struct tf_comm *comm);
+int tf_pm_shutdown(struct tf_comm *comm);
-int SCXLNXCommPowerManagement(struct SCXLNX_COMM *pComm,
- enum SCXLNX_POWER_OPERATION nOperation);
+int tf_power_management(struct tf_comm *comm,
+ enum TF_POWER_OPERATION operation);
/*----------------------------------------------------------------------------
* Communication initialization and termination
*----------------------------------------------------------------------------*/
-int SCXLNXCommInit(struct SCXLNX_COMM *pComm);
+int tf_init(struct tf_comm *comm);
-void SCXLNXCommTerminate(struct SCXLNX_COMM *pComm);
+void tf_terminate(struct tf_comm *comm);
-#endif /* __SCXLNX_COMM_H__ */
+#endif /* __TF_COMM_H__ */
diff --git a/security/tf_driver/tf_comm_tz.c b/security/tf_driver/tf_comm_tz.c
new file mode 100644
index 000000000000..2d359c2138ba
--- /dev/null
+++ b/security/tf_driver/tf_comm_tz.c
@@ -0,0 +1,885 @@
+/**
+ * Copyright (c) 2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <linux/version.h>
+#include <asm/cputype.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
+
+#include "tf_defs.h"
+#include "tf_comm.h"
+#include "tf_protocol.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+
+/*
+ * Structure common to all SMC operations.
+ *
+ * reg0..reg4 mirror ARM registers r0-r4 passed to / returned from the
+ * Secure Monitor Call; reg0 carries the SMC function id on entry and the
+ * status on return, reg1 carries the first result value.
+ */
+struct tf_generic_smc {
+	u32 reg0;
+	u32 reg1;
+	u32 reg2;
+	u32 reg3;
+	u32 reg4;
+};
+
+/*----------------------------------------------------------------------------
+ * SMC operations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Performs a Secure Monitor Call with the register values held in
+ * *generic_smc; reg0 and reg1 are overwritten with the values returned
+ * by the secure world in r0/r1.
+ *
+ * On SMP the thread is temporarily pinned to CPU0 around the SMC
+ * (presumably because the secure monitor only runs on CPU0 — TODO
+ * confirm); the original affinity is restored afterwards.
+ */
+static inline void tf_smc_generic_call(
+	struct tf_generic_smc *generic_smc)
+{
+#ifdef CONFIG_SMP
+	long ret;
+	cpumask_t saved_cpu_mask;
+	cpumask_t local_cpu_mask = CPU_MASK_NONE;
+
+	cpu_set(0, local_cpu_mask);
+	sched_getaffinity(0, &saved_cpu_mask);
+	ret = sched_setaffinity(0, &local_cpu_mask);
+	if (ret != 0)
+		dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret);
+#endif
+
+	/* 0xe1600070 is the encoding of the ARM "SMC 0" instruction. */
+	__asm__ volatile(
+		"mov r0, %2\n"
+		"mov r1, %3\n"
+		"mov r2, %4\n"
+		"mov r3, %5\n"
+		"mov r4, %6\n"
+		".word 0xe1600070 @ SMC 0\n"
+		"mov %0, r0\n"
+		"mov %1, r1\n"
+		: "=r" (generic_smc->reg0), "=r" (generic_smc->reg1)
+		: "r" (generic_smc->reg0), "r" (generic_smc->reg1),
+		  "r" (generic_smc->reg2), "r" (generic_smc->reg3),
+		  "r" (generic_smc->reg4)
+		: "r0", "r1", "r2", "r3", "r4");
+
+#ifdef CONFIG_SMP
+	ret = sched_setaffinity(0, &saved_cpu_mask);
+	if (ret != 0)
+		dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret);
+#endif
+}
+
+/*
+ * Calls the get protocol version SMC.
+ * Fills *protocol_version with the version number returned by the
+ * SMC (in r1).
+ */
+static inline void tf_smc_get_protocol_version(u32 *protocol_version)
+{
+	struct tf_generic_smc generic_smc;
+
+	generic_smc.reg0 = TF_SMC_GET_PROTOCOL_VERSION;
+	generic_smc.reg1 = 0;
+	generic_smc.reg2 = 0;
+	generic_smc.reg3 = 0;
+	generic_smc.reg4 = 0;
+
+	tf_smc_generic_call(&generic_smc);
+	*protocol_version = generic_smc.reg1;
+}
+
+
+/*
+ * Calls the init SMC with the specified parameters.
+ *
+ * @shared_page_descriptor: L1 descriptor of the layer 1 shared buffer
+ *                          page handed to the secure world.
+ *
+ * Returns zero (S_SUCCESS) upon successful completion, or the non-zero
+ * status returned by the secure world in r0 upon failure.
+ */
+static inline int tf_smc_init(u32 shared_page_descriptor)
+{
+	struct tf_generic_smc generic_smc;
+
+	generic_smc.reg0 = TF_SMC_INIT;
+	/* Descriptor for the layer 1 shared buffer */
+	generic_smc.reg1 = shared_page_descriptor;
+	generic_smc.reg2 = 0;
+	generic_smc.reg3 = 0;
+	generic_smc.reg4 = 0;
+
+	tf_smc_generic_call(&generic_smc);
+	if (generic_smc.reg0 != S_SUCCESS)
+		printk(KERN_ERR "tf_smc_init:"
+			" r0=0x%08X upon return (expected 0x%08X)!\n",
+			generic_smc.reg0,
+			S_SUCCESS);
+
+	return generic_smc.reg0;
+}
+
+
+/*
+ * Calls the reset irq SMC.
+ * Fire-and-forget: the return registers are not examined.
+ */
+static inline void tf_smc_reset_irq(void)
+{
+	struct tf_generic_smc generic_smc;
+
+	generic_smc.reg0 = TF_SMC_RESET_IRQ;
+	generic_smc.reg1 = 0;
+	generic_smc.reg2 = 0;
+	generic_smc.reg3 = 0;
+	generic_smc.reg4 = 0;
+
+	tf_smc_generic_call(&generic_smc);
+}
+
+
+/*
+ * Calls the WAKE_UP SMC.
+ *
+ * @l1_shared_buffer_descriptor: descriptor of the L1 shared buffer
+ * @shared_mem_start_offset:     offset of the shared memory area
+ * @shared_mem_size:             size of the shared memory area; bit 31
+ *                               is set to select the long form command
+ *
+ * Returns zero (S_SUCCESS) upon successful completion, or the non-zero
+ * status returned by the secure world in r0 upon failure.
+ */
+static inline int tf_smc_wake_up(u32 l1_shared_buffer_descriptor,
+	u32 shared_mem_start_offset,
+	u32 shared_mem_size)
+{
+	struct tf_generic_smc generic_smc;
+
+	generic_smc.reg0 = TF_SMC_WAKE_UP;
+	generic_smc.reg1 = shared_mem_start_offset;
+	/* long form command */
+	generic_smc.reg2 = shared_mem_size | 0x80000000;
+	generic_smc.reg3 = l1_shared_buffer_descriptor;
+	generic_smc.reg4 = 0;
+
+	tf_smc_generic_call(&generic_smc);
+
+	if (generic_smc.reg0 != S_SUCCESS)
+		printk(KERN_ERR "tf_smc_wake_up:"
+			" r0=0x%08X upon return (expected 0x%08X)!\n",
+			generic_smc.reg0,
+			S_SUCCESS);
+
+	return generic_smc.reg0;
+}
+
+/*
+ * Calls the N-Yield SMC, handing the CPU over to the Secure World.
+ * Fire-and-forget: the return registers are not examined.
+ */
+static inline void tf_smc_nyield(void)
+{
+	struct tf_generic_smc generic_smc;
+
+	generic_smc.reg0 = TF_SMC_N_YIELD;
+	generic_smc.reg1 = 0;
+	generic_smc.reg2 = 0;
+	generic_smc.reg3 = 0;
+	generic_smc.reg4 = 0;
+
+	tf_smc_generic_call(&generic_smc);
+}
+
+/*
+ * Yields the Secure World: publishes the current time in the L1 shared
+ * buffer, then issues the N-Yield SMC.
+ *
+ * @prepare_exit is unused in this (TrustZone) implementation.
+ * Always returns 0.
+ */
+int tf_schedule_secure_world(struct tf_comm *comm, bool prepare_exit)
+{
+	tf_set_current_time(comm);
+
+	/* yield to the Secure World */
+	tf_smc_nyield();
+
+	return 0;
+}
+
+/*
+ * Returns the L2 descriptor for the specified user page.
+ */
+
+#define L2_INIT_DESCRIPTOR_BASE           (0x00000003)
+#define L2_INIT_DESCRIPTOR_V13_12_SHIFT   (4)
+
+/*
+ * Builds an ARM L2 (small page) descriptor for the kernel virtual
+ * address vaddr: physical page address, virtual-address bits [13:12]
+ * (for page-coloring on aliasing caches — TODO confirm), and the common
+ * attribute bits from tf_get_l2_descriptor_common() against init_mm.
+ * vaddr must be a directly-mapped (lowmem) kernel address, since
+ * virt_to_page() is used.
+ */
+static u32 tf_get_l2init_descriptor(u32 vaddr)
+{
+	struct page *page;
+	u32 paddr;
+	u32 descriptor;
+
+	descriptor = L2_INIT_DESCRIPTOR_BASE;
+
+	/* get physical address and add to descriptor */
+	page = virt_to_page(vaddr);
+	paddr = page_to_phys(page);
+	descriptor |= (paddr & L2_DESCRIPTOR_ADDR_MASK);
+
+	/* Add virtual address v[13:12] bits to descriptor */
+	descriptor |= (DESCRIPTOR_V13_12_GET(vaddr)
+			<< L2_INIT_DESCRIPTOR_V13_12_SHIFT);
+
+	descriptor |= tf_get_l2_descriptor_common(vaddr, &init_mm);
+
+
+	return descriptor;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Free the memory used by the W3B buffer for the specified comm.
+ * This function does nothing if no W3B buffer is allocated for the device.
+ *
+ * Releases the shared-memory descriptors and coarse page table
+ * allocator, frees the vmalloc'ed buffer, and clears the
+ * TF_COMM_FLAG_W3B_ALLOCATED flag so a later hibernate can reallocate.
+ */
+static inline void tf_free_w3b(struct tf_comm *comm)
+{
+	tf_cleanup_shared_memory(
+		&(comm->w3b_cpt_alloc_context),
+		&(comm->w3b_shmem_desc),
+		0);
+
+	tf_release_coarse_page_table_allocator(&(comm->w3b_cpt_alloc_context));
+
+	internal_vfree((void *)comm->w3b);
+	comm->w3b = 0;
+	comm->w3b_shmem_size = 0;
+	clear_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags));
+}
+
+
+/*
+ * Allocates the W3B buffer for the specified comm.
+ *
+ * The buffer size is taken from the L1 shared buffer: w3b_size_current_s
+ * when config_flag_s bit 4 ("H" bit) is set (size may change after
+ * SMC_INIT), otherwise w3b_size_max_s. An already-allocated buffer is
+ * reused if still large enough; otherwise it is freed and reallocated.
+ *
+ * Returns zero upon successful completion, or an appropriate error code
+ * upon failure (-ENOMEM when the buffer cannot be vmalloc'ed, or the
+ * error from tf_fill_descriptor_table).
+ */
+static inline int tf_allocate_w3b(struct tf_comm *comm)
+{
+	int error;
+	u32 flags;
+	u32 config_flag_s;
+	u32 *w3b_descriptors;
+	u32 w3b_descriptor_count;
+	u32 w3b_current_size;
+
+	config_flag_s = tf_read_reg32(&comm->l1_buffer->config_flag_s);
+
+retry:
+	if ((test_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags))) == 0) {
+		/*
+		 * Initialize the shared memory for the W3B
+		 */
+		tf_init_coarse_page_table_allocator(
+			&comm->w3b_cpt_alloc_context);
+	} else {
+		/*
+		 * The W3B is allocated but do we have to reallocate a bigger
+		 * one?
+		 */
+		/* Check H bit */
+		if ((config_flag_s & (1<<4)) != 0) {
+			/* The size of the W3B may change after SMC_INIT */
+			/* Read the current value */
+			w3b_current_size = tf_read_reg32(
+				&comm->l1_buffer->w3b_size_current_s);
+			/*
+			 * NOTE(review): keeps the buffer when its size is
+			 * strictly greater than the current requirement;
+			 * an equally-sized buffer is freed and rebuilt.
+			 */
+			if (comm->w3b_shmem_size > w3b_current_size)
+				return 0;
+
+			tf_free_w3b(comm);
+			goto retry;
+		} else {
+			return 0;
+		}
+	}
+
+	/* check H bit */
+	if ((config_flag_s & (1<<4)) != 0)
+		/* The size of the W3B may change after SMC_INIT */
+		/* Read the current value */
+		comm->w3b_shmem_size = tf_read_reg32(
+			&comm->l1_buffer->w3b_size_current_s);
+	else
+		comm->w3b_shmem_size = tf_read_reg32(
+			&comm->l1_buffer->w3b_size_max_s);
+
+	comm->w3b = (u32) internal_vmalloc(comm->w3b_shmem_size);
+	if (comm->w3b == 0) {
+		printk(KERN_ERR "tf_allocate_w3b():"
+			" Out of memory for W3B buffer (%u bytes)!\n",
+			(unsigned int)(comm->w3b_shmem_size));
+		error = -ENOMEM;
+		goto error;
+	}
+
+	/* initialize the w3b_shmem_desc structure */
+	comm->w3b_shmem_desc.type = TF_SHMEM_TYPE_PM_HIBERNATE;
+	INIT_LIST_HEAD(&(comm->w3b_shmem_desc.list));
+
+	flags = (TF_SHMEM_TYPE_READ | TF_SHMEM_TYPE_WRITE);
+
+	/* directly point to the L1 shared buffer W3B descriptors */
+	w3b_descriptors = comm->l1_buffer->w3b_descriptors;
+
+	/*
+	 * tf_fill_descriptor_table uses the following parameter as an
+	 * IN/OUT
+	 */
+
+	error = tf_fill_descriptor_table(
+		&(comm->w3b_cpt_alloc_context),
+		&(comm->w3b_shmem_desc),
+		comm->w3b,
+		NULL,
+		w3b_descriptors,
+		comm->w3b_shmem_size,
+		&(comm->w3b_shmem_offset),
+		false,
+		flags,
+		&w3b_descriptor_count);
+	if (error != 0) {
+		printk(KERN_ERR "tf_allocate_w3b():"
+			" tf_fill_descriptor_table failed with "
+			"error code 0x%08x!\n",
+			error);
+		goto error;
+	}
+
+	set_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags));
+
+	/* successful completion */
+	return 0;
+
+error:
+	/* tf_free_w3b tolerates the partially-initialized state. */
+	tf_free_w3b(comm);
+
+	return error;
+}
+
+/*
+ * Perform a Secure World shutdown operation.
+ * The routine does not return if the operation succeeds.
+ * the routine returns an appropriate error code if
+ * the operation fails.
+ *
+ * Sends a MANAGEMENT/SHUTDOWN command through the normal send/receive
+ * path and returns the secure world's error code. Compiled out (returns
+ * 0) under CONFIG_TFN, where it is not needed for the TEGRA product.
+ */
+int tf_pm_shutdown(struct tf_comm *comm)
+{
+#ifdef CONFIG_TFN
+	/* this function is useless for the TEGRA product */
+	return 0;
+#else
+	int error;
+	union tf_command command;
+	union tf_answer answer;
+
+	dprintk(KERN_INFO "tf_pm_shutdown()\n");
+
+	memset(&command, 0, sizeof(command));
+
+	command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
+	/* message_size counts payload words after the header */
+	command.header.message_size =
+		(sizeof(struct tf_command_management) -
+			sizeof(struct tf_command_header))/sizeof(u32);
+
+	command.management.command = TF_MANAGEMENT_SHUTDOWN;
+
+	error = tf_send_receive(
+		comm,
+		&command,
+		&answer,
+		NULL,
+		false);
+
+	if (error != 0) {
+		dprintk(KERN_ERR "tf_pm_shutdown(): "
+			"tf_send_receive failed (error %d)!\n",
+			error);
+		return error;
+	}
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+	if (answer.header.error_code != 0)
+		dprintk(KERN_ERR "tf_driver: shutdown failed.\n");
+	else
+		dprintk(KERN_INFO "tf_driver: shutdown succeeded.\n");
+#endif
+
+	return answer.header.error_code;
+#endif
+}
+
+
+/*
+ * Perform a Secure World hibernate operation.
+ * The routine does not return if the operation succeeds.
+ * the routine returns an appropriate error code if
+ * the operation fails.
+ */
+/*
+ * Sends the "prepare to hibernate" management command and dispatches
+ * answers itself (the polling thread is already hibernating).
+ * Returns 0 on success or a negative error code.
+ */
+int tf_pm_hibernate(struct tf_comm *comm)
+{
+#ifdef CONFIG_TFN
+	/* this function is useless for the TEGRA product */
+	return 0;
+#else
+	int error;
+	union tf_command command;
+	union tf_answer answer;
+	u32 first_command;
+	u32 first_free_command;
+
+	dprintk(KERN_INFO "tf_pm_hibernate()\n");
+
+	error = tf_allocate_w3b(comm);
+	if (error != 0) {
+		dprintk(KERN_ERR "tf_pm_hibernate(): "
+			"tf_allocate_w3b failed (error %d)!\n",
+			error);
+		return error;
+	}
+
+	/*
+	 * As the polling thread is already hibernating, we
+	 * should send the message and receive the answer ourself
+	 */
+
+	/* build the "prepare to hibernate" message */
+	/*
+	 * FIX: zero the whole command and set header.message_size.
+	 * message_size was previously never initialized, so the
+	 * command_size computed below (and the number of words copied
+	 * into the command queue) came from uninitialized stack memory.
+	 * Compute it the same way tf_pm_shutdown() does.
+	 */
+	memset(&command, 0, sizeof(command));
+
+	command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
+	command.header.message_size =
+		(sizeof(struct tf_command_management) -
+			sizeof(struct tf_command_header))/sizeof(u32);
+	command.management.command = TF_MANAGEMENT_HIBERNATE;
+	/* Long Form Command */
+	command.management.shared_mem_descriptors[0] = 0;
+	command.management.shared_mem_descriptors[1] = 0;
+	/* bit 31 marks the W3B size field as valid (long form) */
+	command.management.w3b_size =
+		comm->w3b_shmem_size | 0x80000000;
+	command.management.w3b_start_offset =
+		comm->w3b_shmem_offset;
+	/* the answer is routed back to us through this operation id */
+	command.header.operation_id = (u32) &answer;
+
+	tf_dump_command(&command);
+
+	/* find a slot to send the message in */
+
+	/* AFY: why not use the function tf_send_receive?? We are
+	 * duplicating a lot of subtle code here. And it's not going to be
+	 * tested because power management is currently not supported by the
+	 * secure world. */
+	for (;;) {
+		int queue_words_count, command_size;
+
+		spin_lock(&(comm->lock));
+
+		first_command = tf_read_reg32(
+			&comm->l1_buffer->first_command);
+		first_free_command = tf_read_reg32(
+			&comm->l1_buffer->first_free_command);
+
+		queue_words_count = first_free_command - first_command;
+		command_size = command.header.message_size
+			+ sizeof(struct tf_command_header);
+		if ((queue_words_count + command_size) <
+				TF_N_MESSAGE_QUEUE_CAPACITY) {
+			/* Command queue is not full */
+			memcpy(&comm->l1_buffer->command_queue[
+				first_free_command %
+					TF_N_MESSAGE_QUEUE_CAPACITY],
+				&command,
+				command_size * sizeof(u32));
+
+			tf_write_reg32(&comm->l1_buffer->first_free_command,
+				first_free_command + command_size);
+
+			spin_unlock(&(comm->lock));
+			break;
+		}
+
+		/* Queue full: yield to the Secure World and retry */
+		spin_unlock(&(comm->lock));
+		(void)tf_schedule_secure_world(comm, false);
+	}
+
+	/* now wait for the answer, dispatching other answers */
+	while (1) {
+		u32 first_answer;
+		u32 first_free_answer;
+
+		/* check all the answers */
+		first_free_answer = tf_read_reg32(
+			&comm->l1_buffer->first_free_answer);
+		first_answer = tf_read_reg32(
+			&comm->l1_buffer->first_answer);
+
+		if (first_answer != first_free_answer) {
+			int bFoundAnswer = 0;
+
+			do {
+				/* answer queue not empty */
+				union tf_answer tmp_answer;
+				struct tf_answer_header header;
+				/* size of the command in words of 32bit */
+				int command_size;
+
+				/* get the message_size */
+				memcpy(&header,
+					&comm->l1_buffer->answer_queue[
+						first_answer %
+						TF_S_ANSWER_QUEUE_CAPACITY],
+					sizeof(struct tf_answer_header));
+				command_size = header.message_size +
+					sizeof(struct tf_answer_header);
+
+				/*
+				 * NOTE: message_size is the number of words
+				 * following the first word
+				 */
+				memcpy(&tmp_answer,
+					&comm->l1_buffer->answer_queue[
+						first_answer %
+						TF_S_ANSWER_QUEUE_CAPACITY],
+					command_size * sizeof(u32));
+
+				tf_dump_answer(&tmp_answer);
+
+				if (tmp_answer.header.operation_id ==
+						(u32) &answer) {
+					/*
+					 * this is the answer to the "prepare to
+					 * hibernate" message
+					 */
+					memcpy(&answer,
+						&tmp_answer,
+						command_size * sizeof(u32));
+
+					bFoundAnswer = 1;
+					tf_write_reg32(
+						&comm->l1_buffer->first_answer,
+						first_answer + command_size);
+					break;
+				} else {
+					/*
+					 * this is a standard message answer,
+					 * dispatch it
+					 */
+					struct tf_answer_struct
+						*answerStructure;
+
+					answerStructure =
+						(struct tf_answer_struct *)
+						tmp_answer.header.operation_id;
+
+					memcpy(answerStructure->answer,
+						&tmp_answer,
+						command_size * sizeof(u32));
+
+					answerStructure->answer_copied = true;
+				}
+
+				tf_write_reg32(
+					&comm->l1_buffer->first_answer,
+					first_answer + command_size);
+			} while (first_answer != first_free_answer);
+
+			if (bFoundAnswer)
+				break;
+		}
+
+		/*
+		 * since the Secure World is at least running the "prepare to
+		 * hibernate" message, its timeout must be immediate So there is
+		 * no need to check its timeout and schedule() the current
+		 * thread
+		 */
+		(void)tf_schedule_secure_world(comm, false);
+	} /* while (1) */
+
+	printk(KERN_INFO "tf_driver: hibernate.\n");
+	return 0;
+#endif
+}
+
+
+/*
+ * Perform a Secure World resume operation.
+ * The routine returns once the Secure World is active again
+ * or if an error occurs during the "resume" process
+ */
+int tf_pm_resume(struct tf_comm *comm)
+{
+#ifdef CONFIG_TFN
+	/* this function is useless for the TEGRA product */
+	return 0;
+#else
+	int error;
+	u32 status;
+
+	dprintk(KERN_INFO "tf_pm_resume()\n");
+
+	/*
+	 * Issue the wake-up SMC, handing back the L1 buffer descriptor and
+	 * the W3B parameters saved for hibernation.
+	 */
+	error = tf_smc_wake_up(
+		tf_get_l2init_descriptor((u32)comm->l1_buffer),
+		comm->w3b_shmem_offset,
+		comm->w3b_shmem_size);
+
+	if (error != 0) {
+		dprintk(KERN_ERR "tf_pm_resume(): "
+			"tf_smc_wake_up failed (error %d)!\n",
+			error);
+		return error;
+	}
+
+	/* Extract the power-state field from the Secure World status word */
+	status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
+		& TF_STATUS_POWER_STATE_MASK)
+		>> TF_STATUS_POWER_STATE_SHIFT);
+
+	/* Poll until the Secure World is either active again or panicked */
+	while ((status != TF_POWER_MODE_ACTIVE)
+			&& (status != TF_POWER_MODE_PANIC)) {
+		tf_smc_nyield();
+
+		status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
+			& TF_STATUS_POWER_STATE_MASK)
+			>> TF_STATUS_POWER_STATE_SHIFT);
+
+		/*
+		 * As this may last quite a while, call the kernel scheduler to
+		 * hand over CPU for other operations
+		 */
+		schedule();
+	}
+
+	switch (status) {
+	case TF_POWER_MODE_ACTIVE:
+		break;
+
+	case TF_POWER_MODE_PANIC:
+		dprintk(KERN_ERR "tf_pm_resume(): "
+			"Secure World POWER_MODE_PANIC!\n");
+		return -EINVAL;
+
+	default:
+		dprintk(KERN_ERR "tf_pm_resume(): "
+			"unexpected Secure World POWER_MODE (%d)!\n", status);
+		return -EINVAL;
+	}
+
+	dprintk(KERN_INFO "tf_pm_resume() succeeded\n");
+	return 0;
+#endif
+}
+
+/*----------------------------------------------------------------------------
+ * Communication initialization and termination
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Handles the software interrupts issued by the Secure World.
+ */
+static irqreturn_t tf_soft_int_handler(int irq, void *dev_id)
+{
+	struct tf_comm *comm = (struct tf_comm *) dev_id;
+
+	/* Communication not yet initialized: not our interrupt */
+	if (comm->l1_buffer == NULL)
+		return IRQ_NONE;
+
+	if ((tf_read_reg32(&comm->l1_buffer->status_s) &
+			TF_STATUS_P_MASK) == 0)
+		/* interrupt not issued by the Trusted Foundations Software */
+		return IRQ_NONE;
+
+	/* Acknowledge the interrupt towards the Secure World */
+	tf_smc_reset_irq();
+
+	/* signal N_SM_EVENT */
+	wake_up(&comm->wait_queue);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Initializes the communication with the Secure World.
+ * The L1 shared buffer is allocated and the Secure World
+ * is yielded for the first time.
+ * returns successfully once the communication with
+ * the Secure World is up and running
+ *
+ * Returns 0 upon success or appropriate error code
+ * upon failure
+ */
+int tf_init(struct tf_comm *comm)
+{
+	int error;
+	struct page *buffer_page;
+	u32 protocol_version;
+
+	dprintk(KERN_INFO "tf_init()\n");
+
+	spin_lock_init(&(comm->lock));
+	comm->flags = 0;
+	comm->l1_buffer = NULL;
+	init_waitqueue_head(&(comm->wait_queue));
+
+	/*
+	 * Check the Secure World protocol version is the expected one.
+	 */
+	tf_smc_get_protocol_version(&protocol_version);
+
+	/* Only the major version has to match */
+	if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
+			!= TF_S_PROTOCOL_MAJOR_VERSION) {
+		printk(KERN_ERR "tf_init():"
+			" Unsupported Secure World Major Version "
+			"(0x%02X, expected 0x%02X)!\n",
+			GET_PROTOCOL_MAJOR_VERSION(protocol_version),
+			TF_S_PROTOCOL_MAJOR_VERSION);
+		error = -EIO;
+		goto error;
+	}
+
+	/*
+	 * Register the software interrupt handler if required to.
+	 */
+	if (comm->soft_int_irq != -1) {
+		dprintk(KERN_INFO "tf_init(): "
+			"Registering software interrupt handler (IRQ %d)\n",
+			comm->soft_int_irq);
+
+		error = request_irq(comm->soft_int_irq,
+			tf_soft_int_handler,
+			IRQF_SHARED,
+			TF_DEVICE_BASE_NAME,
+			comm);
+		if (error != 0) {
+			dprintk(KERN_ERR "tf_init(): "
+				"request_irq failed for irq %d (error %d)\n",
+				comm->soft_int_irq, error);
+			goto error;
+		}
+		/* remember the IRQ so tf_terminate() can free it */
+		set_bit(TF_COMM_FLAG_IRQ_REQUESTED, &(comm->flags));
+	}
+
+	/*
+	 * Allocate and initialize the L1 shared buffer.
+	 */
+	comm->l1_buffer = (void *) internal_get_zeroed_page(GFP_KERNEL);
+	if (comm->l1_buffer == NULL) {
+		printk(KERN_ERR "tf_init():"
+			" get_zeroed_page failed for L1 shared buffer!\n");
+		error = -ENOMEM;
+		goto error;
+	}
+
+	/*
+	 * Ensure the page storing the L1 shared buffer is mapped.
+	 */
+	buffer_page = virt_to_page(comm->l1_buffer);
+	/*
+	 * NOTE(review): trylock_page() can fail and its result is
+	 * ignored here; tf_terminate() unconditionally calls
+	 * __clear_page_locked() on this page — confirm this pairing is
+	 * intended for a freshly allocated (never contended) page.
+	 */
+	trylock_page(buffer_page);
+
+	dprintk(KERN_INFO "tf_init(): "
+		"L1 shared buffer allocated at virtual:%p, "
+		"physical:%p (page:%p)\n",
+		comm->l1_buffer,
+		(void *)virt_to_phys(comm->l1_buffer),
+		buffer_page);
+
+	set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags));
+
+	/*
+	 * Init SMC
+	 */
+	error = tf_smc_init(
+		tf_get_l2init_descriptor((u32)comm->l1_buffer));
+	if (error != S_SUCCESS) {
+		dprintk(KERN_ERR "tf_init(): "
+			"tf_smc_init failed (error 0x%08X)!\n",
+			error);
+		goto error;
+	}
+
+	/*
+	 * check whether the interrupts are actually enabled
+	 * If not, remove irq handler
+	 */
+	if ((tf_read_reg32(&comm->l1_buffer->config_flag_s) &
+			TF_CONFIG_FLAG_S) == 0) {
+		if (test_and_clear_bit(TF_COMM_FLAG_IRQ_REQUESTED,
+				&(comm->flags)) != 0) {
+			dprintk(KERN_INFO "tf_init(): "
+				"Interrupts not used, unregistering "
+				"softint (IRQ %d)\n",
+				comm->soft_int_irq);
+
+			free_irq(comm->soft_int_irq, comm);
+		}
+	} else {
+		if (test_bit(TF_COMM_FLAG_IRQ_REQUESTED,
+				&(comm->flags)) == 0) {
+			/*
+			 * Interrupts are enabled in the Secure World, but not
+			 * handled by driver
+			 */
+			dprintk(KERN_ERR "tf_init(): "
+				"soft_interrupt argument not provided\n");
+			error = -EINVAL;
+			goto error;
+		}
+	}
+
+	/*
+	 * Successful completion.
+	 */
+
+	/* yield for the first time */
+	(void)tf_schedule_secure_world(comm, false);
+
+	dprintk(KERN_INFO "tf_init(): Success\n");
+	return S_SUCCESS;
+
+error:
+	/*
+	 * Error handling.
+	 */
+	dprintk(KERN_INFO "tf_init(): Failure (error %d)\n",
+		error);
+	/* tf_terminate() releases whatever the flag bits say was acquired */
+	tf_terminate(comm);
+	return error;
+}
+
+
+/*
+ * Attempt to terminate the communication with the Secure World.
+ * The L1 shared buffer is freed.
+ * Calling this routine terminates definitively the communication
+ * with the Secure World : there is no way to inform the Secure World of a new
+ * L1 shared buffer to be used once it has been initialized.
+ */
+void tf_terminate(struct tf_comm *comm)
+{
+	dprintk(KERN_INFO "tf_terminate()\n");
+
+	/* Mark the communication as going away before releasing resources */
+	set_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags));
+
+	if ((test_bit(TF_COMM_FLAG_W3B_ALLOCATED,
+			&(comm->flags))) != 0) {
+		dprintk(KERN_INFO "tf_terminate(): "
+			"Freeing the W3B buffer...\n");
+		tf_free_w3b(comm);
+	}
+
+	/* Release the L1 shared buffer page locked by tf_init() */
+	if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
+			&(comm->flags))) != 0) {
+		__clear_page_locked(virt_to_page(comm->l1_buffer));
+		internal_free_page((unsigned long) comm->l1_buffer);
+	}
+
+	if ((test_bit(TF_COMM_FLAG_IRQ_REQUESTED,
+			&(comm->flags))) != 0) {
+		dprintk(KERN_INFO "tf_terminate(): "
+			"Unregistering softint (IRQ %d)\n",
+			comm->soft_int_irq);
+		free_irq(comm->soft_int_irq, comm);
+	}
+}
diff --git a/security/tf_driver/tf_conn.c b/security/tf_driver/tf_conn.c
new file mode 100644
index 000000000000..48f0cb777998
--- /dev/null
+++ b/security/tf_driver/tf_conn.c
@@ -0,0 +1,1566 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#include "s_version.h"
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_comm.h"
+#include "tf_conn.h"
+
+#ifdef CONFIG_TF_ZEBRA
+#include "tf_crypto.h"
+#endif
+
+/*----------------------------------------------------------------------------
+ * Management of the shared memory blocks.
+ *
+ * Shared memory blocks are the blocks registered through
+ * the commands REGISTER_SHARED_MEMORY and POWER_MANAGEMENT
+ *----------------------------------------------------------------------------*/
+
+/**
+ * Unmaps a shared memory
+ **/
+void tf_unmap_shmem(
+	struct tf_connection *connection,
+	struct tf_shmem_desc *shmem_desc,
+	u32 full_cleanup)
+{
+	/* check shmem_desc contains a descriptor */
+	if (shmem_desc == NULL)
+		return;
+
+	dprintk(KERN_DEBUG "tf_unmap_shmem(%p)\n", shmem_desc);
+
+retry:
+	mutex_lock(&(connection->shmem_mutex));
+	if (atomic_read(&shmem_desc->ref_count) > 1) {
+		/*
+		 * Shared mem still in use, wait for other operations completion
+		 * before actually unmapping it.
+		 */
+		dprintk(KERN_INFO "Descriptor in use\n");
+		mutex_unlock(&(connection->shmem_mutex));
+		/* busy-wait: drop the lock, yield, and re-check */
+		schedule();
+		goto retry;
+	}
+
+	/* Release the pages referenced by the descriptor */
+	tf_cleanup_shared_memory(
+		&(connection->cpt_alloc_context),
+		shmem_desc,
+		full_cleanup);
+
+	/* Remove from the used list */
+	list_del(&(shmem_desc->list));
+
+	if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
+		(full_cleanup != 0)) {
+		internal_kfree(shmem_desc);
+
+		atomic_dec(&(connection->shmem_count));
+	} else {
+		/*
+		 * This is a preallocated shared memory, add to free list
+		 * Since the device context is unmapped last, it is
+		 * always the first element of the free list if no
+		 * device context has been created
+		 */
+		shmem_desc->block_identifier = 0;
+		list_add(&(shmem_desc->list), &(connection->free_shmem_list));
+	}
+
+	mutex_unlock(&(connection->shmem_mutex));
+}
+
+
+/**
+ * Find the first available slot for a new block of shared memory
+ * and map the user buffer.
+ * Update the descriptors to L1 descriptors
+ * Update the buffer_start_offset and buffer_size fields
+ * shmem_desc is updated to the mapped shared memory descriptor
+ **/
+int tf_map_shmem(
+	struct tf_connection *connection,
+	u32 buffer,
+	/* flags for read-write access rights on the memory */
+	u32 flags,
+	bool in_user_space,
+	u32 descriptors[TF_MAX_COARSE_PAGES],
+	u32 *buffer_start_offset,
+	u32 buffer_size,
+	struct tf_shmem_desc **shmem_desc,
+	u32 *descriptor_count)
+{
+	struct tf_shmem_desc *desc = NULL;
+	int error;
+
+	dprintk(KERN_INFO "tf_map_shmem(%p, %p, flags = 0x%08x)\n",
+					connection,
+					(void *) buffer,
+					flags);
+
+	mutex_lock(&(connection->shmem_mutex));
+
+	/*
+	 * Check the list of free shared memory
+	 * is not empty
+	 */
+	if (list_empty(&(connection->free_shmem_list))) {
+		/* Enforce the per-connection shared-memory limit */
+		if (atomic_read(&(connection->shmem_count)) ==
+				TF_SHMEM_MAX_COUNT) {
+			printk(KERN_ERR "tf_map_shmem(%p):"
+				" maximum shared memories already registered\n",
+				connection);
+			error = -ENOMEM;
+			goto error;
+		}
+
+		/* no descriptor available, allocate a new one */
+
+		desc = (struct tf_shmem_desc *) internal_kmalloc(
+			sizeof(*desc), GFP_KERNEL);
+		if (desc == NULL) {
+			printk(KERN_ERR "tf_map_shmem(%p):"
+				" failed to allocate descriptor\n",
+				connection);
+			error = -ENOMEM;
+			goto error;
+		}
+
+		/* Initialize the structure */
+		desc->type = TF_SHMEM_TYPE_REGISTERED_SHMEM;
+		atomic_set(&desc->ref_count, 1);
+		INIT_LIST_HEAD(&(desc->list));
+
+		atomic_inc(&(connection->shmem_count));
+	} else {
+		/* take the first free shared memory descriptor */
+		desc = list_first_entry(&(connection->free_shmem_list),
+			struct tf_shmem_desc, list);
+		list_del(&(desc->list));
+	}
+
+	/* Add the descriptor to the used list */
+	list_add(&(desc->list), &(connection->used_shmem_list));
+
+	/* Pin the user pages and build the L1 coarse page descriptors */
+	error = tf_fill_descriptor_table(
+		&(connection->cpt_alloc_context),
+		desc,
+		buffer,
+		connection->vmas,
+		descriptors,
+		buffer_size,
+		buffer_start_offset,
+		in_user_space,
+		flags,
+		descriptor_count);
+
+	if (error != 0) {
+		dprintk(KERN_ERR "tf_map_shmem(%p):"
+			" tf_fill_descriptor_table failed with error "
+			"code %d!\n",
+			connection,
+			error);
+		goto error;
+	}
+	desc->client_buffer = (u8 *) buffer;
+
+	/*
+	 * Successful completion.
+	 */
+	*shmem_desc = desc;
+	mutex_unlock(&(connection->shmem_mutex));
+	dprintk(KERN_DEBUG "tf_map_shmem: success\n");
+	return 0;
+
+
+	/*
+	 * Error handling.
+	 */
+error:
+	mutex_unlock(&(connection->shmem_mutex));
+	dprintk(KERN_ERR "tf_map_shmem: failure with error code %d\n",
+		error);
+
+	/* desc may be NULL here; tf_unmap_shmem() handles that case */
+	tf_unmap_shmem(
+		connection,
+		desc,
+		0);
+
+	return error;
+}
+
+
+
+/* This function is a copy of the find_vma() function
+in linux kernel 2.6.15 version with some fixes :
+ - memory block may end on vm_end
+ - check the full memory block is in the memory area
+ - guarantee NULL is returned if no memory area is found */
+struct vm_area_struct *tf_find_vma(struct mm_struct *mm,
+	unsigned long addr, unsigned long size)
+{
+	struct vm_area_struct *vma = NULL;
+
+	dprintk(KERN_INFO
+		"tf_find_vma addr=0x%lX size=0x%lX\n", addr, size);
+
+	/*
+	 * NOTE(review): like find_vma(), callers are expected to hold
+	 * mm->mmap_sem (tf_validate_shmem_and_flags() takes it before
+	 * calling here) — confirm for any new caller.
+	 */
+	if (mm) {
+		/* Check the cache first. */
+		/* (Cache hit rate is typically around 35%.) */
+		vma = mm->mmap_cache;
+		/* Cache hit only if [addr, addr+size) lies inside the VMA */
+		if (!(vma && vma->vm_end >= (addr+size) &&
+				vma->vm_start <= addr)) {
+			struct rb_node *rb_node;
+
+			rb_node = mm->mm_rb.rb_node;
+			vma = NULL;
+
+			/* Standard rb-tree walk over the VMAs */
+			while (rb_node) {
+				struct vm_area_struct *vma_tmp;
+
+				vma_tmp = rb_entry(rb_node,
+					struct vm_area_struct, vm_rb);
+
+				dprintk(KERN_INFO
+					"vma_tmp->vm_start=0x%lX"
+					"vma_tmp->vm_end=0x%lX\n",
+					vma_tmp->vm_start,
+					vma_tmp->vm_end);
+
+				if (vma_tmp->vm_end >= (addr+size)) {
+					vma = vma_tmp;
+					if (vma_tmp->vm_start <= addr)
+						break;
+
+					rb_node = rb_node->rb_left;
+				} else {
+					rb_node = rb_node->rb_right;
+				}
+			}
+
+			if (vma)
+				mm->mmap_cache = vma;
+			/* Walk fell off the tree: no VMA fully contains the block */
+			if (rb_node == NULL)
+				vma = NULL;
+		}
+	}
+	return vma;
+}
+
+/*
+ * Checks that the whole user range [shmem, shmem+shmem_size) is covered
+ * by VMAs of the current process with the access rights requested in
+ * flags (TF_SHMEM_TYPE_READ / TF_SHMEM_TYPE_WRITE).
+ * Returns 0 if valid, -EINVAL on address overflow, -EFAULT otherwise.
+ */
+int tf_validate_shmem_and_flags(
+	u32 shmem,
+	u32 shmem_size,
+	u32 flags)
+{
+	struct vm_area_struct *vma;
+	u32 chunk;
+
+	if (shmem_size == 0)
+		/* This is always valid */
+		return 0;
+
+	if ((shmem + shmem_size) < shmem)
+		/* Overflow */
+		return -EINVAL;
+
+	down_read(&current->mm->mmap_sem);
+
+	/*
+	 * When looking for a memory address, split buffer into chunks of
+	 * size=PAGE_SIZE.
+	 */
+	/* first chunk runs up to the next page boundary */
+	chunk = PAGE_SIZE - (shmem & (PAGE_SIZE-1));
+	if (chunk > shmem_size)
+		chunk = shmem_size;
+
+	do {
+		vma = tf_find_vma(current->mm, shmem, chunk);
+
+		if (vma == NULL) {
+			dprintk(KERN_ERR "%s: area not found\n", __func__);
+			goto error;
+		}
+
+		if (flags & TF_SHMEM_TYPE_READ)
+			if (!(vma->vm_flags & VM_READ)) {
+				dprintk(KERN_ERR "%s: no read permission\n",
+					__func__);
+				goto error;
+			}
+		if (flags & TF_SHMEM_TYPE_WRITE)
+			if (!(vma->vm_flags & VM_WRITE)) {
+				dprintk(KERN_ERR "%s: no write permission\n",
+					__func__);
+				goto error;
+			}
+
+		/* advance to the next page-sized chunk */
+		shmem_size -= chunk;
+		shmem += chunk;
+		chunk = (shmem_size <= PAGE_SIZE ?
+				shmem_size : PAGE_SIZE);
+	} while (shmem_size != 0);
+
+	up_read(&current->mm->mmap_sem);
+	return 0;
+
+error:
+	up_read(&current->mm->mmap_sem);
+	return -EFAULT;
+}
+
+
+/*
+ * Maps a temporary memory reference for the duration of a command.
+ * On success *shmem_desc is NULL for a NULL/empty tmpref, or the
+ * descriptor of the newly mapped block. Returns 0 or a negative errno.
+ */
+static int tf_map_temp_shmem(struct tf_connection *connection,
+	 struct tf_command_param_temp_memref *temp_memref,
+	 u32 param_type,
+	 struct tf_shmem_desc **shmem_desc)
+{
+	u32 flags;
+	/*
+	 * FIX: was "u32 error". This variable receives negative errno
+	 * values (-EINVAL below, and the results of the validate/map
+	 * helpers) and is returned through an int return type, so it
+	 * must be signed — consistent with every other function here.
+	 */
+	int error = S_SUCCESS;
+	bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
+
+	dprintk(KERN_INFO "tf_map_temp_shmem(%p, "
+		"0x%08x[size=0x%08x], offset=0x%08x)\n",
+		connection,
+		temp_memref->descriptor,
+		temp_memref->size,
+		temp_memref->offset);
+
+	/* Derive the access rights from the parameter type */
+	switch (param_type) {
+	case TF_PARAM_TYPE_MEMREF_TEMP_INPUT:
+		flags = TF_SHMEM_TYPE_READ;
+		break;
+	case TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT:
+		flags = TF_SHMEM_TYPE_WRITE;
+		break;
+	case TF_PARAM_TYPE_MEMREF_TEMP_INOUT:
+		flags = TF_SHMEM_TYPE_WRITE | TF_SHMEM_TYPE_READ;
+		break;
+	default:
+		error = -EINVAL;
+		goto error;
+	}
+
+	if (temp_memref->descriptor == 0) {
+		/* NULL tmpref */
+		temp_memref->offset = 0;
+		*shmem_desc = NULL;
+	} else if ((temp_memref->descriptor != 0) &&
+			(temp_memref->size == 0)) {
+		/* Empty tmpref: pass the raw pointer through the offset */
+		temp_memref->offset = temp_memref->descriptor;
+		temp_memref->descriptor = 0;
+		temp_memref->size = 0;
+		*shmem_desc = NULL;
+	} else {
+		/* Map the temp shmem block */
+
+		u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
+		u32 descriptor_count;
+
+		/* User buffers must be validated before pinning */
+		if (in_user_space) {
+			error = tf_validate_shmem_and_flags(
+				temp_memref->descriptor,
+				temp_memref->size,
+				flags);
+			if (error != 0)
+				goto error;
+		}
+
+		error = tf_map_shmem(
+			connection,
+			temp_memref->descriptor,
+			flags,
+			in_user_space,
+			shared_mem_descriptors,
+			&(temp_memref->offset),
+			temp_memref->size,
+			shmem_desc,
+			&descriptor_count);
+		temp_memref->descriptor = shared_mem_descriptors[0];
+	}
+
+error:
+	return error;
+}
+
+/*
+ * Clean up a list of shared memory descriptors.
+ */
+/*
+ * Fully unmaps every shared memory descriptor on the given list.
+ * tf_unmap_shmem() removes each descriptor from the list (and frees
+ * it, since full_cleanup is set), so the list drains to empty.
+ */
+static void tf_shared_memory_cleanup_list(
+	struct tf_connection *connection,
+	struct list_head *shmem_desc_list)
+{
+	struct tf_shmem_desc *desc;
+
+	for (;;) {
+		if (list_empty(shmem_desc_list))
+			break;
+
+		desc = list_first_entry(shmem_desc_list,
+			struct tf_shmem_desc, list);
+
+		/* full cleanup: release the pages and the descriptor */
+		tf_unmap_shmem(connection, desc, 1);
+	}
+}
+
+
+/*
+ * Clean up the shared memory information in the connection.
+ * Releases all allocated pages.
+ */
+static void tf_cleanup_shared_memories(struct tf_connection *connection)
+{
+	/* clean up the list of used and free descriptors.
+	 * done outside the mutex, because tf_unmap_shmem already
+	 * mutex()ed
+	 */
+	tf_shared_memory_cleanup_list(connection,
+		&connection->used_shmem_list);
+	tf_shared_memory_cleanup_list(connection,
+		&connection->free_shmem_list);
+
+	mutex_lock(&(connection->shmem_mutex));
+
+	/* Free the Vmas page */
+	if (connection->vmas) {
+		internal_free_page((unsigned long) connection->vmas);
+		connection->vmas = NULL;
+	}
+
+	/* Return all coarse page tables to the allocator */
+	tf_release_coarse_page_table_allocator(
+		&(connection->cpt_alloc_context));
+
+	mutex_unlock(&(connection->shmem_mutex));
+}
+
+
+/*
+ * Initialize the shared memory in a connection.
+ * Allocates the minimum memory to be provided
+ * for shared memory management
+ */
+/*
+ * Initializes the shared memory management of a connection: mutex,
+ * lists, coarse page table allocator, 3 preallocated descriptors and
+ * the VMA tracking page. Returns 0 or -ENOMEM (all partially acquired
+ * resources are released on failure).
+ */
+int tf_init_shared_memory(struct tf_connection *connection)
+{
+	int error;
+	int i;
+	int coarse_page_index;
+
+	/*
+	 * We only need to initialize special elements and attempt to allocate
+	 * the minimum shared memory descriptors we want to support
+	 */
+
+	mutex_init(&(connection->shmem_mutex));
+	INIT_LIST_HEAD(&(connection->free_shmem_list));
+	INIT_LIST_HEAD(&(connection->used_shmem_list));
+	atomic_set(&(connection->shmem_count), 0);
+
+	tf_init_coarse_page_table_allocator(
+		&(connection->cpt_alloc_context));
+
+
+	/*
+	 * Preallocate 3 pages to increase the chances that a connection
+	 * succeeds in allocating shared mem
+	 */
+	for (i = 0;
+	     i < 3;
+	     i++) {
+		struct tf_shmem_desc *shmem_desc =
+			(struct tf_shmem_desc *) internal_kmalloc(
+				sizeof(*shmem_desc), GFP_KERNEL);
+
+		if (shmem_desc == NULL) {
+			printk(KERN_ERR "tf_init_shared_memory(%p):"
+				" failed to pre allocate descriptor %d\n",
+				connection,
+				i);
+			error = -ENOMEM;
+			goto error;
+		}
+
+		for (coarse_page_index = 0;
+		     coarse_page_index < TF_MAX_COARSE_PAGES;
+		     coarse_page_index++) {
+			struct tf_coarse_page_table *coarse_pg_table;
+
+			coarse_pg_table = tf_alloc_coarse_page_table(
+				&(connection->cpt_alloc_context),
+				TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED);
+
+			if (coarse_pg_table == NULL) {
+				printk(KERN_ERR "tf_init_shared_memory(%p)"
+					": descriptor %d coarse page %d - "
+					"tf_alloc_coarse_page_table() "
+					"failed\n",
+					connection,
+					i,
+					coarse_page_index);
+				/*
+				 * FIX: shmem_desc is not yet linked on any
+				 * list here, so tf_cleanup_shared_memories()
+				 * below cannot reach it — it was leaked.
+				 * Free it before bailing out (its coarse
+				 * page tables are reclaimed when the
+				 * allocator context is released).
+				 */
+				internal_kfree(shmem_desc);
+				error = -ENOMEM;
+				goto error;
+			}
+
+			shmem_desc->coarse_pg_table[coarse_page_index] =
+				coarse_pg_table;
+		}
+		shmem_desc->coarse_pg_table_count = 0;
+
+		shmem_desc->type = TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM;
+		atomic_set(&shmem_desc->ref_count, 1);
+
+		/*
+		 * add this preallocated descriptor to the list of free
+		 * descriptors Keep the device context specific one at the
+		 * beginning of the list
+		 */
+		INIT_LIST_HEAD(&(shmem_desc->list));
+		list_add_tail(&(shmem_desc->list),
+			&(connection->free_shmem_list));
+	}
+
+	/* allocate memory for the vmas structure */
+	connection->vmas =
+		(struct vm_area_struct **) internal_get_zeroed_page(GFP_KERNEL);
+	if (connection->vmas == NULL) {
+		printk(KERN_ERR "tf_init_shared_memory(%p):"
+			" vmas - failed to get_zeroed_page\n",
+			connection);
+		error = -ENOMEM;
+		goto error;
+	}
+
+	return 0;
+
+error:
+	tf_cleanup_shared_memories(connection);
+	return error;
+}
+
+/*----------------------------------------------------------------------------
+ * Connection operations to the Secure World
+ *----------------------------------------------------------------------------*/
+
+int tf_create_device_context(
+	struct tf_connection *connection)
+{
+	union tf_command command;
+	union tf_answer answer;
+	int error = 0;
+
+	dprintk(KERN_INFO "tf_create_device_context(%p)\n",
+			connection);
+
+	command.create_device_context.message_type =
+		TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT;
+	/* message_size counts the 32-bit words following the header */
+	command.create_device_context.message_size =
+		(sizeof(struct tf_command_create_device_context)
+			- sizeof(struct tf_command_header))/sizeof(u32);
+	/* the answer is routed back via the operation id */
+	command.create_device_context.operation_id = (u32) &answer;
+	command.create_device_context.device_context_id = (u32) connection;
+
+	error = tf_send_receive(
+		&connection->dev->sm,
+		&command,
+		&answer,
+		connection,
+		true);
+
+	/* answer is only valid when tf_send_receive() returned 0 */
+	if ((error != 0) ||
+		(answer.create_device_context.error_code != S_SUCCESS))
+		goto error;
+
+	/*
+	 * CREATE_DEVICE_CONTEXT succeeded,
+	 * store device context handler and update connection status
+	 */
+	connection->device_context =
+		answer.create_device_context.device_context;
+	spin_lock(&(connection->state_lock));
+	connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
+	spin_unlock(&(connection->state_lock));
+
+	/* successful completion */
+	dprintk(KERN_INFO "tf_create_device_context(%p):"
+		" device_context=0x%08x\n",
+		connection,
+		answer.create_device_context.device_context);
+	return 0;
+
+error:
+	if (error != 0) {
+		/* transport failure: the command may not have been sent */
+		dprintk(KERN_ERR "tf_create_device_context failed with "
+			"error %d\n", error);
+	} else {
+		/*
+		 * We sent a DeviceCreateContext. The state is now
+		 * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT It has to be
+		 * reset if we ever want to send a DeviceCreateContext again
+		 */
+		spin_lock(&(connection->state_lock));
+		connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+		spin_unlock(&(connection->state_lock));
+		dprintk(KERN_ERR "tf_create_device_context failed with "
+			"error_code 0x%08X\n",
+			answer.create_device_context.error_code);
+		/* Map the Secure World status onto a Linux errno */
+		if (answer.create_device_context.error_code ==
+			S_ERROR_OUT_OF_MEMORY)
+			error = -ENOMEM;
+		else
+			error = -EFAULT;
+	}
+
+	return error;
+}
+
+/* Check that the current application belongs to the
+ * requested GID */
+/*
+ * Returns true if the current task's effective GID, or any of its
+ * supplementary GIDs, matches requested_gid.
+ */
+static bool tf_check_gid(gid_t requested_gid)
+{
+	if (requested_gid == current_egid()) {
+		return true;
+	} else {
+		u32 size;
+		u32 i;
+		bool found = false;
+
+		/* Look in the supplementary GIDs */
+		get_group_info(GROUP_INFO);
+		size = GROUP_INFO->ngroups;
+		for (i = 0; i < size; i++)
+			if (requested_gid == GROUP_AT(GROUP_INFO, i)) {
+				found = true;
+				break;
+			}
+		/*
+		 * FIX: drop the reference taken by get_group_info().
+		 * It was previously never released (the loop even
+		 * returned from inside without a put), leaking a
+		 * group_info reference on every call.
+		 */
+		put_group_info(GROUP_INFO);
+		if (found)
+			return true;
+	}
+	return false;
+}
+
+/*
+ * Opens a client session to the Secure World
+ */
+int tf_open_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+ struct tf_shmem_desc *shmem_desc[4] = {NULL};
+ u32 i;
+
+ dprintk(KERN_INFO "tf_open_client_session(%p)\n", connection);
+
+ /*
+ * Initialize the message size with no login data. This will be later
+ * adjusted the the cases below
+ */
+ command->open_client_session.message_size =
+ (sizeof(struct tf_command_open_client_session) - 20
+ - sizeof(struct tf_command_header))/4;
+
+ switch (command->open_client_session.login_type) {
+ case TF_LOGIN_PUBLIC:
+ /* Nothing to do */
+ break;
+
+ case TF_LOGIN_USER:
+ /*
+ * Send the EUID of the calling application in the login data.
+ * Update message size.
+ */
+ *(u32 *) &command->open_client_session.login_data =
+ current_euid();
+#ifndef CONFIG_ANDROID
+ command->open_client_session.login_type =
+ (u32) TF_LOGIN_USER_LINUX_EUID;
+#else
+ command->open_client_session.login_type =
+ (u32) TF_LOGIN_USER_ANDROID_EUID;
+#endif
+
+ /* Added one word */
+ command->open_client_session.message_size += 1;
+ break;
+
+ case TF_LOGIN_GROUP: {
+ /* Check requested GID */
+ gid_t requested_gid =
+ *(u32 *) command->open_client_session.login_data;
+
+ if (!tf_check_gid(requested_gid)) {
+ dprintk(KERN_ERR "tf_open_client_session(%p) "
+ "TF_LOGIN_GROUP: requested GID (0x%x) does "
+ "not match real eGID (0x%x)"
+ "or any of the supplementary GIDs\n",
+ connection, requested_gid, current_egid());
+ error = -EACCES;
+ goto error;
+ }
+#ifndef CONFIG_ANDROID
+ command->open_client_session.login_type =
+ TF_LOGIN_GROUP_LINUX_GID;
+#else
+ command->open_client_session.login_type =
+ TF_LOGIN_GROUP_ANDROID_GID;
+#endif
+
+ command->open_client_session.message_size += 1; /* GID */
+ break;
+ }
+
+#ifndef CONFIG_ANDROID
+ case TF_LOGIN_APPLICATION: {
+ /*
+ * Compute SHA-1 hash of the application fully-qualified path
+ * name. Truncate the hash to 16 bytes and send it as login
+ * data. Update message size.
+ */
+ u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+ error = tf_hash_application_path_and_data(pSHA1Hash,
+ NULL, 0);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_hash_application_path_and_data\n");
+ goto error;
+ }
+ memcpy(&command->open_client_session.login_data,
+ pSHA1Hash, 16);
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH;
+ /* 16 bytes */
+ command->open_client_session.message_size += 4;
+ break;
+ }
+#else
+ case TF_LOGIN_APPLICATION:
+ /*
+ * Send the real UID of the calling application in the login
+ * data. Update message size.
+ */
+ *(u32 *) &command->open_client_session.login_data =
+ current_uid();
+
+ command->open_client_session.login_type =
+ (u32) TF_LOGIN_APPLICATION_ANDROID_UID;
+
+ /* Added one word */
+ command->open_client_session.message_size += 1;
+ break;
+#endif
+
+#ifndef CONFIG_ANDROID
+ case TF_LOGIN_APPLICATION_USER: {
+ /*
+ * Compute SHA-1 hash of the concatenation of the application
+ * fully-qualified path name and the EUID of the calling
+ * application. Truncate the hash to 16 bytes and send it as
+ * login data. Update message size.
+ */
+ u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+ error = tf_hash_application_path_and_data(pSHA1Hash,
+ (u8 *) &(current_euid()), sizeof(current_euid()));
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_hash_application_path_and_data\n");
+ goto error;
+ }
+ memcpy(&command->open_client_session.login_data,
+ pSHA1Hash, 16);
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH;
+
+ /* 16 bytes */
+ command->open_client_session.message_size += 4;
+
+ break;
+ }
+#else
+ case TF_LOGIN_APPLICATION_USER:
+ /*
+ * Send the real UID and the EUID of the calling application in
+ * the login data. Update message size.
+ */
+ *(u32 *) &command->open_client_session.login_data =
+ current_uid();
+ *(u32 *) &command->open_client_session.login_data[4] =
+ current_euid();
+
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID;
+
+ /* Added two words */
+ command->open_client_session.message_size += 2;
+ break;
+#endif
+
+#ifndef CONFIG_ANDROID
+ case TF_LOGIN_APPLICATION_GROUP: {
+ /*
+ * Check requested GID. Compute SHA-1 hash of the concatenation
+ * of the application fully-qualified path name and the
+ * requested GID. Update message size
+ */
+ gid_t requested_gid;
+ u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+ requested_gid = *(u32 *) &command->open_client_session.
+ login_data;
+
+ if (!tf_check_gid(requested_gid)) {
+ dprintk(KERN_ERR "tf_open_client_session(%p) "
+ "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
+ "does not match real eGID (0x%x)"
+ "or any of the supplementary GIDs\n",
+ connection, requested_gid, current_egid());
+ error = -EACCES;
+ goto error;
+ }
+
+ error = tf_hash_application_path_and_data(pSHA1Hash,
+ &requested_gid, sizeof(u32));
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_hash_application_path_and_data\n");
+ goto error;
+ }
+
+ memcpy(&command->open_client_session.login_data,
+ pSHA1Hash, 16);
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH;
+
+ /* 16 bytes */
+ command->open_client_session.message_size += 4;
+ break;
+ }
+#else
+ case TF_LOGIN_APPLICATION_GROUP: {
+ /*
+ * Check requested GID. Send the real UID and the requested GID
+ * in the login data. Update message size.
+ */
+ gid_t requested_gid;
+
+ requested_gid = *(u32 *) &command->open_client_session.
+ login_data;
+
+ if (!tf_check_gid(requested_gid)) {
+ dprintk(KERN_ERR "tf_open_client_session(%p) "
+ "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
+ "does not match real eGID (0x%x)"
+ "or any of the supplementary GIDs\n",
+ connection, requested_gid, current_egid());
+ error = -EACCES;
+ goto error;
+ }
+
+ *(u32 *) &command->open_client_session.login_data =
+ current_uid();
+ *(u32 *) &command->open_client_session.login_data[4] =
+ requested_gid;
+
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID;
+
+ /* Added two words */
+ command->open_client_session.message_size += 2;
+
+ break;
+ }
+#endif
+
+ case TF_LOGIN_PRIVILEGED:
+ /* A privileged login may be performed only on behalf of the
+ kernel itself or on behalf of a process with euid=0 or
+ egid=0. */
+ if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
+ dprintk(KERN_DEBUG "tf_open_client_session: "
+ "TF_LOGIN_PRIVILEGED for kernel API\n");
+ } else if (current_euid() != 0 && current_egid() != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ " user %d, group %d not allowed to open "
+ "session with TF_LOGIN_PRIVILEGED\n",
+ current_euid(), current_egid());
+ error = -EACCES;
+ goto error;
+ } else {
+ dprintk(KERN_DEBUG "tf_open_client_session: "
+ "TF_LOGIN_PRIVILEGED for %u:%u\n",
+ current_euid(), current_egid());
+ }
+ command->open_client_session.login_type =
+ TF_LOGIN_PRIVILEGED;
+ break;
+
+ case TF_LOGIN_AUTHENTICATION: {
+ /*
+ * Compute SHA-1 hash of the application binary
+ * Send this hash as the login data (20 bytes)
+ */
+
+ u8 *hash;
+ hash = &(command->open_client_session.login_data[0]);
+
+ error = tf_get_current_process_hash(hash);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_get_current_process_hash\n");
+ goto error;
+ }
+ command->open_client_session.login_type =
+ TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH;
+
+ /* 20 bytes */
+ command->open_client_session.message_size += 5;
+ break;
+ }
+
+ case TF_LOGIN_PRIVILEGED_KERNEL:
+ /* A kernel login may be performed only on behalf of the
+ kernel itself. */
+ if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
+ dprintk(KERN_DEBUG "tf_open_client_session: "
+ "TF_LOGIN_PRIVILEGED_KERNEL for kernel API\n");
+ command->open_client_session.login_type =
+ TF_LOGIN_PRIVILEGED_KERNEL;
+ } else {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ " user %d, group %d not allowed to open "
+ "session with TF_LOGIN_PRIVILEGED_KERNEL\n",
+ current_euid(), current_egid());
+ error = -EACCES;
+ goto error;
+ }
+ command->open_client_session.login_type =
+ TF_LOGIN_PRIVILEGED_KERNEL;
+ break;
+
+ default:
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "unknown login_type(%08X)\n",
+ command->open_client_session.login_type);
+ error = -EOPNOTSUPP;
+ goto error;
+ }
+
+ /* Map the temporary memory references */
+ for (i = 0; i < 4; i++) {
+ int param_type;
+ param_type = TF_GET_PARAM_TYPE(
+ command->open_client_session.param_types, i);
+ if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
+ TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
+ == TF_PARAM_TYPE_MEMREF_FLAG) {
+ /* Map temp mem ref */
+ error = tf_map_temp_shmem(connection,
+ &command->open_client_session.
+ params[i].temp_memref,
+ param_type,
+ &shmem_desc[i]);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "unable to map temporary memory block "
+ "(%08X)\n", error);
+ goto error;
+ }
+ }
+ }
+
+ /* Fill the handle of the Device Context */
+ command->open_client_session.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+error:
+ /* Unmap the temporary memory references */
+ for (i = 0; i < 4; i++)
+ if (shmem_desc[i] != NULL)
+ tf_unmap_shmem(connection, shmem_desc[i], 0);
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_open_client_session returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_open_client_session returns "
+ "error_code 0x%08X\n",
+ answer->open_client_session.error_code);
+
+ return error;
+}
+
+
+/*
+ * Closes a client session from the Secure World
+ */
+int tf_close_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+
+ dprintk(KERN_DEBUG "tf_close_client_session(%p)\n", connection);
+
+ command->close_client_session.message_size =
+ (sizeof(struct tf_command_close_client_session) -
+ sizeof(struct tf_command_header)) / 4;
+ command->close_client_session.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_close_client_session returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_close_client_session returns "
+ "error 0x%08X\n",
+ answer->close_client_session.error_code);
+
+ return error;
+}
+
+
+/*
+ * Registers a shared memory to the Secure World
+ */
+int tf_register_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+ struct tf_shmem_desc *shmem_desc = NULL;
+ bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
+ struct tf_command_register_shared_memory *msg =
+ &command->register_shared_memory;
+
+ dprintk(KERN_INFO "tf_register_shared_memory(%p) "
+ "%p[0x%08X][0x%08x]\n",
+ connection,
+ (void *)msg->shared_mem_descriptors[0],
+ msg->shared_mem_size,
+ (u32)msg->memory_flags);
+
+ if (in_user_space) {
+ error = tf_validate_shmem_and_flags(
+ msg->shared_mem_descriptors[0],
+ msg->shared_mem_size,
+ (u32)msg->memory_flags);
+ if (error != 0)
+ goto error;
+ }
+
+ /* Initialize message_size with no descriptors */
+ msg->message_size
+ = (offsetof(struct tf_command_register_shared_memory,
+ shared_mem_descriptors) -
+ sizeof(struct tf_command_header)) / 4;
+
+ /* Map the shmem block and update the message */
+ if (msg->shared_mem_size == 0) {
+ /* Empty shared mem */
+ msg->shared_mem_start_offset = msg->shared_mem_descriptors[0];
+ } else {
+ u32 descriptor_count;
+ error = tf_map_shmem(
+ connection,
+ msg->shared_mem_descriptors[0],
+ msg->memory_flags,
+ in_user_space,
+ msg->shared_mem_descriptors,
+ &(msg->shared_mem_start_offset),
+ msg->shared_mem_size,
+ &shmem_desc,
+ &descriptor_count);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_register_shared_memory: "
+ "unable to map shared memory block\n");
+ goto error;
+ }
+ msg->message_size += descriptor_count;
+ }
+
+ /*
+ * write the correct device context handle and the address of the shared
+ * memory descriptor in the message
+ */
+ msg->device_context = connection->device_context;
+ msg->block_id = (u32)shmem_desc;
+
+ /* Send the updated message */
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer->register_shared_memory.error_code
+ != S_SUCCESS)) {
+ dprintk(KERN_ERR "tf_register_shared_memory: "
+ "operation failed. Unmap block\n");
+ goto error;
+ }
+
+ /* Saves the block handle returned by the secure world */
+ if (shmem_desc != NULL)
+ shmem_desc->block_identifier =
+ answer->register_shared_memory.block;
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_register_shared_memory(%p):"
+ " block_id=0x%08x block=0x%08x\n",
+ connection, msg->block_id,
+ answer->register_shared_memory.block);
+ return 0;
+
+ /* error completion */
+error:
+ tf_unmap_shmem(
+ connection,
+ shmem_desc,
+ 0);
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_register_shared_memory returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_register_shared_memory returns "
+ "error_code 0x%08X\n",
+ answer->register_shared_memory.error_code);
+
+ return error;
+}
+
+
+/*
+ * Releases a shared memory from the Secure World
+ */
+int tf_release_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+
+ dprintk(KERN_DEBUG "tf_release_shared_memory(%p)\n", connection);
+
+ command->release_shared_memory.message_size =
+ (sizeof(struct tf_command_release_shared_memory) -
+ sizeof(struct tf_command_header)) / 4;
+ command->release_shared_memory.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer->release_shared_memory.error_code != S_SUCCESS))
+ goto error;
+
+ /* Use block_id to get back the pointer to shmem_desc */
+ tf_unmap_shmem(
+ connection,
+ (struct tf_shmem_desc *)
+ answer->release_shared_memory.block_id,
+ 0);
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_release_shared_memory(%p):"
+ " block_id=0x%08x block=0x%08x\n",
+ connection, answer->release_shared_memory.block_id,
+ command->release_shared_memory.block);
+ return 0;
+
+
+error:
+ if (error != 0)
+ dprintk(KERN_ERR "tf_release_shared_memory returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_release_shared_memory returns "
+ "nChannelStatus 0x%08X\n",
+ answer->release_shared_memory.error_code);
+
+ return error;
+
+}
+
+
+/*
+ * Invokes a client command to the Secure World
+ */
+int tf_invoke_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+ struct tf_shmem_desc *shmem_desc[4] = {NULL};
+ int i;
+
+ dprintk(KERN_INFO "tf_invoke_client_command(%p)\n", connection);
+
+ command->release_shared_memory.message_size =
+ (sizeof(struct tf_command_invoke_client_command) -
+ sizeof(struct tf_command_header)) / 4;
+
+#ifdef CONFIG_TF_ZEBRA
+ error = tf_crypto_try_shortcuted_update(connection,
+ (struct tf_command_invoke_client_command *) command,
+ (struct tf_answer_invoke_client_command *) answer);
+ if (error == 0)
+ return error;
+#endif
+
+ /* Map the tmprefs */
+ for (i = 0; i < 4; i++) {
+ int param_type = TF_GET_PARAM_TYPE(
+ command->invoke_client_command.param_types, i);
+ if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
+ TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
+ == TF_PARAM_TYPE_MEMREF_FLAG) {
+ /* A temporary memref: map it */
+ error = tf_map_temp_shmem(connection,
+ &command->invoke_client_command.
+ params[i].temp_memref,
+ param_type, &shmem_desc[i]);
+ if (error != 0) {
+ dprintk(KERN_ERR
+ "tf_invoke_client_command: "
+ "unable to map temporary memory "
+ "block\n (%08X)", error);
+ goto error;
+ }
+ }
+ }
+
+ command->invoke_client_command.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(&connection->dev->sm, command,
+ answer, connection, true);
+
+error:
+ /* Unmap de temp mem refs */
+ for (i = 0; i < 4; i++) {
+ if (shmem_desc[i] != NULL) {
+ dprintk(KERN_INFO "tf_invoke_client_command: "
+ "UnMatemp_memref %d\n ", i);
+ tf_unmap_shmem(connection, shmem_desc[i], 0);
+ }
+ }
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_invoke_client_command returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_invoke_client_command returns "
+ "error_code 0x%08X\n",
+ answer->invoke_client_command.error_code);
+
+ return error;
+}
+
+
+/*
+ * Cancels a client command from the Secure World
+ */
+int tf_cancel_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+
+ dprintk(KERN_DEBUG "tf_cancel_client_command(%p)\n", connection);
+
+ command->cancel_client_operation.device_context =
+ connection->device_context;
+ command->cancel_client_operation.message_size =
+ (sizeof(struct tf_command_cancel_client_operation) -
+ sizeof(struct tf_command_header)) / 4;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer->cancel_client_operation.error_code != S_SUCCESS))
+ goto error;
+
+
+ /* successful completion */
+ return 0;
+
+error:
+ if (error != 0)
+ dprintk(KERN_ERR "tf_cancel_client_command returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_cancel_client_command returns "
+ "nChannelStatus 0x%08X\n",
+ answer->cancel_client_operation.error_code);
+
+ return error;
+}
+
+
+
+/*
+ * Destroys a device context from the Secure World
+ */
+int tf_destroy_device_context(
+ struct tf_connection *connection)
+{
+ int error;
+ /*
+ * AFY: better use the specialized tf_command_destroy_device_context
+ * structure: this will save stack
+ */
+ union tf_command command;
+ union tf_answer answer;
+
+ dprintk(KERN_INFO "tf_destroy_device_context(%p)\n", connection);
+
+ BUG_ON(connection == NULL);
+
+ command.header.message_type = TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+ command.header.message_size =
+ (sizeof(struct tf_command_destroy_device_context) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+
+ /*
+ * fill in the device context handler
+ * it is guarantied that the first shared memory descriptor describes
+ * the device context
+ */
+ command.destroy_device_context.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ &command,
+ &answer,
+ connection,
+ false);
+
+ if ((error != 0) ||
+ (answer.destroy_device_context.error_code != S_SUCCESS))
+ goto error;
+
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_destroy_device_context(%p)\n",
+ connection);
+ return 0;
+
+error:
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_destroy_device_context failed with "
+ "error %d\n", error);
+ } else {
+ dprintk(KERN_ERR "tf_destroy_device_context failed with "
+ "error_code 0x%08X\n",
+ answer.destroy_device_context.error_code);
+ if (answer.destroy_device_context.error_code ==
+ S_ERROR_OUT_OF_MEMORY)
+ error = -ENOMEM;
+ else
+ error = -EFAULT;
+ }
+
+ return error;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Connection initialization and cleanup operations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Opens a connection to the specified device.
+ *
+ * The placeholder referenced by connection is set to the address of the
+ * new connection; it is set to NULL upon failure.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+int tf_open(struct tf_device *dev,
+ struct file *file,
+ struct tf_connection **connection)
+{
+ int error;
+ struct tf_connection *conn = NULL;
+
+ dprintk(KERN_INFO "tf_open(%p, %p)\n", file, connection);
+
+ /*
+ * Allocate and initialize the conn.
+ * kmalloc only allocates sizeof(*conn) virtual memory
+ */
+ conn = (struct tf_connection *) internal_kmalloc(sizeof(*conn),
+ GFP_KERNEL);
+ if (conn == NULL) {
+ printk(KERN_ERR "tf_open(): "
+ "Out of memory for conn!\n");
+ error = -ENOMEM;
+ goto error;
+ }
+
+ memset(conn, 0, sizeof(*conn));
+
+ conn->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ conn->dev = dev;
+ spin_lock_init(&(conn->state_lock));
+ atomic_set(&(conn->pending_op_count), 0);
+ INIT_LIST_HEAD(&(conn->list));
+
+ /*
+ * Initialize the shared memory
+ */
+ error = tf_init_shared_memory(conn);
+ if (error != 0)
+ goto error;
+
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * Initialize CUS specifics
+ */
+ tf_crypto_init_cus(conn);
+#endif
+
+ /*
+ * Attach the conn to the device.
+ */
+ spin_lock(&(dev->connection_list_lock));
+ list_add(&(conn->list), &(dev->connection_list));
+ spin_unlock(&(dev->connection_list_lock));
+
+ /*
+ * Successful completion.
+ */
+
+ *connection = conn;
+
+ dprintk(KERN_INFO "tf_open(): Success (conn=%p)\n", conn);
+ return 0;
+
+ /*
+ * Error handling.
+ */
+
+error:
+ dprintk(KERN_ERR "tf_open(): Failure (error %d)\n", error);
+ /* Deallocate the descriptor pages if necessary */
+ internal_kfree(conn);
+ *connection = NULL;
+ return error;
+}
+
+
+/*
+ * Closes the specified connection.
+ *
+ * Upon return, the connection has been destroyed and cannot be used anymore.
+ *
+ * This function does nothing if connection is set to NULL.
+ */
+void tf_close(struct tf_connection *connection)
+{
+ int error;
+ enum TF_CONN_STATE state;
+
+ dprintk(KERN_DEBUG "tf_close(%p)\n", connection);
+
+ if (connection == NULL)
+ return;
+
+ /*
+ * Assumption: Linux guarantees that no other operation is in progress
+ * and that no other operation will be started when close is called
+ */
+ BUG_ON(atomic_read(&(connection->pending_op_count)) != 0);
+
+ /*
+ * Exchange a Destroy Device Context message if needed.
+ */
+ spin_lock(&(connection->state_lock));
+ state = connection->state;
+ spin_unlock(&(connection->state_lock));
+ if (state == TF_CONN_STATE_VALID_DEVICE_CONTEXT) {
+ /*
+ * A DestroyDeviceContext operation was not performed. Do it
+ * now.
+ */
+ error = tf_destroy_device_context(connection);
+ if (error != 0)
+ /* avoid cleanup if destroy device context fails */
+ goto error;
+ }
+
+ /*
+ * Clean up the shared memory
+ */
+ tf_cleanup_shared_memories(connection);
+
+ spin_lock(&(connection->dev->connection_list_lock));
+ list_del(&(connection->list));
+ spin_unlock(&(connection->dev->connection_list_lock));
+
+ internal_kfree(connection);
+
+ return;
+
+error:
+ dprintk(KERN_DEBUG "tf_close(%p) failed with error code %d\n",
+ connection, error);
+}
+
diff --git a/security/tf_driver/tf_conn.h b/security/tf_driver/tf_conn.h
new file mode 100644
index 000000000000..8bed16f19d5f
--- /dev/null
+++ b/security/tf_driver/tf_conn.h
@@ -0,0 +1,106 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_CONN_H__
+#define __TF_CONN_H__
+
+#include "tf_defs.h"
+
+/*
+ * Returns a pointer to the connection referenced by the
+ * specified file.
+ */
+static inline struct tf_connection *tf_conn_from_file(
+ struct file *file)
+{
+ return file->private_data;
+}
+
+int tf_validate_shmem_and_flags(u32 shmem, u32 shmem_size, u32 flags);
+
+int tf_map_shmem(
+ struct tf_connection *connection,
+ u32 buffer,
+ /* flags for read-write access rights on the memory */
+ u32 flags,
+ bool in_user_space,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 *buffer_start_offset,
+ u32 buffer_size,
+ struct tf_shmem_desc **shmem_desc,
+ u32 *descriptor_count);
+
+void tf_unmap_shmem(
+ struct tf_connection *connection,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup);
+
+/*----------------------------------------------------------------------------
+ * Connection operations to the Secure World
+ *----------------------------------------------------------------------------*/
+
+int tf_create_device_context(
+ struct tf_connection *connection);
+
+int tf_destroy_device_context(
+ struct tf_connection *connection);
+
+int tf_open_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_close_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_register_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_release_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_invoke_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_cancel_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+/*----------------------------------------------------------------------------
+ * Connection initialization and cleanup operations
+ *----------------------------------------------------------------------------*/
+
+int tf_open(struct tf_device *dev,
+ struct file *file,
+ struct tf_connection **connection);
+
+void tf_close(
+ struct tf_connection *connection);
+
+
+#endif /* !defined(__TF_CONN_H__) */
diff --git a/security/tf_driver/scxlnx_defs.h b/security/tf_driver/tf_defs.h
index b6430d2a3c59..58d5eaaaea5b 100644
--- a/security/tf_driver/scxlnx_defs.h
+++ b/security/tf_driver/tf_defs.h
@@ -1,5 +1,5 @@
-/*
- * Copyright (c) 2006-2010 Trusted Logic S.A.
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -17,8 +17,8 @@
* MA 02111-1307 USA
*/
-#ifndef __SCXLNX_DEFS_H__
-#define __SCXLNX_DEFS_H__
+#ifndef __TF_DEFS_H__
+#define __TF_DEFS_H__
#include <asm/atomic.h>
#include <linux/version.h>
@@ -27,7 +27,6 @@
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/sysdev.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
@@ -35,7 +34,7 @@
#include <linux/wakelock.h>
#endif
-#include "scx_protocol.h"
+#include "tf_protocol.h"
/*----------------------------------------------------------------------------*/
@@ -44,77 +43,77 @@
/*
* Maximum number of shared memory blocks that can be reigsters in a connection
*/
-#define SCXLNX_SHMEM_MAX_COUNT (64)
+#define TF_SHMEM_MAX_COUNT (64)
/*
* Describes the possible types of shared memories
*
- * SCXLNX_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM :
+ * TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM :
* The descriptor describes a registered shared memory.
* Its coarse pages are preallocated when initializing the
* connection
- * SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM :
+ * TF_SHMEM_TYPE_REGISTERED_SHMEM :
* The descriptor describes a registered shared memory.
* Its coarse pages are not preallocated
- * SCXLNX_SHMEM_TYPE_PM_HIBERNATE :
+ * TF_SHMEM_TYPE_PM_HIBERNATE :
* The descriptor describes a power management shared memory.
*/
-enum SCXLNX_SHMEM_TYPE {
- SCXLNX_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM = 0,
- SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM,
- SCXLNX_SHMEM_TYPE_PM_HIBERNATE,
+enum TF_SHMEM_TYPE {
+ TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM = 0,
+ TF_SHMEM_TYPE_REGISTERED_SHMEM,
+ TF_SHMEM_TYPE_PM_HIBERNATE,
};
/*
* This structure contains a pointer on a coarse page table
*/
-struct SCXLNX_COARSE_PAGE_TABLE {
+struct tf_coarse_page_table {
/*
* Identifies the coarse page table descriptor in
- * sFreeCoarsePageTables list
+ * free_coarse_page_tables list
*/
struct list_head list;
/*
* The address of the coarse page table
*/
- u32 *pDescriptors;
+ u32 *descriptors;
/*
* The address of the array containing this coarse page table
*/
- struct SCXLNX_COARSE_PAGE_TABLE_ARRAY *pParent;
+ struct tf_coarse_page_table_array *parent;
};
-#define SCXLNX_PAGE_DESCRIPTOR_TYPE_NORMAL 0
-#define SCXLNX_PAGE_DESCRIPTOR_TYPE_PREALLOCATED 1
+#define TF_PAGE_DESCRIPTOR_TYPE_NORMAL 0
+#define TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED 1
/*
* This structure describes an array of up to 4 coarse page tables
* allocated within a single 4KB page.
*/
-struct SCXLNX_COARSE_PAGE_TABLE_ARRAY {
+struct tf_coarse_page_table_array {
/*
- * identifies the element in the sCoarsePageTableArrays list
+ * identifies the element in the coarse_page_table_arrays list
*/
struct list_head list;
/*
* Type of page descriptor
- * can take any of SCXLNX_PAGE_DESCRIPTOR_TYPE_XXX value
+ * can take any of TF_PAGE_DESCRIPTOR_TYPE_XXX value
*/
- u32 nType;
+ u32 type;
- struct SCXLNX_COARSE_PAGE_TABLE sCoarsePageTables[4];
+ struct tf_coarse_page_table coarse_page_tables[4];
/*
* A counter of the number of coarse pages currently used
* the max value should be 4 (one coarse page table is 1KB while one
* page is 4KB)
*/
- u8 nReferenceCount;
+ u8 ref_count;
};
@@ -124,7 +123,7 @@ struct SCXLNX_COARSE_PAGE_TABLE_ARRAY {
* when the driver needs to allocate a new coarse page
* table.
*/
-struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT {
+struct tf_coarse_page_table_allocation_context {
/*
* The spin lock protecting concurrent access to the structure.
*/
@@ -133,19 +132,19 @@ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT {
/*
* The list of allocated coarse page table arrays
*/
- struct list_head sCoarsePageTableArrays;
+ struct list_head coarse_page_table_arrays;
/*
* The list of free coarse page tables
*/
- struct list_head sFreeCoarsePageTables;
+ struct list_head free_coarse_page_tables;
};
/*
* Fully describes a shared memory block
*/
-struct SCXLNX_SHMEM_DESC {
+struct tf_shmem_desc {
/*
* Identifies the shared memory descriptor in the list of free shared
* memory descriptors
@@ -155,25 +154,25 @@ struct SCXLNX_SHMEM_DESC {
/*
* Identifies the type of shared memory descriptor
*/
- enum SCXLNX_SHMEM_TYPE nType;
+ enum TF_SHMEM_TYPE type;
/*
* The identifier of the block of shared memory, as returned by the
* Secure World.
- * This identifier is hBlock field of a REGISTER_SHARED_MEMORY answer
+ * This identifier is block field of a REGISTER_SHARED_MEMORY answer
*/
- u32 hIdentifier;
+ u32 block_identifier;
/* Client buffer */
- u8 *pBuffer;
+ u8 *client_buffer;
/* Up to eight coarse page table context */
- struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable[SCX_MAX_COARSE_PAGES];
+ struct tf_coarse_page_table *coarse_pg_table[TF_MAX_COARSE_PAGES];
- u32 nNumberOfCoarsePageTables;
+ u32 coarse_pg_table_count;
/* Reference counter */
- atomic_t nRefCnt;
+ atomic_t ref_count;
};
@@ -184,7 +183,7 @@ struct SCXLNX_SHMEM_DESC {
*
* Note that this driver supports only one instance of the Secure World
*/
-struct SCXLNX_COMM {
+struct tf_comm {
/*
* The spin lock protecting concurrent access to the structure.
*/
@@ -192,89 +191,78 @@ struct SCXLNX_COMM {
/*
* Bit vector with the following possible flags:
- * - SCXLNX_COMM_FLAG_IRQ_REQUESTED: If set, indicates that
+ * - TF_COMM_FLAG_IRQ_REQUESTED: If set, indicates that
* the IRQ has been successfuly requested.
- * - SCXLNX_COMM_FLAG_TERMINATING: If set, indicates that the
+ * - TF_COMM_FLAG_TERMINATING: If set, indicates that the
* communication with the Secure World is being terminated.
* Transmissions to the Secure World are not permitted
- * - SCXLNX_COMM_FLAG_W3B_ALLOCATED: If set, indicates that the
+ * - TF_COMM_FLAG_W3B_ALLOCATED: If set, indicates that the
* W3B buffer has been allocated.
*
* This bit vector must be accessed with the kernel's atomic bitwise
* operations.
*/
- unsigned long nFlags;
+ unsigned long flags;
/*
* The virtual address of the L1 shared buffer.
*/
- struct SCHANNEL_C1S_BUFFER *pBuffer;
+ struct tf_l1_shared_buffer *l1_buffer;
/*
* The wait queue the client threads are waiting on.
*/
- wait_queue_head_t waitQueue;
+ wait_queue_head_t wait_queue;
#ifdef CONFIG_TF_TRUSTZONE
/*
* The interrupt line used by the Secure World.
*/
- int nSoftIntIrq;
+ int soft_int_irq;
/* ----- W3B ----- */
/* shared memory descriptor to identify the W3B */
- struct SCXLNX_SHMEM_DESC sW3BShmemDesc;
+ struct tf_shmem_desc w3b_shmem_desc;
/* Virtual address of the kernel allocated shared memory */
- u32 nW3BShmemVAddr;
+ u32 w3b;
/* offset of data in shared memory coarse pages */
- u32 nW3BShmemOffset;
+ u32 w3b_shmem_offset;
- u32 nW3BShmemSize;
+ u32 w3b_shmem_size;
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT
- sW3BAllocationContext;
+ struct tf_coarse_page_table_allocation_context
+ w3b_cpt_alloc_context;
#endif
#ifdef CONFIG_TF_ZEBRA
/*
* The SE SDP can only be initialized once...
*/
- int bSEInitialized;
-
- /* Virtual address of the L0 communication buffer */
- void *pInitSharedBuffer;
+ int se_initialized;
/*
* Lock to be held by a client when executing an RPC
*/
- struct mutex sRPCLock;
+ struct mutex rpc_mutex;
/*
* Lock to protect concurrent accesses to DMA channels
*/
- struct mutex sDMALock;
+ struct mutex dma_mutex;
#endif
};
-#define SCXLNX_COMM_FLAG_IRQ_REQUESTED (0)
-#define SCXLNX_COMM_FLAG_PA_AVAILABLE (1)
-#define SCXLNX_COMM_FLAG_TERMINATING (2)
-#define SCXLNX_COMM_FLAG_W3B_ALLOCATED (3)
-#define SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED (4)
+#define TF_COMM_FLAG_IRQ_REQUESTED (0)
+#define TF_COMM_FLAG_PA_AVAILABLE (1)
+#define TF_COMM_FLAG_TERMINATING (2)
+#define TF_COMM_FLAG_W3B_ALLOCATED (3)
+#define TF_COMM_FLAG_L1_SHARED_ALLOCATED (4)
/*----------------------------------------------------------------------------*/
-struct SCXLNX_DEVICE_STATS {
- struct kobject kobj;
-
- struct kobj_type kobj_type;
-
- struct attribute kobj_stat_attribute;
-
- struct attribute *kobj_attribute_list[2];
-
+struct tf_device_stats {
atomic_t stat_pages_allocated;
atomic_t stat_memories_allocated;
atomic_t stat_pages_locked;
@@ -283,24 +271,29 @@ struct SCXLNX_DEVICE_STATS {
/*
* This structure describes the information about one device handled by the
* driver. Note that the driver supports only a single device. see the global
- * variable g_SCXLNXDevice
+ * variable g_tf_dev
+
*/
-struct SCXLNX_DEVICE {
+struct tf_device {
/*
- * The device number for the device.
+ * The kernel object for the device
*/
- dev_t nDevNum;
+ struct kobject kobj;
/*
- * Interfaces the system device with the kernel.
+ * The device number for the device.
*/
- struct sys_device sysdev;
+ dev_t dev_number;
/*
* Interfaces the char device with the kernel.
*/
struct cdev cdev;
+#ifdef CONFIG_TF_TEEC
+ struct cdev cdev_teec;
+#endif
+
#ifdef CONFIG_TF_ZEBRA
struct cdev cdev_ctrl;
@@ -308,57 +301,54 @@ struct SCXLNX_DEVICE {
* Globals for CUS
*/
/* Current key handles loaded in HWAs */
- u32 hAES1SecureKeyContext;
- u32 hDESSecureKeyContext;
- bool bSHAM1IsPublic;
+ u32 aes1_key_context;
+ u32 des_key_context;
+ bool sham1_is_public;
- /* Semaphores used to serialize HWA accesses */
- struct semaphore sAES1CriticalSection;
- struct mutex sDESCriticalSection;
- struct mutex sSHACriticalSection;
+ /* Object used to serialize HWA accesses */
+ struct semaphore aes1_sema;
+ struct semaphore des_sema;
+ struct semaphore sha_sema;
/*
* An aligned and correctly shaped pre-allocated buffer used for DMA
* transfers
*/
- u32 nDMABufferLength;
- u8 *pDMABuffer;
- dma_addr_t pDMABufferPhys;
+ u32 dma_buffer_length;
+ u8 *dma_buffer;
+ dma_addr_t dma_buffer_phys;
/* Workspace allocated at boot time and reserved to the Secure World */
- u32 nWorkspaceAddr;
- u32 nWorkspaceSize;
+ u32 workspace_addr;
+ u32 workspace_size;
+
+ /*
+ * A Mutex to provide exclusive locking of the ioctl()
+ */
+ struct mutex dev_mutex;
#endif
/*
* Communications with the SM.
*/
- struct SCXLNX_COMM sm;
+ struct tf_comm sm;
/*
* Lists the connections attached to this device. A connection is
* created each time a user space application "opens" a file descriptor
* on the driver
*/
- struct list_head conns;
+ struct list_head connection_list;
/*
* The spin lock used to protect concurrent access to the connection
* list.
*/
- spinlock_t connsLock;
+ spinlock_t connection_list_lock;
- struct SCXLNX_DEVICE_STATS sDeviceStats;
+ struct tf_device_stats stats;
};
-/* the bits of the nFlags field of the SCXLNX_DEVICE structure */
-#define SCXLNX_DEVICE_FLAG_CDEV_INITIALIZED (0)
-#define SCXLNX_DEVICE_FLAG_SYSDEV_CLASS_REGISTERED (1)
-#define SCXLNX_DEVICE_FLAG_SYSDEV_REGISTERED (2)
-#define SCXLNX_DEVICE_FLAG_CDEV_REGISTERED (3)
-#define SCXLNX_DEVICE_FLAG_CDEV_ADDED (4)
-#define SCXLNX_DEVICE_SYSFS_REGISTERED (5)
-
/*----------------------------------------------------------------------------*/
/*
* This type describes a connection state.
@@ -368,24 +358,24 @@ struct SCXLNX_DEVICE {
* Messages may be invalidated between the start of the ioctl call and the
* moment the message is sent to the Secure World.
*
- * SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT :
+ * TF_CONN_STATE_NO_DEVICE_CONTEXT :
* The connection has no DEVICE_CONTEXT created and no
* CREATE_DEVICE_CONTEXT being processed by the Secure World
- * SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT :
+ * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT :
* The connection has a CREATE_DEVICE_CONTEXT being processed by the Secure
* World
- * SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT :
+ * TF_CONN_STATE_VALID_DEVICE_CONTEXT :
* The connection has a DEVICE_CONTEXT created and no
* DESTROY_DEVICE_CONTEXT is being processed by the Secure World
- * SCXLNX_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT :
+ * TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT :
* The connection has a DESTROY_DEVICE_CONTEXT being processed by the Secure
* World
*/
-enum SCXLNX_CONN_STATE {
- SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT = 0,
- SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT,
- SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT,
- SCXLNX_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT
+enum TF_CONN_STATE {
+ TF_CONN_STATE_NO_DEVICE_CONTEXT = 0,
+ TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT,
+ TF_CONN_STATE_VALID_DEVICE_CONTEXT,
+ TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT
};
@@ -401,10 +391,22 @@ enum SCXLNX_CONN_STATE {
* Note that this only covers the case where some other thread
* sent a DESTROY_DEVICE_CONTEXT command.
*/
-enum SCXLNX_COMMAND_STATE {
- SCXLNX_COMMAND_STATE_PENDING = 0,
- SCXLNX_COMMAND_STATE_SENT,
- SCXLNX_COMMAND_STATE_ABORTED
+enum TF_COMMAND_STATE {
+ TF_COMMAND_STATE_PENDING = 0,
+ TF_COMMAND_STATE_SENT,
+ TF_COMMAND_STATE_ABORTED
+};
+
+/*
+ * The origin of connection parameters such as login data and
+ * memory reference pointers.
+ *
+ * PROCESS: the calling process. All arguments must be validated.
+ * KERNEL: kernel code. All arguments can be trusted by this driver.
+ */
+enum TF_CONNECTION_OWNER {
+ TF_CONNECTION_OWNER_PROCESS = 0,
+ TF_CONNECTION_OWNER_KERNEL,
};
@@ -413,7 +415,7 @@ enum SCXLNX_COMMAND_STATE {
* A connection is created each time an application opens a file descriptor on
* the driver
*/
-struct SCXLNX_CONNECTION {
+struct tf_connection {
/*
* Identifies the connection in the list of the connections attached to
* the same device.
@@ -423,80 +425,84 @@ struct SCXLNX_CONNECTION {
/*
* State of the connection.
*/
- enum SCXLNX_CONN_STATE nState;
+ enum TF_CONN_STATE state;
/*
* A pointer to the corresponding device structure
*/
- struct SCXLNX_DEVICE *pDevice;
+ struct tf_device *dev;
/*
- * A spinlock to use to access nState
+ * A spinlock to use to access state
*/
- spinlock_t stateLock;
+ spinlock_t state_lock;
/*
* Counts the number of operations currently pending on the connection.
* (for debug only)
*/
- atomic_t nPendingOpCounter;
+ atomic_t pending_op_count;
/*
* A handle for the device context
*/
- u32 hDeviceContext;
+ u32 device_context;
/*
* Lists the used shared memory descriptors
*/
- struct list_head sUsedSharedMemoryList;
+ struct list_head used_shmem_list;
/*
* Lists the free shared memory descriptors
*/
- struct list_head sFreeSharedMemoryList;
+ struct list_head free_shmem_list;
/*
* A mutex to use to access this structure
*/
- struct mutex sharedMemoriesMutex;
+ struct mutex shmem_mutex;
/*
* Counts the number of shared memories registered.
*/
- atomic_t nShmemAllocated;
+ atomic_t shmem_count;
/*
* Page to retrieve memory properties when
* registering shared memory through REGISTER_SHARED_MEMORY
* messages
*/
- struct vm_area_struct **ppVmas;
+ struct vm_area_struct **vmas;
/*
* coarse page table allocation context
*/
- struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT sAllocationContext;
+ struct tf_coarse_page_table_allocation_context cpt_alloc_context;
+
+ /* The origin of connection parameters such as login data and
+ memory reference pointers. */
+ enum TF_CONNECTION_OWNER owner;
#ifdef CONFIG_TF_ZEBRA
/* Lists all the Cryptoki Update Shortcuts */
- struct list_head ShortcutList;
+ struct list_head shortcut_list;
- /* Lock to protect concurrent accesses to ShortcutList */
- spinlock_t shortcutListCriticalSectionLock;
+ /* Lock to protect concurrent accesses to shortcut_list */
+ spinlock_t shortcut_list_lock;
#endif
};
/*----------------------------------------------------------------------------*/
/*
- * The nOperationID field of a message points to this structure.
+ * The operation_id field of a message points to this structure.
* It is used to identify the thread that triggered the message transmission
* Whoever reads an answer can wake up that thread using the completion event
*/
-struct SCXLNX_ANSWER_STRUCT {
- bool bAnswerCopied;
- union SCX_ANSWER_MESSAGE *pAnswer;
+struct tf_answer_struct {
+ bool answer_copied;
+ union tf_answer *answer;
};
/*----------------------------------------------------------------------------*/
@@ -505,16 +511,16 @@ struct SCXLNX_ANSWER_STRUCT {
* The ASCII-C string representation of the base name of the devices managed by
* this driver.
*/
-#define SCXLNX_DEVICE_BASE_NAME "tf_driver"
+#define TF_DEVICE_BASE_NAME "tf_driver"
/**
* The major and minor numbers of the registered character device driver.
* Only 1 instance of the driver is supported.
*/
-#define SCXLNX_DEVICE_MINOR_NUMBER (0)
+#define TF_DEVICE_MINOR_NUMBER (0)
-struct SCXLNX_DEVICE *SCXLNXGetDevice(void);
+struct tf_device *tf_get_device(void);
#define CLEAN_CACHE_CFG_MASK (~0xC) /* 1111 0011 */
@@ -529,4 +535,4 @@ struct SCXLNX_DEVICE *SCXLNXGetDevice(void);
#define GROUP_INFO (current->group_info)
#endif
-#endif /* !defined(__SCXLNX_DEFS_H__) */
+#endif /* !defined(__TF_DEFS_H__) */
diff --git a/security/tf_driver/tf_device.c b/security/tf_driver/tf_device.c
new file mode 100644
index 000000000000..c52856944e63
--- /dev/null
+++ b/security/tf_driver/tf_device.c
@@ -0,0 +1,749 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/pm.h>
+#include <linux/sysdev.h>
+#include <linux/vmalloc.h>
+#include <linux/signal.h>
+#ifdef CONFIG_ANDROID
+#include <linux/device.h>
+#endif
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+#include "tf_comm.h"
+#ifdef CONFIG_TF_ZEBRA
+#include <plat/cpu.h>
+#include "tf_zebra.h"
+#endif
+
+#include "s_version.h"
+
+/*----------------------------------------------------------------------------
+ * Forward Declarations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Creates and registers the device to be managed by the specified driver.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static int tf_device_register(void);
+
+
+/*
+ * Implements the device Open callback.
+ */
+static int tf_device_open(
+ struct inode *inode,
+ struct file *file);
+
+
+/*
+ * Implements the device Release callback.
+ */
+static int tf_device_release(
+ struct inode *inode,
+ struct file *file);
+
+
+/*
+ * Implements the device ioctl callback.
+ */
+static long tf_device_ioctl(
+ struct file *file,
+ unsigned int ioctl_num,
+ unsigned long ioctl_param);
+
+
+/*
+ * Implements the device shutdown callback.
+ */
+static int tf_device_shutdown(
+ struct sys_device *sysdev);
+
+
+/*
+ * Implements the device suspend callback.
+ */
+static int tf_device_suspend(
+ struct sys_device *sysdev,
+ pm_message_t state);
+
+
+/*
+ * Implements the device resume callback.
+ */
+static int tf_device_resume(
+ struct sys_device *sysdev);
+
+
+/*---------------------------------------------------------------------------
+ * Module Parameters
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The device major number used to register a unique character device driver.
+ * Let the default value be 122
+ */
+static int device_major_number = 122;
+
+module_param(device_major_number, int, 0000);
+MODULE_PARM_DESC(device_major_number,
+ "The device major number used to register a unique character "
+ "device driver");
+
+#ifdef CONFIG_TF_TRUSTZONE
+/**
+ * The softint interrupt line used by the Secure World.
+ */
+static int soft_interrupt = -1;
+
+module_param(soft_interrupt, int, 0000);
+MODULE_PARM_DESC(soft_interrupt,
+ "The softint interrupt line used by the Secure world");
+#endif
+
+#ifdef CONFIG_ANDROID
+static struct class *tf_class;
+#endif
+
+/*
+ * Interfaces the system device with the kernel.
+ */
+struct sys_device g_tf_sysdev;
+
+/*----------------------------------------------------------------------------
+ * Global Variables
+ *----------------------------------------------------------------------------*/
+
+/*
+ * tf_driver character device definitions.
+ * read and write methods are not defined
+ * and will return an error if used by user space
+ */
+static const struct file_operations g_tf_device_file_ops = {
+ .owner = THIS_MODULE,
+ .open = tf_device_open,
+ .release = tf_device_release,
+ .unlocked_ioctl = tf_device_ioctl,
+ .llseek = no_llseek,
+};
+
+
+static struct sysdev_class g_tf_device_sys_class = {
+ .name = TF_DEVICE_BASE_NAME,
+ .shutdown = tf_device_shutdown,
+ .suspend = tf_device_suspend,
+ .resume = tf_device_resume,
+};
+
+/* The single device supported by this driver */
+static struct tf_device g_tf_dev;
+
+/*----------------------------------------------------------------------------
+ * Implementations
+ *----------------------------------------------------------------------------*/
+
+struct tf_device *tf_get_device(void)
+{
+ return &g_tf_dev;
+}
+
+/*
+ * sysfs entries
+ */
+struct tf_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct tf_device *, char *);
+ ssize_t (*store)(struct tf_device *, const char *, size_t);
+};
+
+/*
+ * sysfs entry showing allocation stats
+ */
+static ssize_t info_show(struct tf_device *dev, char *buf)
+{
+ struct tf_device_stats *dev_stats = &dev->stats;
+
+ return snprintf(buf, PAGE_SIZE,
+ "stat.memories.allocated: %d\n"
+ "stat.pages.allocated: %d\n"
+ "stat.pages.locked: %d\n",
+ atomic_read(&dev_stats->stat_memories_allocated),
+ atomic_read(&dev_stats->stat_pages_allocated),
+ atomic_read(&dev_stats->stat_pages_locked));
+}
+static struct tf_sysfs_entry tf_info_entry = __ATTR_RO(info);
+
+#ifdef CONFIG_TF_ZEBRA
+/*
+ * sysfs entry showing whether secure world is up and running
+ */
+static ssize_t tf_started_show(struct tf_device *dev, char *buf)
+{
+ int tf_started = test_bit(TF_COMM_FLAG_PA_AVAILABLE,
+ &dev->sm.flags);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", tf_started ? "yes" : "no");
+}
+static struct tf_sysfs_entry tf_started_entry =
+ __ATTR_RO(tf_started);
+
+static ssize_t workspace_addr_show(struct tf_device *dev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", dev->workspace_addr);
+}
+static struct tf_sysfs_entry tf_workspace_addr_entry =
+ __ATTR_RO(workspace_addr);
+
+static ssize_t workspace_size_show(struct tf_device *dev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", dev->workspace_size);
+}
+static struct tf_sysfs_entry tf_workspace_size_entry =
+ __ATTR_RO(workspace_size);
+#endif
+
+static ssize_t tf_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *page)
+{
+ struct tf_sysfs_entry *entry = container_of(attr, struct tf_sysfs_entry,
+ attr);
+ struct tf_device *dev = container_of(kobj, struct tf_device, kobj);
+
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(dev, page);
+}
+
+static ssize_t tf_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t length)
+{
+ struct tf_sysfs_entry *entry = container_of(attr, struct tf_sysfs_entry,
+ attr);
+ struct tf_device *dev = container_of(kobj, struct tf_device, kobj);
+
+ if (!entry->store)
+ return -EIO;
+
+ return entry->store(dev, page, length);
+}
+
+static void tf_kobj_release(struct kobject *kobj) {}
+
+static struct attribute *tf_default_attrs[] = {
+ &tf_info_entry.attr,
+#ifdef CONFIG_TF_ZEBRA
+ &tf_started_entry.attr,
+ &tf_workspace_addr_entry.attr,
+ &tf_workspace_size_entry.attr,
+#endif
+ NULL,
+};
+static const struct sysfs_ops tf_sysfs_ops = {
+ .show = tf_attr_show,
+ .store = tf_attr_store,
+};
+static struct kobj_type tf_ktype = {
+ .release = tf_kobj_release,
+ .sysfs_ops = &tf_sysfs_ops,
+ .default_attrs = tf_default_attrs
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * First routine called when the kernel module is loaded
+ */
+static int __init tf_device_register(void)
+{
+ int error;
+ struct tf_device *dev = &g_tf_dev;
+
+ dprintk(KERN_INFO "tf_device_register()\n");
+
+ /*
+ * Initialize the device
+ */
+ dev->dev_number = MKDEV(device_major_number,
+ TF_DEVICE_MINOR_NUMBER);
+ cdev_init(&dev->cdev, &g_tf_device_file_ops);
+ dev->cdev.owner = THIS_MODULE;
+
+ g_tf_sysdev.id = 0;
+ g_tf_sysdev.cls = &g_tf_device_sys_class;
+
+ INIT_LIST_HEAD(&dev->connection_list);
+ spin_lock_init(&dev->connection_list_lock);
+
+ /* register the sysfs object driver stats */
+ error = kobject_init_and_add(&dev->kobj, &tf_ktype, NULL, "%s",
+ TF_DEVICE_BASE_NAME);
+ if (error) {
+ printk(KERN_ERR "tf_device_register(): "
+ "kobject_init_and_add failed (error %d)!\n", error);
+ kobject_put(&dev->kobj);
+ goto kobject_init_and_add_failed;
+ }
+
+ /*
+ * Register the system device.
+ */
+
+ error = sysdev_class_register(&g_tf_device_sys_class);
+ if (error != 0) {
+ printk(KERN_ERR "tf_device_register():"
+ " sysdev_class_register failed (error %d)!\n",
+ error);
+ goto sysdev_class_register_failed;
+ }
+
+ error = sysdev_register(&g_tf_sysdev);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_device_register(): "
+ "sysdev_register failed (error %d)!\n",
+ error);
+ goto sysdev_register_failed;
+ }
+
+ /*
+ * Register the char device.
+ */
+ printk(KERN_INFO "Registering char device %s (%u:%u)\n",
+ TF_DEVICE_BASE_NAME,
+ MAJOR(dev->dev_number),
+ MINOR(dev->dev_number));
+ error = register_chrdev_region(dev->dev_number, 1,
+ TF_DEVICE_BASE_NAME);
+ if (error != 0) {
+ printk(KERN_ERR "tf_device_register():"
+ " register_chrdev_region failed (error %d)!\n",
+ error);
+ goto register_chrdev_region_failed;
+ }
+
+ error = cdev_add(&dev->cdev, dev->dev_number, 1);
+ if (error != 0) {
+ printk(KERN_ERR "tf_device_register(): "
+ "cdev_add failed (error %d)!\n",
+ error);
+ goto cdev_add_failed;
+ }
+
+ /*
+ * Initialize the communication with the Secure World.
+ */
+#ifdef CONFIG_TF_TRUSTZONE
+ dev->sm.soft_int_irq = soft_interrupt;
+#endif
+ error = tf_init(&g_tf_dev.sm);
+ if (error != S_SUCCESS) {
+ dprintk(KERN_ERR "tf_device_register(): "
+ "tf_init failed (error %d)!\n",
+ error);
+ goto init_failed;
+ }
+
+#ifdef CONFIG_ANDROID
+ tf_class = class_create(THIS_MODULE, TF_DEVICE_BASE_NAME);
+ device_create(tf_class, NULL,
+ dev->dev_number,
+ NULL, TF_DEVICE_BASE_NAME);
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * Initializes the /dev/tf_ctrl device node.
+ */
+ error = tf_ctrl_device_register();
+ if (error)
+ goto init_failed;
+#endif
+
+#ifdef CONFIG_BENCH_SECURE_CYCLE
+ run_bogo_mips();
+ address_cache_property((unsigned long) &tf_device_register);
+#endif
+ /*
+ * Successful completion.
+ */
+
+ dprintk(KERN_INFO "tf_device_register(): Success\n");
+ return 0;
+
+ /*
+ * Error: undo all operations in the reverse order
+ */
+init_failed:
+ cdev_del(&dev->cdev);
+cdev_add_failed:
+ unregister_chrdev_region(dev->dev_number, 1);
+register_chrdev_region_failed:
+ sysdev_unregister(&g_tf_sysdev);
+sysdev_register_failed:
+ sysdev_class_unregister(&g_tf_device_sys_class);
+sysdev_class_register_failed:
+kobject_init_and_add_failed:
+ kobject_del(&g_tf_dev.kobj);
+
+ dprintk(KERN_INFO "tf_device_register(): Failure (error %d)\n",
+ error);
+ return error;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_open(struct inode *inode, struct file *file)
+{
+ int error;
+ struct tf_device *dev = &g_tf_dev;
+ struct tf_connection *connection = NULL;
+
+ dprintk(KERN_INFO "tf_device_open(%u:%u, %p)\n",
+ imajor(inode), iminor(inode), file);
+
+ /* Dummy lseek for non-seekable driver */
+ error = nonseekable_open(inode, file);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "nonseekable_open failed (error %d)!\n",
+ file, error);
+ goto error;
+ }
+
+#ifndef CONFIG_ANDROID
+ /*
+	 * Check file flags. We only authorize the O_RDWR access
+ */
+ if (file->f_flags != O_RDWR) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "Invalid access mode %u\n",
+ file, file->f_flags);
+ error = -EACCES;
+ goto error;
+ }
+#endif
+
+ /*
+ * Open a new connection.
+ */
+
+ error = tf_open(dev, file, &connection);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "tf_open failed (error %d)!\n",
+ file, error);
+ goto error;
+ }
+
+ file->private_data = connection;
+
+ /*
+ * Send the CreateDeviceContext command to the secure
+ */
+ error = tf_create_device_context(connection);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "tf_create_device_context failed (error %d)!\n",
+ file, error);
+ goto error1;
+ }
+
+ /*
+ * Successful completion.
+ */
+
+ dprintk(KERN_INFO "tf_device_open(%p): Success (connection=%p)\n",
+ file, connection);
+ return 0;
+
+ /*
+ * Error handling.
+ */
+
+error1:
+ tf_close(connection);
+error:
+ dprintk(KERN_INFO "tf_device_open(%p): Failure (error %d)\n",
+ file, error);
+ return error;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_release(struct inode *inode, struct file *file)
+{
+ struct tf_connection *connection;
+
+ dprintk(KERN_INFO "tf_device_release(%u:%u, %p)\n",
+ imajor(inode), iminor(inode), file);
+
+ connection = tf_conn_from_file(file);
+ tf_close(connection);
+
+ dprintk(KERN_INFO "tf_device_release(%p): Success\n", file);
+ return 0;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static long tf_device_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ int result = S_SUCCESS;
+ struct tf_connection *connection;
+ union tf_command command;
+ struct tf_command_header header;
+ union tf_answer answer;
+ u32 command_size;
+ u32 answer_size;
+ void *user_answer;
+
+ dprintk(KERN_INFO "tf_device_ioctl(%p, %u, %p)\n",
+ file, ioctl_num, (void *) ioctl_param);
+
+ switch (ioctl_num) {
+ case IOCTL_TF_GET_VERSION:
+ /* ioctl is asking for the driver interface version */
+ result = TF_DRIVER_INTERFACE_VERSION;
+ goto exit;
+
+ case IOCTL_TF_EXCHANGE:
+ /*
+ * ioctl is asking to perform a message exchange with the Secure
+ * Module
+ */
+
+ /*
+ * Make a local copy of the data from the user application
+ * This routine checks the data is readable
+ *
+ * Get the header first.
+ */
+ if (copy_from_user(&header,
+ (struct tf_command_header *)ioctl_param,
+ sizeof(struct tf_command_header))) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Cannot access ioctl parameter %p\n",
+ file, (void *) ioctl_param);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ /* size in words of u32 */
+ command_size = header.message_size +
+ sizeof(struct tf_command_header)/sizeof(u32);
+ if (command_size > sizeof(command)/sizeof(u32)) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Buffer overflow: too many bytes to copy %d\n",
+ file, command_size);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ if (copy_from_user(&command,
+ (union tf_command *)ioctl_param,
+ command_size * sizeof(u32))) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Cannot access ioctl parameter %p\n",
+ file, (void *) ioctl_param);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ connection = tf_conn_from_file(file);
+ BUG_ON(connection == NULL);
+
+ /*
+ * The answer memory space address is in the operation_id field
+ */
+ user_answer = (void *) command.header.operation_id;
+
+ atomic_inc(&(connection->pending_op_count));
+
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Sending message type 0x%08x\n",
+ file, command.header.message_type);
+
+ switch (command.header.message_type) {
+ case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+ result = tf_open_client_session(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+ result = tf_close_client_session(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+ result = tf_register_shared_memory(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+ result = tf_release_shared_memory(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+ result = tf_invoke_client_command(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+ result = tf_cancel_client_command(connection,
+ &command, &answer);
+ break;
+
+ default:
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Incorrect message type (0x%08x)!\n",
+ connection, command.header.message_type);
+ result = -EOPNOTSUPP;
+ break;
+ }
+
+ atomic_dec(&(connection->pending_op_count));
+
+ if (result != 0) {
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Operation returning error code 0x%08x)!\n",
+ file, result);
+ goto exit;
+ }
+
+ /*
+ * Copy the answer back to the user space application.
+ * The driver does not check this field, only copy back to user
+ * space the data handed over by Secure World
+ */
+ answer_size = answer.header.message_size +
+ sizeof(struct tf_answer_header)/sizeof(u32);
+ if (copy_to_user(user_answer,
+ &answer, answer_size * sizeof(u32))) {
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Failed to copy back the full command "
+ "answer to %p\n", file, user_answer);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_device_ioctl(%p): Success\n", file);
+ break;
+
+ case IOCTL_TF_GET_DESCRIPTION: {
+ /* ioctl asking for the version information buffer */
+ struct tf_version_information_buffer *pInfoBuffer;
+
+ dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION:(%p, %u, %p)\n",
+ file, ioctl_num, (void *) ioctl_param);
+
+ pInfoBuffer =
+ ((struct tf_version_information_buffer *) ioctl_param);
+
+ dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION1: "
+ "driver_description=\"%64s\"\n", S_VERSION_STRING);
+
+ if (copy_to_user(pInfoBuffer->driver_description,
+ S_VERSION_STRING,
+ strlen(S_VERSION_STRING) + 1)) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Fail to copy back the driver description "
+ "to %p\n",
+ file, pInfoBuffer->driver_description);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION2: "
+ "secure_world_description=\"%64s\"\n",
+ tf_get_description(&g_tf_dev.sm));
+
+ if (copy_to_user(pInfoBuffer->secure_world_description,
+ tf_get_description(&g_tf_dev.sm),
+ TF_DESCRIPTION_BUFFER_LENGTH)) {
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Failed to copy back the secure world "
+ "description to %p\n",
+ file, pInfoBuffer->secure_world_description);
+ result = -EFAULT;
+ goto exit;
+ }
+ break;
+ }
+
+ default:
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Unknown IOCTL code 0x%08x!\n",
+ file, ioctl_num);
+ result = -EOPNOTSUPP;
+ goto exit;
+ }
+
+exit:
+ return result;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_shutdown(struct sys_device *sysdev)
+{
+
+ return tf_power_management(&g_tf_dev.sm,
+ TF_POWER_OPERATION_SHUTDOWN);
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_suspend(struct sys_device *sysdev, pm_message_t state)
+{
+ dprintk(KERN_INFO "tf_device_suspend: Enter\n");
+ return tf_power_management(&g_tf_dev.sm,
+ TF_POWER_OPERATION_HIBERNATE);
+}
+
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_resume(struct sys_device *sysdev)
+{
+ return tf_power_management(&g_tf_dev.sm,
+ TF_POWER_OPERATION_RESUME);
+}
+
+
+/*----------------------------------------------------------------------------*/
+
+module_init(tf_device_register);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Trusted Logic S.A.");
diff --git a/security/tf_driver/tf_protocol.h b/security/tf_driver/tf_protocol.h
new file mode 100644
index 000000000000..4e552654aa9a
--- /dev/null
+++ b/security/tf_driver/tf_protocol.h
@@ -0,0 +1,688 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_PROTOCOL_H__
+#define __TF_PROTOCOL_H__
+
+/*----------------------------------------------------------------------------
+ *
+ * This header file defines the structure used in the SChannel Protocol.
+ * See your Product Reference Manual for a specification of the SChannel
+ * protocol.
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The driver interface version returned by the version ioctl
+ */
+#define TF_DRIVER_INTERFACE_VERSION 0x04000000
+
+/*
+ * Protocol version handling
+ */
+#define TF_S_PROTOCOL_MAJOR_VERSION (0x06)
+#define GET_PROTOCOL_MAJOR_VERSION(a) (a >> 24)
+#define GET_PROTOCOL_MINOR_VERSION(a) ((a >> 16) & 0xFF)
+
+/*
+ * The S flag of the config_flag_s register.
+ */
+#define TF_CONFIG_FLAG_S (1 << 3)
+
+/*
+ * The TimeSlot field of the sync_serial_n register.
+ */
+#define TF_SYNC_SERIAL_TIMESLOT_N (1)
+
+/*
+ * status_s related defines.
+ */
+#define TF_STATUS_P_MASK (0X00000001)
+#define TF_STATUS_POWER_STATE_SHIFT (3)
+#define TF_STATUS_POWER_STATE_MASK (0x1F << TF_STATUS_POWER_STATE_SHIFT)
+
+/*
+ * Possible power states of the POWER_STATE field of the status_s register
+ */
+#define TF_POWER_MODE_COLD_BOOT (0)
+#define TF_POWER_MODE_WARM_BOOT (1)
+#define TF_POWER_MODE_ACTIVE (3)
+#define TF_POWER_MODE_READY_TO_SHUTDOWN (5)
+#define TF_POWER_MODE_READY_TO_HIBERNATE (7)
+#define TF_POWER_MODE_WAKEUP (8)
+#define TF_POWER_MODE_PANIC (15)
+
+/*
+ * Possible command values for MANAGEMENT commands
+ */
+#define TF_MANAGEMENT_HIBERNATE (1)
+#define TF_MANAGEMENT_SHUTDOWN (2)
+#define TF_MANAGEMENT_PREPARE_FOR_CORE_OFF (3)
+#define TF_MANAGEMENT_RESUME_FROM_CORE_OFF (4)
+
+/*
+ * The capacity of the Normal World message queue, in number of slots.
+ */
+#define TF_N_MESSAGE_QUEUE_CAPACITY (512)
+
+/*
+ * The capacity of the Secure World message answer queue, in number of slots.
+ */
+#define TF_S_ANSWER_QUEUE_CAPACITY (256)
+
+/*
+ * The value of the S-timeout register indicating an infinite timeout.
+ */
+#define TF_S_TIMEOUT_0_INFINITE (0xFFFFFFFF)
+#define TF_S_TIMEOUT_1_INFINITE (0xFFFFFFFF)
+
+/*
+ * The value of the S-timeout register indicating an immediate timeout.
+ */
+#define TF_S_TIMEOUT_0_IMMEDIATE (0x0)
+#define TF_S_TIMEOUT_1_IMMEDIATE (0x0)
+
+/*
+ * Identifies the get protocol version SMC.
+ */
+#define TF_SMC_GET_PROTOCOL_VERSION (0XFFFFFFFB)
+
+/*
+ * Identifies the init SMC.
+ */
+#define TF_SMC_INIT (0XFFFFFFFF)
+
+/*
+ * Identifies the reset irq SMC.
+ */
+#define TF_SMC_RESET_IRQ (0xFFFFFFFE)
+
+/*
+ * Identifies the SET_W3B SMC.
+ */
+#define TF_SMC_WAKE_UP (0xFFFFFFFD)
+
+/*
+ * Identifies the STOP SMC.
+ */
+#define TF_SMC_STOP (0xFFFFFFFC)
+
+/*
+ * Identifies the n-yield SMC.
+ */
+#define TF_SMC_N_YIELD (0X00000003)
+
+
+/* Possible stop commands for SMC_STOP */
+#define SCSTOP_HIBERNATE (0xFFFFFFE1)
+#define SCSTOP_SHUTDOWN (0xFFFFFFE2)
+
+/*
+ * Representation of a UUID.
+ */
+struct tf_uuid {
+ u32 time_low;
+ u16 time_mid;
+ u16 time_hi_and_version;
+ u8 clock_seq_and_node[8];
+};
+
+
+/**
+ * Command parameters.
+ */
+struct tf_command_param_value {
+ u32 a;
+ u32 b;
+};
+
+struct tf_command_param_temp_memref {
+ u32 descriptor; /* data pointer for exchange message.*/
+ u32 size;
+ u32 offset;
+};
+
+struct tf_command_param_memref {
+ u32 block;
+ u32 size;
+ u32 offset;
+};
+
+union tf_command_param {
+ struct tf_command_param_value value;
+ struct tf_command_param_temp_memref temp_memref;
+ struct tf_command_param_memref memref;
+};
+
+/**
+ * Answer parameters.
+ */
+struct tf_answer_param_value {
+ u32 a;
+ u32 b;
+};
+
+struct tf_answer_param_size {
+ u32 _ignored;
+ u32 size;
+};
+
+union tf_answer_param {
+ struct tf_answer_param_size size;
+ struct tf_answer_param_value value;
+};
+
+/*
+ * Descriptor tables capacity
+ */
+#define TF_MAX_W3B_COARSE_PAGES (2)
+/* TF_MAX_COARSE_PAGES is the number of level 1 descriptors (describing
+ * 1MB each) that can be shared with the secure world in a single registered
+ * shared memory block. It must be kept in synch with
+ * SCHANNEL6_MAX_DESCRIPTORS_PER_REGISTERED_SHARED_MEM in the SChannel
+ * protocol spec. */
+#define TF_MAX_COARSE_PAGES 128
+#define TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT (8)
+#define TF_DESCRIPTOR_TABLE_CAPACITY \
+ (1 << TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT)
+#define TF_DESCRIPTOR_TABLE_CAPACITY_MASK \
+ (TF_DESCRIPTOR_TABLE_CAPACITY - 1)
+/* Shared memories coarse pages can map up to 1MB */
+#define TF_MAX_COARSE_PAGE_MAPPED_SIZE \
+ (PAGE_SIZE * TF_DESCRIPTOR_TABLE_CAPACITY)
+/* Shared memories cannot exceed 8MB */
+#define TF_MAX_SHMEM_SIZE \
+ (TF_MAX_COARSE_PAGE_MAPPED_SIZE << 3)
+
+/*
+ * Buffer size for version description fields
+ */
+#define TF_DESCRIPTION_BUFFER_LENGTH 64
+
+/*
+ * Shared memory type flags.
+ */
+#define TF_SHMEM_TYPE_READ (0x00000001)
+#define TF_SHMEM_TYPE_WRITE (0x00000002)
+
+/*
+ * Shared mem flags
+ */
+#define TF_SHARED_MEM_FLAG_INPUT 1
+#define TF_SHARED_MEM_FLAG_OUTPUT 2
+#define TF_SHARED_MEM_FLAG_INOUT 3
+
+
+/*
+ * Parameter types
+ */
+#define TF_PARAM_TYPE_NONE 0x0
+#define TF_PARAM_TYPE_VALUE_INPUT 0x1
+#define TF_PARAM_TYPE_VALUE_OUTPUT 0x2
+#define TF_PARAM_TYPE_VALUE_INOUT 0x3
+#define TF_PARAM_TYPE_MEMREF_TEMP_INPUT 0x5
+#define TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT 0x6
+#define TF_PARAM_TYPE_MEMREF_TEMP_INOUT 0x7
+#define TF_PARAM_TYPE_MEMREF_INPUT 0xD
+#define TF_PARAM_TYPE_MEMREF_OUTPUT 0xE
+#define TF_PARAM_TYPE_MEMREF_INOUT 0xF
+
+#define TF_PARAM_TYPE_MEMREF_FLAG 0x4
+#define TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG 0x8
+
+
+#define TF_MAKE_PARAM_TYPES(t0, t1, t2, t3) \
+ ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
+#define TF_GET_PARAM_TYPE(t, i) (((t) >> (4 * i)) & 0xF)
+
+/*
+ * Login types.
+ */
+#define TF_LOGIN_PUBLIC 0x00000000
+#define TF_LOGIN_USER 0x00000001
+#define TF_LOGIN_GROUP 0x00000002
+#define TF_LOGIN_APPLICATION 0x00000004
+#define TF_LOGIN_APPLICATION_USER 0x00000005
+#define TF_LOGIN_APPLICATION_GROUP 0x00000006
+#define TF_LOGIN_AUTHENTICATION 0x80000000
+#define TF_LOGIN_PRIVILEGED 0x80000002
+
+/* Login variants */
+
+/*
+ * A variant login encodes: bit 27 = "is a variant" marker,
+ * bits 16-23 = OS code (TF_LOGIN_OS_*), bits 8-15 = variant code,
+ * low bits = the main login type it refines.
+ */
+#define TF_LOGIN_VARIANT(main_type, os, variant) \
+ ((main_type) | (1 << 27) | ((os) << 16) | ((variant) << 8))
+
+/* Strip the OS and variant fields, keeping only the main login type */
+#define TF_LOGIN_GET_MAIN_TYPE(type) \
+ ((type) & ~TF_LOGIN_VARIANT(0, 0xFF, 0xFF))
+
+#define TF_LOGIN_OS_ANY 0x00
+#define TF_LOGIN_OS_LINUX 0x01
+#define TF_LOGIN_OS_ANDROID 0x04
+
+/* OS-independent variants */
+#define TF_LOGIN_USER_NONE \
+ TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_GROUP_NONE \
+ TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_APPLICATION_USER_NONE \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_AUTHENTICATION, TF_LOGIN_OS_ANY, 0x01)
+#define TF_LOGIN_PRIVILEGED_KERNEL \
+ TF_LOGIN_VARIANT(TF_LOGIN_PRIVILEGED, TF_LOGIN_OS_ANY, 0x01)
+
+/* Linux variants */
+#define TF_LOGIN_USER_LINUX_EUID \
+ TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_GROUP_LINUX_GID \
+ TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_LINUX, 0x01)
+
+/* Android variants */
+#define TF_LOGIN_USER_ANDROID_EUID \
+ TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_GROUP_ANDROID_GID \
+ TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_APPLICATION_ANDROID_UID \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANDROID, \
+ 0x01)
+#define TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_ANDROID, \
+ 0x01)
+
+/*
+ * return origins (who produced the error: the comms layer, the TEE
+ * itself, or the trusted application)
+ */
+#define TF_ORIGIN_COMMS 2
+#define TF_ORIGIN_TEE 3
+#define TF_ORIGIN_TRUSTED_APP 4
+/*
+ * The message types. These values go into
+ * tf_command_header.message_type / tf_answer_header.message_type.
+ */
+#define TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT 0x02
+#define TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT 0xFD
+#define TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY 0xF7
+#define TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY 0xF9
+#define TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION 0xF0
+#define TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION 0xF2
+#define TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND 0xF5
+#define TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND 0xF4
+#define TF_MESSAGE_TYPE_MANAGEMENT 0xFE
+
+
+/*
+ * The SChannel error codes.
+ */
+#define S_SUCCESS 0x00000000
+#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C
+
+/*
+ * Common prefix of every SChannel command message. All tf_command_*
+ * structures below start with these same four fields, so a message may
+ * be inspected through this header before its type is known.
+ */
+struct tf_command_header {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info;
+ u32 operation_id;
+};
+
+/*
+ * Common prefix of every SChannel answer message; answers additionally
+ * carry the SChannel error code.
+ */
+struct tf_answer_header {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info;
+ u32 operation_id;
+ u32 error_code;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT command message.
+ * NOTE(review): field layout is presumably part of the wire protocol
+ * shared with the secure world — do not reorder fields.
+ */
+struct tf_command_create_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 device_context_id;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT answer message.
+ */
+struct tf_answer_create_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ /* an opaque Normal World identifier for the device context */
+ u32 device_context;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT command message.
+ */
+struct tf_command_destroy_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 device_context;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT answer message.
+ */
+struct tf_answer_destroy_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ u32 device_context_id;
+};
+
+/*
+ * OPEN_CLIENT_SESSION command message.
+ */
+struct tf_command_open_client_session {
+ u8 message_size;
+ u8 message_type;
+ /* four 4-bit TF_PARAM_TYPE_* codes packed by TF_MAKE_PARAM_TYPES */
+ u16 param_types;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 cancellation_id;
+ u64 timeout;
+ struct tf_uuid destination_uuid;
+ union tf_command_param params[4];
+ /* one of the TF_LOGIN_* values defined above */
+ u32 login_type;
+ /*
+ * Size = 0 for public, [16] for group identification, [20] for
+ * authentication
+ */
+ u8 login_data[20];
+};
+
+/*
+ * OPEN_CLIENT_SESSION answer message.
+ */
+struct tf_answer_open_client_session {
+ u8 message_size;
+ u8 message_type;
+ /* one of the TF_ORIGIN_* values defined above */
+ u8 error_origin;
+ u8 __reserved;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ u32 client_session;
+ union tf_answer_param answers[4];
+};
+
+/*
+ * CLOSE_CLIENT_SESSION command message.
+ */
+struct tf_command_close_client_session {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 client_session;
+};
+
+/*
+ * CLOSE_CLIENT_SESSION answer message.
+ */
+struct tf_answer_close_client_session {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+};
+
+
+/*
+ * REGISTER_SHARED_MEMORY command message
+ */
+struct tf_command_register_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 memory_flags;
+ u32 operation_id;
+ u32 device_context;
+ u32 block_id;
+ u32 shared_mem_size;
+ u32 shared_mem_start_offset;
+ u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
+};
+
+/*
+ * REGISTER_SHARED_MEMORY answer message.
+ */
+struct tf_answer_register_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ u32 block;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY command message.
+ */
+struct tf_command_release_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 block;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY answer message.
+ */
+struct tf_answer_release_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 error_code;
+ u32 block_id;
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command message.
+ * Note: unlike tf_command_open_client_session, the params[] array here
+ * sits at a different offset (no destination_uuid field precedes it).
+ */
+struct tf_command_invoke_client_command {
+ u8 message_size;
+ u8 message_type;
+ u16 param_types;
+ u32 operation_id;
+ u32 device_context;
+ u32 client_session;
+ u64 timeout;
+ u32 cancellation_id;
+ u32 client_command_identifier;
+ union tf_command_param params[4];
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command answer.
+ */
+struct tf_answer_invoke_client_command {
+ u8 message_size;
+ u8 message_type;
+ u8 error_origin;
+ u8 __reserved;
+ u32 operation_id;
+ u32 error_code;
+ union tf_answer_param answers[4];
+};
+
+/*
+ * CANCEL_CLIENT_OPERATION command message.
+ */
+struct tf_command_cancel_client_operation {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 client_session;
+ u32 cancellation_id;
+};
+
+/* CANCEL_CLIENT_OPERATION answer message. */
+struct tf_answer_cancel_client_operation {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 error_code;
+};
+
+/*
+ * MANAGEMENT command message.
+ */
+struct tf_command_management {
+ u8 message_size;
+ u8 message_type;
+ u16 command;
+ u32 operation_id;
+ u32 w3b_size;
+ u32 w3b_start_offset;
+ u32 shared_mem_descriptors[1];
+};
+
+/*
+ * POWER_MANAGEMENT answer message.
+ * The message does not provide message specific parameters.
+ * Therefore no need to define a specific answer structure
+ * (the generic tf_answer_header suffices).
+ */
+
+/*
+ * Structure for L2 messages: any SChannel command, viewable either
+ * through the common header or through its type-specific member.
+ */
+union tf_command {
+ struct tf_command_header header;
+ struct tf_command_create_device_context create_device_context;
+ struct tf_command_destroy_device_context destroy_device_context;
+ struct tf_command_open_client_session open_client_session;
+ struct tf_command_close_client_session close_client_session;
+ struct tf_command_register_shared_memory register_shared_memory;
+ struct tf_command_release_shared_memory release_shared_memory;
+ struct tf_command_invoke_client_command invoke_client_command;
+ struct tf_command_cancel_client_operation cancel_client_operation;
+ struct tf_command_management management;
+};
+
+/*
+ * Structure for any L2 answer
+ */
+
+union tf_answer {
+ struct tf_answer_header header;
+ struct tf_answer_create_device_context create_device_context;
+ struct tf_answer_open_client_session open_client_session;
+ struct tf_answer_close_client_session close_client_session;
+ struct tf_answer_register_shared_memory register_shared_memory;
+ struct tf_answer_release_shared_memory release_shared_memory;
+ struct tf_answer_invoke_client_command invoke_client_command;
+ struct tf_answer_destroy_device_context destroy_device_context;
+ struct tf_answer_cancel_client_operation cancel_client_operation;
+};
+
+/* Structure of the Communication Buffer shared with the secure world.
+ * The CONFIG_TF_ZEBRA and non-ZEBRA variants keep the fields after the
+ * #endif at the same offsets (both branches occupy 64 bytes). */
+struct tf_l1_shared_buffer {
+ #ifdef CONFIG_TF_ZEBRA
+ u32 init_status;
+ u32 protocol_version;
+ u32 l1_shared_buffer_descr;
+ u32 backing_store_addr;
+ u32 backext_storage_addr;
+ u32 workspace_addr;
+ u32 workspace_size;
+ u32 conf_descriptor;
+ u32 conf_size;
+ u32 conf_offset;
+ u8 reserved1[24];
+ #else
+ u32 config_flag_s;
+ u32 w3b_size_max_s;
+ u32 reserved0;
+ u32 w3b_size_current_s;
+ u8 reserved1[48];
+ #endif
+ u8 version_description[TF_DESCRIPTION_BUFFER_LENGTH];
+ u32 status_s;
+ u32 reserved2;
+ /* synchronization counters: _n = Normal World, _s = Secure World */
+ u32 sync_serial_n;
+ u32 sync_serial_s;
+ u64 time_n[2];
+ u64 timeout_s[2];
+ /* ring indices for the command and answer queues below */
+ u32 first_command;
+ u32 first_free_command;
+ u32 first_answer;
+ u32 first_free_answer;
+ u32 w3b_descriptors[128];
+ #ifdef CONFIG_TF_ZEBRA
+ u8 rpc_trace_buffer[140];
+ u8 rpc_cus_buffer[180];
+ #else
+ u8 reserved3[320];
+ #endif
+ u32 command_queue[TF_N_MESSAGE_QUEUE_CAPACITY];
+ u32 answer_queue[TF_S_ANSWER_QUEUE_CAPACITY];
+};
+
+
+/*
+ * tf_version_information_buffer structure description
+ * Description of the sVersionBuffer handed over from user space to kernel space
+ * This field is filled by the driver during a CREATE_DEVICE_CONTEXT ioctl
+ * and handed back to user space
+ */
+struct tf_version_information_buffer {
+ u8 driver_description[65];
+ u8 secure_world_description[65];
+};
+
+
+/* The IOCTLs the driver supports (ioctl magic number 'z') */
+#include <linux/ioctl.h>
+
+#define IOCTL_TF_GET_VERSION _IO('z', 0)
+#define IOCTL_TF_EXCHANGE _IOWR('z', 1, union tf_command)
+#define IOCTL_TF_GET_DESCRIPTION _IOR('z', 2, \
+ struct tf_version_information_buffer)
+
+#endif /* !defined(__TF_PROTOCOL_H__) */
diff --git a/security/tf_driver/tf_util.c b/security/tf_driver/tf_util.c
new file mode 100644
index 000000000000..455c539596f2
--- /dev/null
+++ b/security/tf_driver/tf_util.c
@@ -0,0 +1,1149 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/mman.h>
+#include "tf_util.h"
+
+/*----------------------------------------------------------------------------
+ * Debug printing routines
+ *----------------------------------------------------------------------------*/
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+
+/*
+ * Debug helper: translate the virtual address 'va' via the ARM CP15
+ * VA-to-PA translation registers and decode the cacheability/shareability
+ * attributes encoded in the PAR (Physical Address Register) result.
+ * Output goes to dprintk only; no state is modified.
+ */
+void address_cache_property(unsigned long va)
+{
+ unsigned long pa;
+ unsigned long inner;
+ unsigned long outer;
+
+ /* V2PCWPR: privileged-read translation of va, result read from PAR */
+ asm volatile ("mcr p15, 0, %0, c7, c8, 0" : : "r" (va));
+ asm volatile ("mrc p15, 0, %0, c7, c4, 0" : "=r" (pa));
+
+ dprintk(KERN_INFO "VA:%x, PA:%x\n",
+ (unsigned int) va,
+ (unsigned int) pa);
+
+ /* bit 0 of the PAR set means the translation aborted */
+ if (pa & 1) {
+ dprintk(KERN_INFO "Prop Error\n");
+ return;
+ }
+
+ outer = (pa >> 2) & 3;
+ dprintk(KERN_INFO "\touter : %x", (unsigned int) outer);
+
+ switch (outer) {
+ case 3:
+ dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
+ break;
+ case 2:
+ dprintk(KERN_INFO "Write-Through, no Write-Allocate.\n");
+ break;
+ case 1:
+ dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
+ break;
+ case 0:
+ dprintk(KERN_INFO "Non-cacheable.\n");
+ break;
+ }
+
+ inner = (pa >> 4) & 7;
+ dprintk(KERN_INFO "\tinner : %x", (unsigned int)inner);
+
+ switch (inner) {
+ case 7:
+ dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
+ break;
+ case 6:
+ dprintk(KERN_INFO "Write-Through.\n");
+ break;
+ case 5:
+ dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
+ break;
+ case 3:
+ dprintk(KERN_INFO "Device.\n");
+ break;
+ case 1:
+ dprintk(KERN_INFO "Strongly-ordered.\n");
+ break;
+ case 0:
+ dprintk(KERN_INFO "Non-cacheable.\n");
+ break;
+ }
+
+ if (pa & 0x00000002)
+ dprintk(KERN_INFO "SuperSection.\n");
+ if (pa & 0x00000080)
+ dprintk(KERN_INFO "Memory is shareable.\n");
+ else
+ dprintk(KERN_INFO "Memory is non-shareable.\n");
+
+ if (pa & 0x00000200)
+ dprintk(KERN_INFO "Non-secure.\n");
+}
+
+#ifdef CONFIG_BENCH_SECURE_CYCLE
+
+#define LOOP_SIZE (100000)
+
+/*
+ * Debug benchmark: measure and print the cycle counts for code and data
+ * accesses using the external setup_counters()/run_code_speed()/
+ * run_data_speed() helpers (defined elsewhere in the driver).
+ */
+void run_bogo_mips(void)
+{
+ uint32_t cycles;
+ void *address = &run_bogo_mips;
+
+ dprintk(KERN_INFO "BogoMIPS:\n");
+
+ setup_counters();
+ cycles = run_code_speed(LOOP_SIZE);
+ dprintk(KERN_INFO "%u cycles with code access\n", cycles);
+ cycles = run_data_speed(LOOP_SIZE, (unsigned long)address);
+ dprintk(KERN_INFO "%u cycles to access %x\n", cycles,
+ (unsigned int) address);
+}
+
+#endif /* CONFIG_BENCH_SECURE_CYCLE */
+
+/*
+ * Dump the L1 shared buffer header fields (synchronization counters,
+ * queue indices, version string) to the debug log. Read-only.
+ */
+void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer)
+{
+ dprintk(KERN_INFO
+ "buffer@%p:\n"
+ #ifndef CONFIG_TF_ZEBRA
+ " config_flag_s=%08X\n"
+ #endif
+ " version_description=%64s\n"
+ " status_s=%08X\n"
+ " sync_serial_n=%08X\n"
+ " sync_serial_s=%08X\n"
+ " time_n[0]=%016llX\n"
+ " time_n[1]=%016llX\n"
+ " timeout_s[0]=%016llX\n"
+ " timeout_s[1]=%016llX\n"
+ " first_command=%08X\n"
+ " first_free_command=%08X\n"
+ " first_answer=%08X\n"
+ " first_free_answer=%08X\n\n",
+ buffer,
+ #ifndef CONFIG_TF_ZEBRA
+ buffer->config_flag_s,
+ #endif
+ buffer->version_description,
+ buffer->status_s,
+ buffer->sync_serial_n,
+ buffer->sync_serial_s,
+ buffer->time_n[0],
+ buffer->time_n[1],
+ buffer->timeout_s[0],
+ buffer->timeout_s[1],
+ buffer->first_command,
+ buffer->first_free_command,
+ buffer->first_answer,
+ buffer->first_free_answer);
+}
+
+
+/*
+ * Dump the specified SChannel message using dprintk.
+ * Decodes the message according to header.message_type; unknown types
+ * are reported with KERN_ERR. Read-only: the command is not modified.
+ */
+void tf_dump_command(union tf_command *command)
+{
+ u32 i;
+
+ dprintk(KERN_INFO "message@%p:\n", command);
+
+ switch (command->header.message_type) {
+ case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT\n"
+ " operation_id = 0x%08X\n"
+ " device_context_id = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->create_device_context.device_context_id
+ );
+ break;
+
+ case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->destroy_device_context.device_context);
+ break;
+
+ case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION\n"
+ " param_types = 0x%04X\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " cancellation_id = 0x%08X\n"
+ " timeout = 0x%016llX\n"
+ " destination_uuid = "
+ "%08X-%04X-%04X-%02X%02X-"
+ "%02X%02X%02X%02X%02X%02X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->open_client_session.param_types,
+ command->header.operation_id,
+ command->open_client_session.device_context,
+ command->open_client_session.cancellation_id,
+ command->open_client_session.timeout,
+ command->open_client_session.destination_uuid.
+ time_low,
+ command->open_client_session.destination_uuid.
+ time_mid,
+ command->open_client_session.destination_uuid.
+ time_hi_and_version,
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[0],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[1],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[2],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[3],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[4],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[5],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[6],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[7]
+ );
+
+ for (i = 0; i < 4; i++) {
+ uint32_t *param = (uint32_t *) &command->
+ open_client_session.params[i];
+ dprintk(KERN_INFO " params[%d] = "
+ "0x%08X:0x%08X:0x%08X\n",
+ i, param[0], param[1], param[2]);
+ }
+
+ switch (TF_LOGIN_GET_MAIN_TYPE(
+ command->open_client_session.login_type)) {
+ case TF_LOGIN_PUBLIC:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_PUBLIC\n");
+ break;
+ case TF_LOGIN_USER:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_USER\n");
+ break;
+ case TF_LOGIN_GROUP:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_GROUP\n");
+ break;
+ case TF_LOGIN_APPLICATION:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_APPLICATION\n");
+ break;
+ case TF_LOGIN_APPLICATION_USER:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_APPLICATION_USER\n");
+ break;
+ case TF_LOGIN_APPLICATION_GROUP:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_APPLICATION_GROUP\n");
+ break;
+ case TF_LOGIN_AUTHENTICATION:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_AUTHENTICATION\n");
+ break;
+ case TF_LOGIN_PRIVILEGED:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_PRIVILEGED\n");
+ break;
+ case TF_LOGIN_PRIVILEGED_KERNEL:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_PRIVILEGED_KERNEL\n");
+ break;
+ default:
+ dprintk(
+ KERN_ERR " login_type = "
+ "0x%08X (Unknown login type)\n",
+ command->open_client_session.login_type);
+ break;
+ }
+
+ dprintk(
+ KERN_INFO " login_data = ");
+ for (i = 0; i < 20; i++)
+ dprintk(
+ KERN_INFO "%d",
+ command->open_client_session.
+ login_data[i]);
+ dprintk("\n");
+ break;
+
+ case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " client_session = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->close_client_session.device_context,
+ command->close_client_session.client_session
+ );
+ break;
+
+ case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY\n"
+ " memory_flags = 0x%04X\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " block_id = 0x%08X\n"
+ " shared_mem_size = 0x%08X\n"
+ " shared_mem_start_offset = 0x%08X\n"
+ " shared_mem_descriptors[0] = 0x%08X\n"
+ " shared_mem_descriptors[1] = 0x%08X\n"
+ " shared_mem_descriptors[2] = 0x%08X\n"
+ " shared_mem_descriptors[3] = 0x%08X\n"
+ " shared_mem_descriptors[4] = 0x%08X\n"
+ " shared_mem_descriptors[5] = 0x%08X\n"
+ " shared_mem_descriptors[6] = 0x%08X\n"
+ " shared_mem_descriptors[7] = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->register_shared_memory.memory_flags,
+ command->header.operation_id,
+ command->register_shared_memory.device_context,
+ command->register_shared_memory.block_id,
+ command->register_shared_memory.shared_mem_size,
+ command->register_shared_memory.
+ shared_mem_start_offset,
+ command->register_shared_memory.
+ shared_mem_descriptors[0],
+ command->register_shared_memory.
+ shared_mem_descriptors[1],
+ command->register_shared_memory.
+ shared_mem_descriptors[2],
+ command->register_shared_memory.
+ shared_mem_descriptors[3],
+ command->register_shared_memory.
+ shared_mem_descriptors[4],
+ command->register_shared_memory.
+ shared_mem_descriptors[5],
+ command->register_shared_memory.
+ shared_mem_descriptors[6],
+ command->register_shared_memory.
+ shared_mem_descriptors[7]);
+ break;
+
+ case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " block = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->release_shared_memory.device_context,
+ command->release_shared_memory.block);
+ break;
+
+ case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND\n"
+ " param_types = 0x%04X\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " client_session = 0x%08X\n"
+ " timeout = 0x%016llX\n"
+ " cancellation_id = 0x%08X\n"
+ " client_command_identifier = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->invoke_client_command.param_types,
+ command->header.operation_id,
+ command->invoke_client_command.device_context,
+ command->invoke_client_command.client_session,
+ command->invoke_client_command.timeout,
+ command->invoke_client_command.cancellation_id,
+ command->invoke_client_command.
+ client_command_identifier
+ );
+
+ for (i = 0; i < 4; i++) {
+ /* Must read params through invoke_client_command:
+ * open_client_session.params sits at a different
+ * offset in the union (after destination_uuid). */
+ uint32_t *param = (uint32_t *) &command->
+ invoke_client_command.params[i];
+ dprintk(KERN_INFO " params[%d] = "
+ "0x%08X:0x%08X:0x%08X\n", i,
+ param[0], param[1], param[2]);
+ }
+ break;
+
+ case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " client_session = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->cancel_client_operation.device_context,
+ command->cancel_client_operation.client_session);
+ break;
+
+ case TF_MESSAGE_TYPE_MANAGEMENT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_MANAGEMENT\n"
+ " operation_id = 0x%08X\n"
+ " command = 0x%08X\n"
+ " w3b_size = 0x%08X\n"
+ " w3b_start_offset = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->management.command,
+ command->management.w3b_size,
+ command->management.w3b_start_offset);
+ break;
+
+ default:
+ dprintk(
+ KERN_ERR " message_type = 0x%08X "
+ "(Unknown message type)\n",
+ command->header.message_type);
+ break;
+ }
+}
+
+
+/*
+ * Dump the specified SChannel answer using dprintk.
+ * Decodes the answer according to header.message_type; unknown types
+ * are reported with KERN_ERR. Read-only: the answer is not modified.
+ */
+void tf_dump_answer(union tf_answer *answer)
+{
+ u32 i;
+ dprintk(
+ KERN_INFO "answer@%p:\n",
+ answer);
+
+ switch (answer->header.message_type) {
+ case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_create_device_context\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " device_context = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->create_device_context.error_code,
+ answer->create_device_context.device_context);
+ break;
+
+ case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "ANSWER_DESTROY_DEVICE_CONTEXT\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " device_context_id = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->destroy_device_context.error_code,
+ answer->destroy_device_context.device_context_id);
+ break;
+
+
+ case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_open_client_session\n"
+ " error_origin = 0x%02X\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " client_session = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->open_client_session.error_origin,
+ answer->header.operation_id,
+ answer->open_client_session.error_code,
+ answer->open_client_session.client_session);
+ /* dump the four answer value pairs (a, b) */
+ for (i = 0; i < 4; i++) {
+ dprintk(KERN_INFO " answers[%d]=0x%08X:0x%08X\n",
+ i,
+ answer->open_client_session.answers[i].
+ value.a,
+ answer->open_client_session.answers[i].
+ value.b);
+ }
+ break;
+
+ case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "ANSWER_CLOSE_CLIENT_SESSION\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->close_client_session.error_code);
+ break;
+
+ case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_register_shared_memory\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " block = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->register_shared_memory.error_code,
+ answer->register_shared_memory.block);
+ break;
+
+ case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "ANSWER_RELEASE_SHARED_MEMORY\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " block_id = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->release_shared_memory.error_code,
+ answer->release_shared_memory.block_id);
+ break;
+
+ case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_invoke_client_command\n"
+ " error_origin = 0x%02X\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->invoke_client_command.error_origin,
+ answer->header.operation_id,
+ answer->invoke_client_command.error_code
+ );
+ for (i = 0; i < 4; i++) {
+ dprintk(KERN_INFO " answers[%d]=0x%08X:0x%08X\n",
+ i,
+ answer->invoke_client_command.answers[i].
+ value.a,
+ answer->invoke_client_command.answers[i].
+ value.b);
+ }
+ break;
+
+ case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_ANSWER_CANCEL_CLIENT_COMMAND\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->cancel_client_operation.error_code);
+ break;
+
+ case TF_MESSAGE_TYPE_MANAGEMENT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_MANAGEMENT\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->header.error_code);
+ break;
+
+ default:
+ dprintk(
+ KERN_ERR " message_type = 0x%02X "
+ "(Unknown message type)\n",
+ answer->header.message_type);
+ break;
+
+ }
+}
+
+#endif /* CONFIG_TF_DRIVER_DEBUG_SUPPORT */
+
+/*----------------------------------------------------------------------------
+ * SHA-1 implementation
+ * This is taken from the Linux kernel source crypto/sha1.c
+ *----------------------------------------------------------------------------*/
+
+/* SHA-1 working state: bit count, 5-word chaining state, partial block */
+struct sha1_ctx {
+ u64 count;
+ u32 state[5];
+ u8 buffer[64];
+};
+
+/* 32-bit left rotation */
+static inline u32 rol(u32 value, u32 bits)
+{
+ return ((value) << (bits)) | ((value) >> (32 - (bits)));
+}
+
+/* blk0() and blk() perform the initial expand. */
+/* I got the idea of expanding during the round function from SSLeay */
+#define blk0(i) block32[i]
+
+#define blk(i) (block32[i & 15] = rol( \
+ block32[(i + 13) & 15] ^ block32[(i + 8) & 15] ^ \
+ block32[(i + 2) & 15] ^ block32[i & 15], 1))
+
+/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
+#define R0(v, w, x, y, z, i) do { \
+ z += ((w & (x ^ y)) ^ y) + blk0(i) + 0x5A827999 + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+#define R1(v, w, x, y, z, i) do { \
+ z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+#define R2(v, w, x, y, z, i) do { \
+ z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+#define R3(v, w, x, y, z, i) do { \
+ z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+#define R4(v, w, x, y, z, i) do { \
+ z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+
+/* Hash a single 512-bit block. This is the core of the algorithm. */
+/* Hash a single 512-bit block. This is the core of the algorithm. */
+static void sha1_transform(u32 *state, const u8 *in)
+{
+ u32 a, b, c, d, e;
+ u32 block32[16];
+
+ /* convert/copy data to workspace (big-endian byte order) */
+ for (a = 0; a < sizeof(block32)/sizeof(u32); a++)
+ block32[a] = ((u32) in[4 * a]) << 24 |
+ ((u32) in[4 * a + 1]) << 16 |
+ ((u32) in[4 * a + 2]) << 8 |
+ ((u32) in[4 * a + 3]);
+
+ /* Copy context->state[] to working vars */
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+
+ /* 4 rounds of 20 operations each. Loop unrolled. */
+ R0(a, b, c, d, e, 0); R0(e, a, b, c, d, 1);
+ R0(d, e, a, b, c, 2); R0(c, d, e, a, b, 3);
+ R0(b, c, d, e, a, 4); R0(a, b, c, d, e, 5);
+ R0(e, a, b, c, d, 6); R0(d, e, a, b, c, 7);
+ R0(c, d, e, a, b, 8); R0(b, c, d, e, a, 9);
+ R0(a, b, c, d, e, 10); R0(e, a, b, c, d, 11);
+ R0(d, e, a, b, c, 12); R0(c, d, e, a, b, 13);
+ R0(b, c, d, e, a, 14); R0(a, b, c, d, e, 15);
+
+ R1(e, a, b, c, d, 16); R1(d, e, a, b, c, 17);
+ R1(c, d, e, a, b, 18); R1(b, c, d, e, a, 19);
+
+ R2(a, b, c, d, e, 20); R2(e, a, b, c, d, 21);
+ R2(d, e, a, b, c, 22); R2(c, d, e, a, b, 23);
+ R2(b, c, d, e, a, 24); R2(a, b, c, d, e, 25);
+ R2(e, a, b, c, d, 26); R2(d, e, a, b, c, 27);
+ R2(c, d, e, a, b, 28); R2(b, c, d, e, a, 29);
+ R2(a, b, c, d, e, 30); R2(e, a, b, c, d, 31);
+ R2(d, e, a, b, c, 32); R2(c, d, e, a, b, 33);
+ R2(b, c, d, e, a, 34); R2(a, b, c, d, e, 35);
+ R2(e, a, b, c, d, 36); R2(d, e, a, b, c, 37);
+ R2(c, d, e, a, b, 38); R2(b, c, d, e, a, 39);
+
+ R3(a, b, c, d, e, 40); R3(e, a, b, c, d, 41);
+ R3(d, e, a, b, c, 42); R3(c, d, e, a, b, 43);
+ R3(b, c, d, e, a, 44); R3(a, b, c, d, e, 45);
+ R3(e, a, b, c, d, 46); R3(d, e, a, b, c, 47);
+ R3(c, d, e, a, b, 48); R3(b, c, d, e, a, 49);
+ R3(a, b, c, d, e, 50); R3(e, a, b, c, d, 51);
+ R3(d, e, a, b, c, 52); R3(c, d, e, a, b, 53);
+ R3(b, c, d, e, a, 54); R3(a, b, c, d, e, 55);
+ R3(e, a, b, c, d, 56); R3(d, e, a, b, c, 57);
+ R3(c, d, e, a, b, 58); R3(b, c, d, e, a, 59);
+
+ R4(a, b, c, d, e, 60); R4(e, a, b, c, d, 61);
+ R4(d, e, a, b, c, 62); R4(c, d, e, a, b, 63);
+ R4(b, c, d, e, a, 64); R4(a, b, c, d, e, 65);
+ R4(e, a, b, c, d, 66); R4(d, e, a, b, c, 67);
+ R4(c, d, e, a, b, 68); R4(b, c, d, e, a, 69);
+ R4(a, b, c, d, e, 70); R4(e, a, b, c, d, 71);
+ R4(d, e, a, b, c, 72); R4(c, d, e, a, b, 73);
+ R4(b, c, d, e, a, 74); R4(a, b, c, d, e, 75);
+ R4(e, a, b, c, d, 76); R4(d, e, a, b, c, 77);
+ R4(c, d, e, a, b, 78); R4(b, c, d, e, a, 79);
+
+ /* Add the working vars back into context.state[] */
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ /* Wipe variables */
+ a = b = c = d = e = 0;
+ memset(block32, 0x00, sizeof(block32));
+}
+
+
+/*
+ * Initialize a SHA-1 context: zero the bit count, load the standard
+ * FIPS 180-1 initial hash values H0..H4 into state[], and clear the
+ * 64-byte block buffer.
+ */
+static void sha1_init(void *ctx)
+{
+	struct sha1_ctx *sctx = ctx;
+	/* { count, state[5], buffer[64] } — copied wholesale below. */
+	static const struct sha1_ctx initstate = {
+		0,
+		{ 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 },
+		{ 0, }
+	};
+
+	*sctx = initstate;
+}
+
+
+/*
+ * Feed @len bytes from @data into the running SHA-1 context.
+ *
+ * Buffers partial 64-byte blocks in sctx->buffer and runs
+ * sha1_transform() on every complete block.
+ */
+static void sha1_update(void *ctx, const u8 *data, unsigned int len)
+{
+	struct sha1_ctx *sctx = ctx;
+	unsigned int i, j;
+
+	/* Bytes already buffered from a previous partial block. */
+	j = (sctx->count >> 3) & 0x3f;
+	/*
+	 * Promote before shifting: "len << 3" is evaluated in 32 bits
+	 * and would silently truncate the bit count for len >= 512 MB
+	 * even though sctx->count is 64-bit.
+	 */
+	sctx->count += (u64)len << 3;
+
+	if ((j + len) > 63) {
+		/* Complete the buffered block and hash it. */
+		memcpy(&sctx->buffer[j], data, (i = 64 - j));
+		sha1_transform(sctx->state, sctx->buffer);
+		/* Hash any further whole 64-byte blocks in place. */
+		for ( ; i + 63 < len; i += 64)
+			sha1_transform(sctx->state, &data[i]);
+		j = 0;
+	} else
+		i = 0;
+	/* Stash the remaining tail for the next update/final. */
+	memcpy(&sctx->buffer[j], &data[i], len - i);
+}
+
+
+/* Add padding and return the message digest.
+ *
+ * Appends the 0x80 marker byte and zero padding so the total length is
+ * congruent to 56 mod 64, then appends the original bit count as a
+ * 64-bit big-endian value (FIPS 180-1 padding); sha1_update() runs the
+ * final transform(s).  The five 32-bit state words are then serialized
+ * big-endian into the 20-byte digest @out and the context is cleared.
+ */
+static void sha1_final(void *ctx, u8 *out)
+{
+	struct sha1_ctx *sctx = ctx;
+	u32 i, j, index, padlen;
+	u64 t;
+	u8 bits[8] = { 0, };
+	static const u8 padding[64] = { 0x80, };
+
+	/* Serialize the 64-bit message bit count big-endian into bits[]. */
+	t = sctx->count;
+	bits[7] = 0xff & t; t >>= 8;
+	bits[6] = 0xff & t; t >>= 8;
+	bits[5] = 0xff & t; t >>= 8;
+	bits[4] = 0xff & t; t >>= 8;
+	bits[3] = 0xff & t; t >>= 8;
+	bits[2] = 0xff & t; t >>= 8;
+	bits[1] = 0xff & t; t >>= 8;
+	bits[0] = 0xff & t;
+
+	/* Pad out to 56 mod 64 */
+	index = (sctx->count >> 3) & 0x3f;
+	padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+	sha1_update(sctx, padding, padlen);
+
+	/* Append length */
+	sha1_update(sctx, bits, sizeof(bits));
+
+	/* Store state in digest, big-endian word by word */
+	for (i = j = 0; i < 5; i++, j += 4) {
+		u32 t2 = sctx->state[i];
+		out[j+3] = t2 & 0xff; t2 >>= 8;
+		out[j+2] = t2 & 0xff; t2 >>= 8;
+		out[j+1] = t2 & 0xff; t2 >>= 8;
+		out[j] = t2 & 0xff;
+	}
+
+	/* Wipe context.  NOTE(review): a plain memset on memory that is
+	 * never read again may be elided by the compiler; consider a
+	 * non-optimizable clear if the hashed data is sensitive. */
+	memset(sctx, 0, sizeof(*sctx));
+}
+
+
+
+
+/*----------------------------------------------------------------------------
+ * Process identification
+ *----------------------------------------------------------------------------*/
+
+/* This function generates a processes hash table for authentication.
+ *
+ * Scans the current process' VMAs for the executable mapping
+ * (VM_EXECUTABLE with a backing file), maps the backing file into the
+ * caller's address space, hashes it page by page with SHA-1 into the
+ * 20-byte buffer @hash, then unmaps it.
+ *
+ * Returns 0 on success, -ENOENT when no executable VMA exists,
+ * -ENOMEM/-EFAULT/-EINVAL on failure.
+ */
+int tf_get_current_process_hash(void *hash)
+{
+	/*
+	 * Start from -ENOENT so that an empty VMA scan reports an error
+	 * instead of returning 0 with an uninitialized digest (the
+	 * previous "int result = 0;" made the -ENOENT path unreachable).
+	 */
+	int result = -ENOENT;
+	void *buffer;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+
+	buffer = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (buffer == NULL) {
+		dprintk(
+			KERN_ERR "tf_get_current_process_hash:"
+			" Out of memory for buffer!\n");
+		return -ENOMEM;
+	}
+
+	mm = current->mm;
+
+	down_read(&(mm->mmap_sem));
+	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+		if ((vma->vm_flags & VM_EXECUTABLE) != 0 && vma->vm_file
+				!= NULL) {
+			struct dentry *dentry;
+			unsigned long start;
+			unsigned long cur;
+			unsigned long end;
+			struct sha1_ctx sha1;
+
+			dentry = dget(vma->vm_file->f_dentry);
+
+			dprintk(
+				KERN_DEBUG "tf_get_current_process_hash: "
+				"Found executable VMA for inode %lu "
+				"(%lu bytes).\n",
+				dentry->d_inode->i_ino,
+				(unsigned long) (dentry->d_inode->
+					i_size));
+
+			start = do_mmap(vma->vm_file, 0,
+				dentry->d_inode->i_size,
+				PROT_READ | PROT_WRITE | PROT_EXEC,
+				MAP_PRIVATE, 0);
+			/*
+			 * do_mmap() returns the error encoded in an
+			 * unsigned long; "start < 0" is always false on
+			 * an unsigned type and never caught failures.
+			 */
+			if (IS_ERR_VALUE(start)) {
+				dprintk(
+					KERN_ERR "tf_get_current_process_hash:"
+					" do_mmap failed (error %d)!\n",
+					(int) start);
+				dput(dentry);
+				result = -EFAULT;
+				goto vma_out;
+			}
+
+			end = start + dentry->d_inode->i_size;
+
+			/* Hash the mapped image one page at a time. */
+			sha1_init(&sha1);
+			cur = start;
+			while (cur < end) {
+				unsigned long chunk;
+
+				chunk = end - cur;
+				if (chunk > PAGE_SIZE)
+					chunk = PAGE_SIZE;
+				if (copy_from_user(buffer, (const void *) cur,
+						chunk) != 0) {
+					dprintk(
+						KERN_ERR "tf_get_current_"
+						"process_hash: copy_from_user "
+						"failed!\n");
+					result = -EINVAL;
+					(void) do_munmap(mm, start,
+						dentry->d_inode->i_size);
+					dput(dentry);
+					goto vma_out;
+				}
+				sha1_update(&sha1, buffer, chunk);
+				cur += chunk;
+			}
+			sha1_final(&sha1, hash);
+			result = 0;
+
+			(void) do_munmap(mm, start, dentry->d_inode->i_size);
+			dput(dentry);
+			break;
+		}
+	}
+vma_out:
+	up_read(&(mm->mmap_sem));
+
+	internal_kfree(buffer);
+
+	if (result == -ENOENT)
+		dprintk(
+			KERN_ERR "tf_get_current_process_hash: "
+			"No executable VMA found for process!\n");
+	return result;
+}
+
+#ifndef CONFIG_ANDROID
+/* This function hashes the path of the current application.
+ * If data = NULL, nothing else is added to the hash;
+ * else data (data_len bytes) is folded into the hash as well.
+ *
+ * The SHA1_DIGEST_SIZE-byte digest is copied into @buffer.
+ * Returns 0 on success, -ENOENT when no executable VMA is found,
+ * -ENOMEM on allocation failure, or the d_path() error code.
+ */
+int tf_hash_application_path_and_data(char *buffer, void *data,
+	u32 data_len)
+{
+	int result = -ENOENT;
+	char *tmp = NULL;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+
+	tmp = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (tmp == NULL) {
+		result = -ENOMEM;
+		goto end;
+	}
+
+	mm = current->mm;
+
+	down_read(&(mm->mmap_sem));
+	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+		if ((vma->vm_flags & VM_EXECUTABLE) != 0
+				&& vma->vm_file != NULL) {
+			struct path *path;
+			char *endpath;
+			size_t pathlen;
+			struct sha1_ctx sha1;
+			u8 hash[SHA1_DIGEST_SIZE];
+
+			path = &vma->vm_file->f_path;
+
+			/*
+			 * d_path() writes the path at the END of tmp and
+			 * returns a pointer into it, or ERR_PTR on error.
+			 * The error must be checked on the RETURN value:
+			 * the old code tested IS_ERR(path), which is the
+			 * input argument and never an error pointer.
+			 */
+			endpath = d_path(path, tmp, PAGE_SIZE);
+			if (IS_ERR(endpath)) {
+				result = PTR_ERR(endpath);
+				up_read(&(mm->mmap_sem));
+				goto end;
+			}
+			pathlen = (tmp + PAGE_SIZE) - endpath;
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+			{
+				char *c;
+				dprintk(KERN_DEBUG "current process path = ");
+				for (c = endpath;
+				     c < tmp + PAGE_SIZE;
+				     c++)
+					dprintk("%c", *c);
+
+				dprintk(", uid=%d, euid=%d\n", current_uid(),
+					current_euid());
+			}
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+			/* Hash the path, then optionally the extra data. */
+			sha1_init(&sha1);
+			sha1_update(&sha1, endpath, pathlen);
+			if (data != NULL) {
+				dprintk(KERN_INFO "current process path: "
+					"Hashing additional data\n");
+				sha1_update(&sha1, data, data_len);
+			}
+			sha1_final(&sha1, hash);
+			memcpy(buffer, hash, sizeof(hash));
+
+			result = 0;
+
+			break;
+		}
+	}
+	up_read(&(mm->mmap_sem));
+
+end:
+	if (tmp != NULL)
+		internal_kfree(tmp);
+
+	return result;
+}
+#endif /* !CONFIG_ANDROID */
+
+/*
+ * kmalloc() wrapper that bumps the driver-wide count of outstanding
+ * allocations (dev->stats.stat_memories_allocated) on success.
+ * @priority is forwarded to kmalloc() as the GFP flags argument.
+ * Returns the allocated pointer or NULL.
+ */
+void *internal_kmalloc(size_t size, int priority)
+{
+	void *ptr;
+	struct tf_device *dev = tf_get_device();
+
+	ptr = kmalloc(size, priority);
+
+	/* Only count successful allocations. */
+	if (ptr != NULL)
+		atomic_inc(
+			&dev->stats.stat_memories_allocated);
+
+	return ptr;
+}
+
+/*
+ * Statistics-tracking wrapper around kfree().  Decrements the
+ * outstanding-allocation counter for non-NULL pointers, then frees.
+ * kfree(NULL) is a no-op, matching the counter behavior.
+ */
+void internal_kfree(void *ptr)
+{
+	struct tf_device *dev = tf_get_device();
+
+	if (ptr != NULL)
+		atomic_dec(
+			&dev->stats.stat_memories_allocated);
+
+	/*
+	 * Plain call, not "return kfree(ptr)": returning a void
+	 * expression from a void function is a C constraint violation.
+	 */
+	kfree(ptr);
+}
+
+/*
+ * Statistics-tracking wrapper around vunmap().  Decrements the
+ * outstanding-allocation counter for non-NULL pointers.
+ */
+void internal_vunmap(void *ptr)
+{
+	struct tf_device *dev = tf_get_device();
+
+	if (ptr != NULL)
+		atomic_dec(
+			&dev->stats.stat_memories_allocated);
+
+	/*
+	 * Align down to the start of the mapping before unmapping.
+	 * NOTE(review): the 0xFFFFF000 mask hard-codes a 4 KB page size
+	 * and 32-bit pointers; PAGE_MASK would be portable — confirm
+	 * the target is 32-bit ARM only.
+	 */
+	vunmap((void *) (((unsigned int)ptr) & 0xFFFFF000));
+}
+
+/*
+ * vmalloc() wrapper that bumps the driver-wide count of outstanding
+ * allocations (dev->stats.stat_memories_allocated) on success.
+ * Returns the allocated pointer or NULL.
+ */
+void *internal_vmalloc(size_t size)
+{
+	void *ptr;
+	struct tf_device *dev = tf_get_device();
+
+	ptr = vmalloc(size);
+
+	/* Only count successful allocations. */
+	if (ptr != NULL)
+		atomic_inc(
+			&dev->stats.stat_memories_allocated);
+
+	return ptr;
+}
+
+/*
+ * Statistics-tracking wrapper around vfree().  Decrements the
+ * outstanding-allocation counter for non-NULL pointers, then frees.
+ */
+void internal_vfree(void *ptr)
+{
+	struct tf_device *dev = tf_get_device();
+
+	if (ptr != NULL)
+		atomic_dec(
+			&dev->stats.stat_memories_allocated);
+
+	/*
+	 * Plain call, not "return vfree(ptr)": returning a void
+	 * expression from a void function is a C constraint violation.
+	 */
+	vfree(ptr);
+}
+
+/*
+ * get_zeroed_page() wrapper that bumps the driver-wide count of
+ * allocated pages (dev->stats.stat_pages_allocated) on success.
+ * @priority is forwarded as the GFP flags argument.
+ * Returns the page's kernel virtual address, or 0 on failure.
+ */
+unsigned long internal_get_zeroed_page(int priority)
+{
+	unsigned long result;
+	struct tf_device *dev = tf_get_device();
+
+	result = get_zeroed_page(priority);
+
+	/* Only count successful allocations. */
+	if (result != 0)
+		atomic_inc(&dev->stats.
+			stat_pages_allocated);
+
+	return result;
+}
+
+/*
+ * Statistics-tracking wrapper around free_page().  Decrements the
+ * allocated-page counter for non-zero addresses, then frees.
+ */
+void internal_free_page(unsigned long addr)
+{
+	struct tf_device *dev = tf_get_device();
+
+	if (addr != 0)
+		atomic_dec(
+			&dev->stats.stat_pages_allocated);
+
+	/*
+	 * Plain call, not "return free_page(addr)": returning a void
+	 * expression from a void function is a C constraint violation.
+	 */
+	free_page(addr);
+}
+
+/*
+ * get_user_pages() wrapper that adds the number of pages actually
+ * pinned to the driver-wide locked-page counter
+ * (dev->stats.stat_pages_locked).  All arguments are forwarded
+ * unchanged; the return value is the number of pages pinned, or a
+ * negative error code.
+ */
+int internal_get_user_pages(
+	struct task_struct *tsk,
+	struct mm_struct *mm,
+	unsigned long start,
+	int len,
+	int write,
+	int force,
+	struct page **pages,
+	struct vm_area_struct **vmas)
+{
+	int result;
+	struct tf_device *dev = tf_get_device();
+
+	result = get_user_pages(
+		tsk,
+		mm,
+		start,
+		len,
+		write,
+		force,
+		pages,
+		vmas);
+
+	/* A positive result is the count of pages successfully pinned. */
+	if (result > 0)
+		atomic_add(result,
+			&dev->stats.stat_pages_locked);
+
+	return result;
+}
+
+/*
+ * get_page() wrapper that bumps the driver-wide locked-page counter.
+ * Must be balanced by internal_page_cache_release().
+ */
+void internal_get_page(struct page *page)
+{
+	struct tf_device *dev = tf_get_device();
+
+	atomic_inc(&dev->stats.stat_pages_locked);
+
+	get_page(page);
+}
+
+/*
+ * page_cache_release() wrapper that decrements the driver-wide
+ * locked-page counter.  Balances internal_get_page() and
+ * internal_get_user_pages().
+ */
+void internal_page_cache_release(struct page *page)
+{
+	struct tf_device *dev = tf_get_device();
+
+	atomic_dec(&dev->stats.stat_pages_locked);
+
+	page_cache_release(page);
+}
diff --git a/security/tf_driver/scxlnx_util.h b/security/tf_driver/tf_util.h
index daff3a7d4b95..b349e9ae9063 100644
--- a/security/tf_driver/scxlnx_util.h
+++ b/security/tf_driver/tf_util.h
@@ -1,5 +1,5 @@
-/*
- * Copyright (c) 2006-2010 Trusted Logic S.A.
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -16,8 +16,9 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
-#ifndef __SCXLNX_UTIL_H__
-#define __SCXLNX_UTIL_H__
+
+#ifndef __TF_UTIL_H__
+#define __TF_UTIL_H__
#include <linux/spinlock.h>
#include <linux/errno.h>
@@ -30,8 +31,8 @@
#include <linux/vmalloc.h>
#include <asm/byteorder.h>
-#include "scx_protocol.h"
-#include "scxlnx_defs.h"
+#include "tf_protocol.h"
+#include "tf_defs.h"
/*----------------------------------------------------------------------------
* Debug printing routines
@@ -39,29 +40,33 @@
#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
-void addressCacheProperty(unsigned long va);
+void address_cache_property(unsigned long va);
#define dprintk printk
+#define dpr_info pr_info
+#define dpr_err pr_err
-void SCXLNXDumpL1SharedBuffer(struct SCHANNEL_C1S_BUFFER *pBuf);
+void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer);
-void SCXLNXDumpMessage(union SCX_COMMAND_MESSAGE *pMessage);
+void tf_dump_command(union tf_command *command);
-void SCXLNXDumpAnswer(union SCX_ANSWER_MESSAGE *pAnswer);
+void tf_dump_answer(union tf_answer *answer);
#ifdef CONFIG_BENCH_SECURE_CYCLE
-void setupCounters(void);
-void runBogoMIPS(void);
-int runCodeSpeed(unsigned int nLoop);
-int runDataSpeed(unsigned int nLoop, unsigned long nVA);
+void setup_counters(void);
+void run_bogo_mips(void);
+int run_code_speed(unsigned int loop);
+int run_data_speed(unsigned int loop, unsigned long va);
#endif /* CONFIG_BENCH_SECURE_CYCLE */
#else /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
#define dprintk(args...) do { ; } while (0)
-#define SCXLNXDumpL1SharedBuffer(pBuf) ((void) 0)
-#define SCXLNXDumpMessage(pMessage) ((void) 0)
-#define SCXLNXDumpAnswer(pAnswer) ((void) 0)
+#define dpr_info(args...) do { ; } while (0)
+#define dpr_err(args...) do { ; } while (0)
+#define tf_dump_l1_shared_buffer(buffer) ((void) 0)
+#define tf_dump_command(command) ((void) 0)
+#define tf_dump_answer(answer) ((void) 0)
#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
@@ -71,22 +76,23 @@ int runDataSpeed(unsigned int nLoop, unsigned long nVA);
* Process identification
*----------------------------------------------------------------------------*/
-int SCXLNXConnGetCurrentProcessHash(void *pHash);
+int tf_get_current_process_hash(void *hash);
-int SCXLNXConnHashApplicationPathAndData(char *pBuffer, void *pData,
- u32 nDataLen);
+#ifndef CONFIG_ANDROID
+int tf_hash_application_path_and_data(char *buffer, void *data, u32 data_len);
+#endif /* !CONFIG_ANDROID */
/*----------------------------------------------------------------------------
* Statistic computation
*----------------------------------------------------------------------------*/
-void *internal_kmalloc(size_t nSize, int nPriority);
-void internal_kfree(void *pMemory);
-void internal_vunmap(void *pMemory);
-void *internal_vmalloc(size_t nSize);
-void internal_vfree(void *pMemory);
-unsigned long internal_get_zeroed_page(int nPriority);
-void internal_free_page(unsigned long pPage);
+void *internal_kmalloc(size_t size, int priority);
+void internal_kfree(void *ptr);
+void internal_vunmap(void *ptr);
+void *internal_vmalloc(size_t size);
+void internal_vfree(void *ptr);
+unsigned long internal_get_zeroed_page(int priority);
+void internal_free_page(unsigned long addr);
int internal_get_user_pages(
struct task_struct *tsk,
struct mm_struct *mm,
@@ -98,5 +104,5 @@ int internal_get_user_pages(
struct vm_area_struct **vmas);
void internal_get_page(struct page *page);
void internal_page_cache_release(struct page *page);
-#endif /* __SCXLNX_UTIL_H__ */
+#endif /* __TF_UTIL_H__ */