Diffstat (limited to 'security')
-rw-r--r--   security/Kconfig                         |    4
-rw-r--r--   security/Makefile                        |    4
-rw-r--r--   security/integrity/ima/ima_policy.c      |    1
-rw-r--r--   security/selinux/netlabel.c              |    6
-rw-r--r--   security/tf_driver/Kconfig               |    9
-rw-r--r--   security/tf_driver/Makefile              |   37
-rw-r--r--   security/tf_driver/s_version.h           |  115
-rw-r--r--   security/tf_driver/tee_client_api.h      |  180
-rw-r--r--   security/tf_driver/tee_client_api_ex.h   |   60
-rw-r--r--   security/tf_driver/tee_client_api_imp.h  |   68
-rw-r--r--   security/tf_driver/tf_comm.c             | 1717
-rw-r--r--   security/tf_driver/tf_comm.h             |  202
-rw-r--r--   security/tf_driver/tf_comm_tz.c          |  911
-rw-r--r--   security/tf_driver/tf_conn.c             | 1675
-rw-r--r--   security/tf_driver/tf_conn.h             |  106
-rw-r--r--   security/tf_driver/tf_defs.h             |  547
-rw-r--r--   security/tf_driver/tf_device.c           |  873
-rw-r--r--   security/tf_driver/tf_protocol.h         |  699
-rw-r--r--   security/tf_driver/tf_teec.c             |  618
-rw-r--r--   security/tf_driver/tf_teec.h             |   33
-rw-r--r--   security/tf_driver/tf_util.c             | 1138
-rw-r--r--   security/tf_driver/tf_util.h             |  123
-rw-r--r--   security/tlk_driver/Kconfig              |   14
-rw-r--r--   security/tlk_driver/Makefile             |   30
-rw-r--r--   security/tlk_driver/ote_comms.c          |  542
-rw-r--r--   security/tlk_driver/ote_device.c         |  769
-rw-r--r--   security/tlk_driver/ote_fs.c             |  311
-rw-r--r--   security/tlk_driver/ote_irq.S            |   23
-rw-r--r--   security/tlk_driver/ote_log.c            |  196
-rw-r--r--   security/tlk_driver/ote_protocol.h       |  342
-rw-r--r--   security/tlk_driver/ote_types.h          |   79
31 files changed, 11426 insertions(+), 6 deletions(-)
diff --git a/security/Kconfig b/security/Kconfig
index e9c6ac724fef..bad85cd29f94 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -103,7 +103,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
- default 32768 if ARM
+ default 32768 if ARM || (ARM64 && COMPAT)
default 65536
help
This is the portion of low virtual memory which should be protected
@@ -121,6 +121,8 @@ source security/selinux/Kconfig
source security/smack/Kconfig
source security/tomoyo/Kconfig
source security/apparmor/Kconfig
+source security/tf_driver/Kconfig
+source security/tlk_driver/Kconfig
source security/yama/Kconfig
source security/integrity/Kconfig
diff --git a/security/Makefile b/security/Makefile
index c26c81e92571..6e5b56784cf7 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -7,7 +7,9 @@ subdir-$(CONFIG_SECURITY_SELINUX) += selinux
subdir-$(CONFIG_SECURITY_SMACK) += smack
subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
+subdir-$(CONFIG_TRUSTED_FOUNDATIONS) += tf_driver
subdir-$(CONFIG_SECURITY_YAMA) += yama
+subdir-$(CONFIG_TRUSTED_LITTLE_KERNEL) += tlk_driver
# always enable default capabilities
obj-y += commoncap.o
@@ -24,6 +26,8 @@ obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o
obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/built-in.o
obj-$(CONFIG_SECURITY_YAMA) += yama/built-in.o
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += tf_driver/built-in.o
+obj-$(CONFIG_TRUSTED_LITTLE_KERNEL) += tlk_driver/built-in.o
# Object integrity file lists
subdir-$(CONFIG_INTEGRITY) += integrity
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 399433ad614e..a9c3d3cd1990 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -73,7 +73,6 @@ static struct ima_rule_entry default_rules[] = {
{.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = RAMFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index da4b8b233280..6235d052338b 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -442,8 +442,7 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
sksec->nlbl_state != NLBL_CONNLABELED)
return 0;
- local_bh_disable();
- bh_lock_sock_nested(sk);
+ lock_sock(sk);
/* connected sockets are allowed to disconnect when the address family
* is set to AF_UNSPEC, if that is what is happening we want to reset
@@ -464,7 +463,6 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
sksec->nlbl_state = NLBL_CONNLABELED;
socket_connect_return:
- bh_unlock_sock(sk);
- local_bh_enable();
+ release_sock(sk);
return rc;
}
diff --git a/security/tf_driver/Kconfig b/security/tf_driver/Kconfig
new file mode 100644
index 000000000000..e212e53913eb
--- /dev/null
+++ b/security/tf_driver/Kconfig
@@ -0,0 +1,9 @@
+config TRUSTED_FOUNDATIONS
+ bool "Enable TF Driver"
+ default n
+ select CRYPTO_SHA1
+ select TEGRA_USE_SECURE_KERNEL
+ help
+ This option adds kernel support for communication with the Trusted Foundations.
+	  Default option is N.
+ If you are unsure how to answer this question, answer N.
diff --git a/security/tf_driver/Makefile b/security/tf_driver/Makefile
new file mode 100644
index 000000000000..9cf49e27507e
--- /dev/null
+++ b/security/tf_driver/Makefile
@@ -0,0 +1,37 @@
+#
+# Copyright (c) 2006-2010 Trusted Logic S.A.
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+# debug options
+#ccflags-y += -O0 -DDEBUG -D_DEBUG -DCONFIG_TF_DRIVER_DEBUG_SUPPORT
+ccflags-y += -DNDEBUG
+ccflags-y += -DLINUX -DCONFIG_TF_TRUSTZONE -DCONFIG_TFN -DCONFIG_SECURE_TRACES -DCONFIG_TF_TEEC
+
+ifdef S_VERSION_BUILD
+ccflags-y += -DS_VERSION_BUILD=$(S_VERSION_BUILD)
+endif
+
+tf_driver-objs += tf_util.o
+tf_driver-objs += tf_conn.o
+tf_driver-objs += tf_device.o
+tf_driver-objs += tf_comm.o
+tf_driver-objs += tf_comm_tz.o
+tf_driver-objs += tf_teec.o
+
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += tf_driver.o
diff --git a/security/tf_driver/s_version.h b/security/tf_driver/s_version.h
new file mode 100644
index 000000000000..34c635735248
--- /dev/null
+++ b/security/tf_driver/s_version.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __S_VERSION_H__
+#define __S_VERSION_H__
+
+#include <linux/stringify.h>
+
+/*
+ * Usage: define S_VERSION_BUILD on the compiler's command line.
+ *
+ * Then set:
+ * - S_VERSION_OS
+ * - S_VERSION_PLATFORM
+ * - S_VERSION_MAIN
+ * - S_VERSION_ENG is optional
+ * - S_VERSION_PATCH is optional
+ * - S_VERSION_BUILD = 0 if S_VERSION_BUILD not defined or empty
+ */
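+
+/*
+ * For example (an illustrative invocation, not mandated by this header),
+ * building with:
+ *   make S_VERSION_BUILD=1234
+ * lets the tf_driver Makefile forward -DS_VERSION_BUILD=1234 on the
+ * compiler's command line.
+ */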
+
+
+
+/*
+ * This version number must be updated for each new release.
+ *
+ * If this is a patch or engineering version use the following
+ * defines to set the version number. Else set these values to 0.
+ */
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+#define S_VERSION_OS "A" /* "A" for all Android */
+#define S_VERSION_PLATFORM "A"
+#define S_VERSION_MAIN "01.11"
+#define S_VERSION_ENG 0
+#define S_VERSION_PATCH 0
+
+#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
+#define S_VERSION_OS "A" /* "A" for all Android */
+#define S_VERSION_PLATFORM "B"
+#define S_VERSION_MAIN "02.02"
+#define S_VERSION_ENG 0
+#define S_VERSION_PATCH 0
+
+#elif defined(CONFIG_ARCH_TEGRA_11x_SOC)
+#define S_VERSION_OS "A" /* "A" for all Android */
+#define S_VERSION_PLATFORM "C"
+#define S_VERSION_MAIN "02.03"
+#define S_VERSION_ENG 0
+#define S_VERSION_PATCH 0
+
+#else
+#define S_VERSION_OS "Z" /* Unknown platform */
+#define S_VERSION_PLATFORM "Z" /* Unknown platform */
+#define S_VERSION_MAIN "00.00"
+#define S_VERSION_ENG 0
+#define S_VERSION_PATCH 0
+#endif
+
+
+#ifdef S_VERSION_BUILD
+/* TRICK: detect if S_VERSION_BUILD is defined but empty */
+#if 0 == S_VERSION_BUILD-0
+#undef S_VERSION_BUILD
+#define S_VERSION_BUILD 0
+#endif
+#else
+/* S_VERSION_BUILD is not defined */
+#define S_VERSION_BUILD 0
+#endif
+
+
+#if S_VERSION_ENG != 0
+#define _S_VERSION_ENG "e" __stringify(S_VERSION_ENG)
+#else
+#define _S_VERSION_ENG ""
+#endif
+
+#if S_VERSION_PATCH != 0
+#define _S_VERSION_PATCH "p" __stringify(S_VERSION_PATCH)
+#else
+#define _S_VERSION_PATCH ""
+#endif
+
+#if !defined(NDEBUG) || defined(_DEBUG)
+#define S_VERSION_VARIANT "D "
+#else
+#define S_VERSION_VARIANT " "
+#endif
+
+#define S_VERSION_STRING (\
+ "TFN" \
+ S_VERSION_OS \
+ S_VERSION_PLATFORM \
+ S_VERSION_MAIN \
+ _S_VERSION_ENG \
+ _S_VERSION_PATCH \
+ "." __stringify(S_VERSION_BUILD) " " \
+ S_VERSION_VARIANT)
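+
+/*
+ * Worked example, derived from the macros above: a Tegra 3 build with
+ * S_VERSION_BUILD=1234 expands S_VERSION_STRING to
+ *   "TFN" "A" "B" "02.02" "." "1234" " " S_VERSION_VARIANT
+ * i.e. "TFNAB02.02.1234 D " for a debug build.
+ */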
+
+#endif /* __S_VERSION_H__ */
diff --git a/security/tf_driver/tee_client_api.h b/security/tf_driver/tee_client_api.h
new file mode 100644
index 000000000000..1dbbab1169c7
--- /dev/null
+++ b/security/tf_driver/tee_client_api.h
@@ -0,0 +1,180 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/* This header file corresponds to V1.0 of the GlobalPlatform
+ * TEE Client API Specification
+ */
+#ifndef __TEE_CLIENT_API_H__
+#define __TEE_CLIENT_API_H__
+
+#include <linux/types.h>
+
+#ifndef TEEC_EXPORT
+#define TEEC_EXPORT
+#endif
+
+/* The header tee_client_api_imp.h must define implementation-dependent
+ * types, constants and macros.
+ *
+ * The implementation-dependent types are:
+ * - TEEC_Context_IMP
+ * - TEEC_Session_IMP
+ * - TEEC_SharedMemory_IMP
+ * - TEEC_Operation_IMP
+ *
+ * The implementation-dependent constants are:
+ * - TEEC_CONFIG_SHAREDMEM_MAX_SIZE
+ * The implementation-dependent macros are:
+ * - TEEC_PARAM_TYPES
+ */
+#include "tee_client_api_imp.h"
+
+/* Type definitions */
+typedef struct TEEC_Context {
+ TEEC_Context_IMP imp;
+} TEEC_Context;
+
+typedef struct TEEC_Session {
+ TEEC_Session_IMP imp;
+} TEEC_Session;
+
+typedef struct TEEC_SharedMemory {
+ void *buffer;
+ size_t size;
+ uint32_t flags;
+ TEEC_SharedMemory_IMP imp;
+} TEEC_SharedMemory;
+
+typedef struct {
+ void *buffer;
+ size_t size;
+} TEEC_TempMemoryReference;
+
+typedef struct {
+ TEEC_SharedMemory *parent;
+ size_t size;
+ size_t offset;
+} TEEC_RegisteredMemoryReference;
+
+typedef struct {
+ uint32_t a;
+ uint32_t b;
+} TEEC_Value;
+
+typedef union {
+ TEEC_TempMemoryReference tmpref;
+ TEEC_RegisteredMemoryReference memref;
+ TEEC_Value value;
+} TEEC_Parameter;
+
+typedef struct TEEC_Operation {
+ volatile uint32_t started;
+ uint32_t paramTypes;
+ TEEC_Parameter params[4];
+ TEEC_Operation_IMP imp;
+} TEEC_Operation;
+
+#define TEEC_SUCCESS ((TEEC_Result)0x00000000)
+#define TEEC_ERROR_GENERIC ((TEEC_Result)0xFFFF0000)
+#define TEEC_ERROR_ACCESS_DENIED ((TEEC_Result)0xFFFF0001)
+#define TEEC_ERROR_CANCEL ((TEEC_Result)0xFFFF0002)
+#define TEEC_ERROR_ACCESS_CONFLICT ((TEEC_Result)0xFFFF0003)
+#define TEEC_ERROR_EXCESS_DATA ((TEEC_Result)0xFFFF0004)
+#define TEEC_ERROR_BAD_FORMAT ((TEEC_Result)0xFFFF0005)
+#define TEEC_ERROR_BAD_PARAMETERS ((TEEC_Result)0xFFFF0006)
+#define TEEC_ERROR_BAD_STATE ((TEEC_Result)0xFFFF0007)
+#define TEEC_ERROR_ITEM_NOT_FOUND ((TEEC_Result)0xFFFF0008)
+#define TEEC_ERROR_NOT_IMPLEMENTED ((TEEC_Result)0xFFFF0009)
+#define TEEC_ERROR_NOT_SUPPORTED ((TEEC_Result)0xFFFF000A)
+#define TEEC_ERROR_NO_DATA ((TEEC_Result)0xFFFF000B)
+#define TEEC_ERROR_OUT_OF_MEMORY ((TEEC_Result)0xFFFF000C)
+#define TEEC_ERROR_BUSY ((TEEC_Result)0xFFFF000D)
+#define TEEC_ERROR_COMMUNICATION ((TEEC_Result)0xFFFF000E)
+#define TEEC_ERROR_SECURITY ((TEEC_Result)0xFFFF000F)
+#define TEEC_ERROR_SHORT_BUFFER ((TEEC_Result)0xFFFF0010)
+
+#define TEEC_ORIGIN_API 0x00000001
+#define TEEC_ORIGIN_COMMS 0x00000002
+#define TEEC_ORIGIN_TEE 0x00000003
+#define TEEC_ORIGIN_TRUSTED_APP 0x00000004
+
+#define TEEC_MEM_INPUT 0x00000001
+#define TEEC_MEM_OUTPUT 0x00000002
+
+#define TEEC_NONE 0x0
+#define TEEC_VALUE_INPUT 0x1
+#define TEEC_VALUE_OUTPUT 0x2
+#define TEEC_VALUE_INOUT 0x3
+#define TEEC_MEMREF_TEMP_INPUT 0x5
+#define TEEC_MEMREF_TEMP_OUTPUT 0x6
+#define TEEC_MEMREF_TEMP_INOUT 0x7
+#define TEEC_MEMREF_WHOLE 0xC
+#define TEEC_MEMREF_PARTIAL_INPUT 0xD
+#define TEEC_MEMREF_PARTIAL_OUTPUT 0xE
+#define TEEC_MEMREF_PARTIAL_INOUT 0xF
+
+#define TEEC_LOGIN_PUBLIC 0x00000000
+#define TEEC_LOGIN_USER 0x00000001
+#define TEEC_LOGIN_GROUP 0x00000002
+#define TEEC_LOGIN_APPLICATION 0x00000004
+#define TEEC_LOGIN_USER_APPLICATION 0x00000005
+#define TEEC_LOGIN_GROUP_APPLICATION 0x00000006
+
+TEEC_Result TEEC_EXPORT TEEC_InitializeContext(
+ const char *name,
+ TEEC_Context * context);
+
+void TEEC_EXPORT TEEC_FinalizeContext(
+ TEEC_Context * context);
+
+TEEC_Result TEEC_EXPORT TEEC_RegisterSharedMemory(
+ TEEC_Context * context,
+ TEEC_SharedMemory *sharedMem);
+
+TEEC_Result TEEC_EXPORT TEEC_AllocateSharedMemory(
+ TEEC_Context * context,
+ TEEC_SharedMemory *sharedMem);
+
+void TEEC_EXPORT TEEC_ReleaseSharedMemory(
+ TEEC_SharedMemory *sharedMem);
+
+TEEC_Result TEEC_EXPORT TEEC_OpenSession(
+ TEEC_Context * context,
+ TEEC_Session * session,
+ const TEEC_UUID * destination,
+ uint32_t connectionMethod,
+ void *connectionData,
+ TEEC_Operation * operation,
+ uint32_t *errorOrigin);
+
+void TEEC_EXPORT TEEC_CloseSession(
+ TEEC_Session * session);
+
+TEEC_Result TEEC_EXPORT TEEC_InvokeCommand(
+ TEEC_Session * session,
+ uint32_t commandID,
+ TEEC_Operation * operation,
+ uint32_t *errorOrigin);
+
+void TEEC_EXPORT TEEC_RequestCancellation(
+ TEEC_Operation * operation);
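+
+/*
+ * Typical client call sequence (a minimal sketch based on the declarations
+ * above; the UUID value and command ID are placeholders, not a real
+ * service):
+ *
+ *	TEEC_Context context;
+ *	TEEC_Session session;
+ *	TEEC_Operation operation;
+ *	uint32_t origin;
+ *	static const TEEC_UUID uuid = { 0 };
+ *
+ *	if (TEEC_InitializeContext(NULL, &context) != TEEC_SUCCESS)
+ *		return;
+ *	memset(&operation, 0, sizeof(operation));
+ *	operation.paramTypes = TEEC_PARAM_TYPES(TEEC_VALUE_INPUT,
+ *		TEEC_NONE, TEEC_NONE, TEEC_NONE);
+ *	operation.params[0].value.a = 42;
+ *	if (TEEC_OpenSession(&context, &session, &uuid, TEEC_LOGIN_PUBLIC,
+ *			NULL, &operation, &origin) == TEEC_SUCCESS) {
+ *		TEEC_InvokeCommand(&session, 1, &operation, &origin);
+ *		TEEC_CloseSession(&session);
+ *	}
+ *	TEEC_FinalizeContext(&context);
+ */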
+
+#include "tee_client_api_ex.h"
+
+#endif /* __TEE_CLIENT_API_H__ */
diff --git a/security/tf_driver/tee_client_api_ex.h b/security/tf_driver/tee_client_api_ex.h
new file mode 100644
index 000000000000..3025308a818d
--- /dev/null
+++ b/security/tf_driver/tee_client_api_ex.h
@@ -0,0 +1,60 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * This header file contains extensions to the TEE Client API that are
+ * specific to the Trusted Foundations implementations
+ */
+#ifndef __TEE_CLIENT_API_EX_H__
+#define __TEE_CLIENT_API_EX_H__
+
+#include <linux/types.h>
+
+/* Implementation-defined login types */
+#define TEEC_LOGIN_AUTHENTICATION 0x80000000
+#define TEEC_LOGIN_PRIVILEGED 0x80000002
+#define TEEC_LOGIN_PRIVILEGED_KERNEL 0x80000002
+
+/* Type definitions */
+
+typedef u64 TEEC_TimeLimit;
+
+void TEEC_EXPORT TEEC_GetTimeLimit(
+ TEEC_Context * context,
+ uint32_t timeout,
+ TEEC_TimeLimit *timeLimit);
+
+TEEC_Result TEEC_EXPORT TEEC_OpenSessionEx(
+ TEEC_Context * context,
+ TEEC_Session * session,
+ const TEEC_TimeLimit *timeLimit,
+ const TEEC_UUID * destination,
+ uint32_t connectionMethod,
+ void *connectionData,
+ TEEC_Operation * operation,
+ uint32_t *errorOrigin);
+
+TEEC_Result TEEC_EXPORT TEEC_InvokeCommandEx(
+ TEEC_Session * session,
+ const TEEC_TimeLimit *timeLimit,
+ uint32_t commandID,
+ TEEC_Operation * operation,
+ uint32_t *errorOrigin);
+
+#endif /* __TEE_CLIENT_API_EX_H__ */
diff --git a/security/tf_driver/tee_client_api_imp.h b/security/tf_driver/tee_client_api_imp.h
new file mode 100644
index 000000000000..e3053b301ad6
--- /dev/null
+++ b/security/tf_driver/tee_client_api_imp.h
@@ -0,0 +1,68 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * This header file defines the implementation-dependent types,
+ * constants and macros for all the Trusted Foundations implementations
+ * of the TEE Client API
+ */
+#ifndef __TEE_CLIENT_API_IMP_H__
+#define __TEE_CLIENT_API_IMP_H__
+
+#include <linux/types.h>
+
+typedef u32 TEEC_Result;
+
+typedef struct TEEC_UUID {
+ uint32_t time_low;
+ uint16_t time_mid;
+ uint16_t time_hi_and_version;
+ uint8_t clock_seq_and_node[8];
+} TEEC_UUID;
+
+typedef struct {
+ struct tf_connection *_connection;
+} TEEC_Context_IMP;
+
+typedef struct {
+ struct TEEC_Context *_context;
+ u32 _client_session;
+} TEEC_Session_IMP;
+
+typedef struct {
+ struct TEEC_Context *_context;
+ u32 _block;
+ bool _allocated;
+} TEEC_SharedMemory_IMP;
+
+typedef struct {
+ struct TEEC_Session *_pSession;
+} TEEC_Operation_IMP;
+
+/* There is no natural, compile-time limit on the shared memory, but a specific
+ * implementation may introduce a limit (in particular on TrustZone)
+ */
+#define TEEC_CONFIG_SHAREDMEM_MAX_SIZE ((size_t)0xFFFFFFFF)
+
+#define TEEC_PARAM_TYPES(entry0Type, entry1Type, entry2Type, entry3Type) \
+ ((entry0Type) | ((entry1Type) << 4) | \
+ ((entry2Type) << 8) | ((entry3Type) << 12))
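+
+/*
+ * Example, following directly from the macro above:
+ *   TEEC_PARAM_TYPES(TEEC_VALUE_INPUT, TEEC_MEMREF_TEMP_OUTPUT,
+ *                    TEEC_NONE, TEEC_NONE)
+ * packs 4 bits per parameter and evaluates to 0x1 | (0x6 << 4) = 0x0061.
+ */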
+
+
+#endif /* __TEE_CLIENT_API_IMP_H__ */
diff --git a/security/tf_driver/tf_comm.c b/security/tf_driver/tf_comm.c
new file mode 100644
index 000000000000..01538249264f
--- /dev/null
+++ b/security/tf_driver/tf_comm.c
@@ -0,0 +1,1717 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <linux/version.h>
+#include <asm/cputype.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
+#include <linux/freezer.h>
+
+#include "tf_defs.h"
+#include "tf_comm.h"
+#include "tf_protocol.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+
+#ifdef CONFIG_TF_ZEBRA
+#include "tf_zebra.h"
+#endif
+
+/*---------------------------------------------------------------------------
+ * Internal Constants
+ *---------------------------------------------------------------------------*/
+
+/*
+ * shared memories descriptor constants
+ */
+#define DESCRIPTOR_B_MASK (1 << 2)
+#define DESCRIPTOR_C_MASK (1 << 3)
+#define DESCRIPTOR_S_MASK (1 << 10)
+
+#define L1_COARSE_DESCRIPTOR_BASE (0x00000001)
+#define L1_COARSE_DESCRIPTOR_ADDR_MASK (0xFFFFFC00)
+#define L1_COARSE_DESCRIPTOR_V13_12_SHIFT (5)
+
+#define L2_PAGE_DESCRIPTOR_BASE (0x00000003)
+#define L2_PAGE_DESCRIPTOR_AP_APX_READ (0x220)
+#define L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE (0x30)
+
+#define L2_INIT_DESCRIPTOR_BASE (0x00000003)
+#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4)
+
+/*
+ * Reject an attempt to share strongly-ordered or device memory
+ * Strongly-Ordered: TEX=0b000, C=0, B=0
+ * Shared Device: TEX=0b000, C=0, B=1
+ * Non-Shared Device: TEX=0b010, C=0, B=0
+ */
+#define L2_TEX_C_B_MASK \
+ ((1<<8) | (1<<7) | (1<<6) | (1<<3) | (1<<2))
+#define L2_TEX_C_B_STRONGLY_ORDERED \
+ ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (0<<2))
+#define L2_TEX_C_B_SHARED_DEVICE \
+ ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (1<<2))
+#define L2_TEX_C_B_NON_SHARED_DEVICE \
+ ((0<<8) | (1<<7) | (0<<6) | (0<<3) | (0<<2))
+
+#define CACHE_S(x) ((x) & (1 << 24))
+#define CACHE_DSIZE(x) (((x) >> 12) & 4095)
+
+#define TIME_IMMEDIATE ((u64) 0x0000000000000000ULL)
+#define TIME_INFINITE ((u64) 0xFFFFFFFFFFFFFFFFULL)
+
+/*---------------------------------------------------------------------------
+ * atomic operation definitions
+ *---------------------------------------------------------------------------*/
+
+/*
+ * Atomically updates the sync_serial_n and time_n registers.
+ * sync_serial_n and time_n modifications are thread-safe.
+ */
+void tf_set_current_time(struct tf_comm *comm)
+{
+ u32 new_sync_serial;
+ struct timeval now;
+ u64 time64;
+
+ /*
+ * lock the structure while updating the L1 shared memory fields
+ */
+ spin_lock(&comm->lock);
+
+ /* read sync_serial_n and change the TimeSlot bit field */
+ new_sync_serial =
+ tf_read_reg32(&comm->l1_buffer->sync_serial_n) + 1;
+
+ do_gettimeofday(&now);
+ time64 = now.tv_sec;
+ time64 = (time64 * 1000) + (now.tv_usec / 1000);
+
+ /* Write the new time64 and nSyncSerial into shared memory */
+ tf_write_reg64(&comm->l1_buffer->time_n[new_sync_serial &
+ TF_SYNC_SERIAL_TIMESLOT_N], time64);
+ tf_write_reg32(&comm->l1_buffer->sync_serial_n,
+ new_sync_serial);
+
+ spin_unlock(&comm->lock);
+}
+
+/*
+ * Performs the S-timeout read operation.
+ * The difficulty here is to atomically read two u32
+ * values from the L1 shared buffer.
+ * This is guaranteed by reading the timeslot given by the
+ * Secure World before and after the operation.
+ */
+static inline void tf_read_timeout(struct tf_comm *comm, u64 *time)
+{
+ u32 sync_serial_s_initial = 0;
+ u32 sync_serial_s_final = 1;
+ u64 time64;
+
+ spin_lock(&comm->lock);
+
+ while (sync_serial_s_initial != sync_serial_s_final) {
+ sync_serial_s_initial = tf_read_reg32(
+ &comm->l1_buffer->sync_serial_s);
+ time64 = tf_read_reg64(
+ &comm->l1_buffer->timeout_s[sync_serial_s_initial&1]);
+
+ sync_serial_s_final = tf_read_reg32(
+ &comm->l1_buffer->sync_serial_s);
+ }
+
+ spin_unlock(&comm->lock);
+
+ *time = time64;
+}
+
+/*----------------------------------------------------------------------------
+ * SIGKILL signal handling
+ *----------------------------------------------------------------------------*/
+
+static bool sigkill_pending(void)
+{
+ if (signal_pending(current)) {
+ dprintk(KERN_INFO "A signal is pending\n");
+ if (sigismember(&current->pending.signal, SIGKILL)) {
+ dprintk(KERN_INFO "A SIGKILL is pending\n");
+ return true;
+ } else if (sigismember(
+ &current->signal->shared_pending.signal, SIGKILL)) {
+ dprintk(KERN_INFO "A SIGKILL is pending (shared)\n");
+ return true;
+ }
+ }
+ return false;
+}
+
+/*----------------------------------------------------------------------------
+ * Shared memory related operations
+ *----------------------------------------------------------------------------*/
+
+struct tf_coarse_page_table *tf_alloc_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ u32 type)
+{
+ struct tf_coarse_page_table *coarse_pg_table = NULL;
+
+ spin_lock(&(alloc_context->lock));
+
+ if (!(list_empty(&(alloc_context->free_coarse_page_tables)))) {
+ /*
+ * The free list can provide us a coarse page table
+ * descriptor
+ */
+ coarse_pg_table = list_first_entry(
+ &alloc_context->free_coarse_page_tables,
+ struct tf_coarse_page_table, list);
+ list_del(&(coarse_pg_table->list));
+
+ coarse_pg_table->parent->ref_count++;
+ } else {
+ /* no array of coarse page tables, create a new one */
+ struct tf_coarse_page_table_array *array;
+ void *page;
+ int i;
+
+ spin_unlock(&(alloc_context->lock));
+
+ /* first allocate a new page descriptor */
+ array = internal_kmalloc(sizeof(*array), GFP_KERNEL);
+ if (array == NULL) {
+ dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
+ " failed to allocate a table array\n",
+ alloc_context);
+ return NULL;
+ }
+
+ array->type = type;
+ array->ref_count = 0;
+ INIT_LIST_HEAD(&(array->list));
+
+ /* now allocate the actual page the page descriptor describes */
+ page = (void *) internal_get_zeroed_page(GFP_KERNEL);
+ if (page == NULL) {
+ dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
+				" failed to allocate a page\n",
+ alloc_context);
+ internal_kfree(array);
+ return NULL;
+ }
+
+ spin_lock(&(alloc_context->lock));
+
+ /* initialize the coarse page table descriptors */
+ for (i = 0; i < 4; i++) {
+ INIT_LIST_HEAD(&(array->coarse_page_tables[i].list));
+ array->coarse_page_tables[i].descriptors =
+ page + (i * SIZE_1KB);
+ array->coarse_page_tables[i].parent = array;
+
+ if (i == 0) {
+ /*
+ * the first element is kept for the current
+ * coarse page table allocation
+ */
+ coarse_pg_table =
+ &(array->coarse_page_tables[i]);
+ array->ref_count++;
+ } else {
+ /*
+ * The other elements are added to the free list
+ */
+ list_add(&(array->coarse_page_tables[i].list),
+ &(alloc_context->
+ free_coarse_page_tables));
+ }
+ }
+
+ list_add(&(array->list),
+ &(alloc_context->coarse_page_table_arrays));
+ }
+ spin_unlock(&(alloc_context->lock));
+
+ return coarse_pg_table;
+}
+
+
+void tf_free_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_coarse_page_table *coarse_pg_table,
+ int force)
+{
+ struct tf_coarse_page_table_array *array;
+
+ spin_lock(&(alloc_context->lock));
+
+ array = coarse_pg_table->parent;
+
+ (array->ref_count)--;
+
+ if (array->ref_count == 0) {
+ /*
+ * no coarse page table descriptor is used
+ * check if we should free the whole page
+ */
+
+ if ((array->type == TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED)
+ && (force == 0))
+ /*
+ * This is a preallocated page,
+ * add the page back to the free list
+ */
+ list_add(&(coarse_pg_table->list),
+ &(alloc_context->free_coarse_page_tables));
+ else {
+ /*
+ * None of the page's coarse page table descriptors
+ * are in use, free the whole page
+ */
+ int i;
+ u32 *descriptors;
+
+ /*
+ * remove the page's associated coarse page table
+ * descriptors from the free list
+ */
+ for (i = 0; i < 4; i++)
+ if (&(array->coarse_page_tables[i]) !=
+ coarse_pg_table)
+ list_del(&(array->
+ coarse_page_tables[i].list));
+
+ descriptors =
+ array->coarse_page_tables[0].descriptors;
+ array->coarse_page_tables[0].descriptors = NULL;
+
+ /* remove the coarse page table from the array */
+ list_del(&(array->list));
+
+ spin_unlock(&(alloc_context->lock));
+ /*
+ * Free the page.
+ * The address of the page is contained in the first
+ * element
+ */
+ internal_free_page((unsigned long) descriptors);
+			/* finally free the array */
+ internal_kfree(array);
+
+ spin_lock(&(alloc_context->lock));
+ }
+ } else {
+ /*
+ * Some coarse page table descriptors are in use.
+ * Add the descriptor to the free list
+ */
+ list_add(&(coarse_pg_table->list),
+ &(alloc_context->free_coarse_page_tables));
+ }
+
+ spin_unlock(&(alloc_context->lock));
+}
+
+
+void tf_init_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context)
+{
+ spin_lock_init(&(alloc_context->lock));
+ INIT_LIST_HEAD(&(alloc_context->coarse_page_table_arrays));
+ INIT_LIST_HEAD(&(alloc_context->free_coarse_page_tables));
+}
+
+void tf_release_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context)
+{
+ spin_lock(&(alloc_context->lock));
+
+ /* now clean up the list of page descriptors */
+ while (!list_empty(&(alloc_context->coarse_page_table_arrays))) {
+ struct tf_coarse_page_table_array *page_desc;
+ u32 *descriptors;
+
+ page_desc = list_first_entry(
+ &alloc_context->coarse_page_table_arrays,
+ struct tf_coarse_page_table_array, list);
+
+ descriptors = page_desc->coarse_page_tables[0].descriptors;
+ list_del(&(page_desc->list));
+
+ spin_unlock(&(alloc_context->lock));
+
+ if (descriptors != NULL)
+ internal_free_page((unsigned long)descriptors);
+
+ internal_kfree(page_desc);
+
+ spin_lock(&(alloc_context->lock));
+ }
+
+ spin_unlock(&(alloc_context->lock));
+}
+
+/*
+ * Returns the L1 coarse page descriptor for
+ * a coarse page table located at address coarse_pg_table_descriptors
+ */
+u32 tf_get_l1_coarse_descriptor(
+ u32 coarse_pg_table_descriptors[256])
+{
+ u32 descriptor = L1_COARSE_DESCRIPTOR_BASE;
+ unsigned int info = read_cpuid(CPUID_CACHETYPE);
+
+ descriptor |= (virt_to_phys((void *) coarse_pg_table_descriptors)
+ & L1_COARSE_DESCRIPTOR_ADDR_MASK);
+
+ if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11))) {
+ dprintk(KERN_DEBUG "tf_get_l1_coarse_descriptor "
+ "V31-12 added to descriptor\n");
+ /* the 16k alignment restriction applies */
+ descriptor |= (DESCRIPTOR_V13_12_GET(
+ (u32)coarse_pg_table_descriptors) <<
+ L1_COARSE_DESCRIPTOR_V13_12_SHIFT);
+ }
+
+ return descriptor;
+}
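+
+/*
+ * Example, following from the masks above: a coarse page table whose
+ * descriptors start at physical address 0x80000400 yields the L1
+ * descriptor (0x80000400 & L1_COARSE_DESCRIPTOR_ADDR_MASK) |
+ * L1_COARSE_DESCRIPTOR_BASE = 0x80000401, with the V13_12 bits ORed in
+ * only when the 16k cache-alignment restriction applies.
+ */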
+
+
+#define dprintk_desc(...)
+/*
+ * Returns the L2 descriptor for the specified user page.
+ */
+u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep;
+ u32 *hwpte;
+ u32 tex = 0;
+ u32 descriptor = 0;
+
+ dprintk_desc(KERN_INFO "VirtAddr = %x\n", vaddr);
+ pgd = pgd_offset(mm, vaddr);
+ dprintk_desc(KERN_INFO "pgd = %x, value=%x\n", (unsigned int) pgd,
+ (unsigned int) *pgd);
+ if (pgd_none(*pgd))
+ goto error;
+ pud = pud_offset(pgd, vaddr);
+ dprintk_desc(KERN_INFO "pud = %x, value=%x\n", (unsigned int) pud,
+ (unsigned int) *pud);
+ if (pud_none(*pud))
+ goto error;
+ pmd = pmd_offset(pud, vaddr);
+ dprintk_desc(KERN_INFO "pmd = %x, value=%x\n", (unsigned int) pmd,
+ (unsigned int) *pmd);
+ if (pmd_none(*pmd))
+ goto error;
+
+ if (PMD_TYPE_SECT&(*pmd)) {
+ /* We have a section */
+ dprintk_desc(KERN_INFO "Section descr=%x\n",
+ (unsigned int)*pmd);
+ if ((*pmd) & PMD_SECT_BUFFERABLE)
+ descriptor |= DESCRIPTOR_B_MASK;
+ if ((*pmd) & PMD_SECT_CACHEABLE)
+ descriptor |= DESCRIPTOR_C_MASK;
+ if ((*pmd) & PMD_SECT_S)
+ descriptor |= DESCRIPTOR_S_MASK;
+ tex = ((*pmd) >> 12) & 7;
+ } else {
+ /* We have a table */
+ ptep = pte_offset_map(pmd, vaddr);
+ if (pte_present(*ptep)) {
+ dprintk_desc(KERN_INFO "L2 descr=%x\n",
+ (unsigned int) *ptep);
+ if ((*ptep) & L_PTE_MT_BUFFERABLE)
+ descriptor |= DESCRIPTOR_B_MASK;
+ if ((*ptep) & L_PTE_MT_WRITETHROUGH)
+ descriptor |= DESCRIPTOR_C_MASK;
+ if ((*ptep) & L_PTE_MT_DEV_SHARED)
+ descriptor |= DESCRIPTOR_S_MASK;
+
+ /*
+			 * Linux's pte doesn't keep track of the TEX value.
+			 * Have to jump to hwpte; see include/asm/pgtable.h.
+ */
+#ifdef PTE_HWTABLE_SIZE
+ hwpte = (u32 *) (ptep + PTE_HWTABLE_PTRS);
+#else
+ hwpte = (u32 *) (ptep - PTRS_PER_PTE);
+#endif
+ if (((*hwpte) & L2_DESCRIPTOR_ADDR_MASK) !=
+ ((*ptep) & L2_DESCRIPTOR_ADDR_MASK))
+ goto error;
+ dprintk_desc(KERN_INFO "hw descr=%x\n", *hwpte);
+ tex = ((*hwpte) >> 6) & 7;
+ pte_unmap(ptep);
+ } else {
+ pte_unmap(ptep);
+ goto error;
+ }
+ }
+
+ descriptor |= (tex << 6);
+
+ return descriptor;
+
+error:
+	dprintk(KERN_ERR "Error occurred in %s\n", __func__);
+ return 0;
+}
+
+
+/*
+ * Changes an L2 page descriptor back to a pointer to a physical page
+ */
+inline struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor)
+{
+ return pte_page(l2_page_descriptor & L2_DESCRIPTOR_ADDR_MASK);
+}
+
+
+/*
+ * Computes the L2 page descriptor for the page referenced by
+ * *l2_page_descriptor (which holds a struct page pointer on entry),
+ * applying the access rights requested in flags. On failure,
+ * *l2_page_descriptor is set to L2_DESCRIPTOR_FAULT.
+ */
+static void tf_get_l2_page_descriptor(
+ u32 *l2_page_descriptor,
+ u32 flags, struct mm_struct *mm)
+{
+ unsigned long page_vaddr;
+ u32 descriptor;
+ struct page *page;
+ bool unmap_page = false;
+
+#if 0
+ dprintk(KERN_INFO
+ "tf_get_l2_page_descriptor():"
+ "*l2_page_descriptor=%x\n",
+ *l2_page_descriptor);
+#endif
+
+ if (*l2_page_descriptor == L2_DESCRIPTOR_FAULT)
+ return;
+
+ page = (struct page *) (*l2_page_descriptor);
+
+ page_vaddr = (unsigned long) page_address(page);
+ if (page_vaddr == 0) {
+ dprintk(KERN_INFO "page_address returned 0\n");
+		/* Should we use kmap_atomic(page, KM_USER0) instead? */
+ page_vaddr = (unsigned long) kmap(page);
+ if (page_vaddr == 0) {
+ *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
+ dprintk(KERN_ERR "kmap returned 0\n");
+ return;
+ }
+ unmap_page = true;
+ }
+
+ descriptor = tf_get_l2_descriptor_common(page_vaddr, mm);
+ if (descriptor == 0) {
+ *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
+ return;
+ }
+ descriptor |= L2_PAGE_DESCRIPTOR_BASE;
+
+ descriptor |= (page_to_phys(page) & L2_DESCRIPTOR_ADDR_MASK);
+
+ if (!(flags & TF_SHMEM_TYPE_WRITE))
+ /* only read access */
+ descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ;
+ else
+ /* read and write access */
+ descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE;
+
+ if (unmap_page)
+ kunmap(page);
+
+ *l2_page_descriptor = descriptor;
+}
+
+
+/*
+ * Unlocks the physical memory pages
+ * and frees the coarse page tables that need to be freed
+ */
+void tf_cleanup_shared_memory(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup)
+{
+ u32 coarse_page_index;
+
+ dprintk(KERN_INFO "tf_cleanup_shared_memory(%p)\n",
+ shmem_desc);
+
+#ifdef DEBUG_COARSE_TABLES
+ printk(KERN_DEBUG "tf_cleanup_shared_memory "
+ "- number of coarse page tables=%d\n",
+ shmem_desc->coarse_pg_table_count);
+
+ for (coarse_page_index = 0;
+ coarse_page_index < shmem_desc->coarse_pg_table_count;
+ coarse_page_index++) {
+ u32 j;
+
+ printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
+ shmem_desc->coarse_pg_table[coarse_page_index],
+ shmem_desc->coarse_pg_table[coarse_page_index]->
+ descriptors,
+ coarse_page_index);
+ if (shmem_desc->coarse_pg_table[coarse_page_index] != NULL) {
+ for (j = 0;
+ j < TF_DESCRIPTOR_TABLE_CAPACITY;
+ j += 8) {
+ int k;
+ printk(KERN_DEBUG " ");
+ for (k = j; k < j + 8; k++)
+					printk(KERN_DEBUG "0x%08X ",
+						shmem_desc->coarse_pg_table[
+						coarse_page_index]->
+						descriptors[k]);
+ printk(KERN_DEBUG "\n");
+ }
+ }
+ }
+ printk(KERN_DEBUG "tf_cleanup_shared_memory() - done\n\n");
+#endif
+
+ /* Parse the coarse page descriptors */
+ for (coarse_page_index = 0;
+ coarse_page_index < shmem_desc->coarse_pg_table_count;
+ coarse_page_index++) {
+ u32 j;
+ u32 found = 0;
+
+ /* parse the page descriptors of the coarse page */
+ for (j = 0; j < TF_DESCRIPTOR_TABLE_CAPACITY; j++) {
+ u32 l2_page_descriptor = (u32) (shmem_desc->
+ coarse_pg_table[coarse_page_index]->
+ descriptors[j]);
+
+ if (l2_page_descriptor != L2_DESCRIPTOR_FAULT) {
+ struct page *page =
+ tf_l2_page_descriptor_to_page(
+ l2_page_descriptor);
+
+ if (!PageReserved(page))
+ SetPageDirty(page);
+ internal_page_cache_release(page);
+
+ found = 1;
+ } else if (found == 1) {
+ break;
+ }
+ }
+
+ /*
+		 * Only free coarse page tables that were not preallocated
+ */
+ if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
+ (full_cleanup != 0))
+ tf_free_coarse_page_table(alloc_context,
+ shmem_desc->coarse_pg_table[coarse_page_index],
+ 0);
+ }
+
+ shmem_desc->coarse_pg_table_count = 0;
+ dprintk(KERN_INFO "tf_cleanup_shared_memory(%p) done\n",
+ shmem_desc);
+}
+
+/*
+ * Make sure the coarse pages are allocated. If not allocated, do it.
+ * Locks down the physical memory pages.
+ * Verifies the memory attributes depending on flags.
+ */
+int tf_fill_descriptor_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 buffer,
+ struct vm_area_struct **vmas,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 buffer_size,
+ u32 *buffer_start_offset,
+ bool in_user_space,
+ u32 flags,
+ u32 *descriptor_count)
+{
+ u32 coarse_page_index;
+ u32 coarse_page_count;
+ u32 page_count;
+ u32 page_shift = 0;
+ int ret = 0;
+ unsigned int info = read_cpuid(CPUID_CACHETYPE);
+
+ dprintk(KERN_INFO "tf_fill_descriptor_table"
+ "(%p, buffer=0x%08X, size=0x%08X, user=%01x "
+ "flags = 0x%08x)\n",
+ shmem_desc,
+ buffer,
+ buffer_size,
+ in_user_space,
+ flags);
+
+ /*
+ * Compute the number of pages
+ * Compute the number of coarse pages
+ * Compute the page offset
+ */
+ page_count = ((buffer & ~PAGE_MASK) +
+ buffer_size + ~PAGE_MASK) >> PAGE_SHIFT;
+
+ /* check whether the 16k alignment restriction applies */
+ if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11)))
+ /*
+ * The 16k alignment restriction applies.
+ * Shift data to get them 16k aligned
+ */
+ page_shift = DESCRIPTOR_V13_12_GET(buffer);
+ page_count += page_shift;
+
+
+ /*
+ * Check the number of pages fit in the coarse pages
+ */
+ if (page_count > (TF_DESCRIPTOR_TABLE_CAPACITY *
+ TF_MAX_COARSE_PAGES)) {
+ dprintk(KERN_ERR "tf_fill_descriptor_table(%p): "
+ "%u pages required to map shared memory!\n",
+ shmem_desc, page_count);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+	/* each coarse page table describes 256 pages */
+ coarse_page_count = ((page_count +
+ TF_DESCRIPTOR_TABLE_CAPACITY_MASK) >>
+ TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT);
+
+ /*
+ * Compute the buffer offset
+ */
+ *buffer_start_offset = (buffer & ~PAGE_MASK) |
+ (page_shift << PAGE_SHIFT);
+
+ /* map each coarse page */
+ for (coarse_page_index = 0;
+ coarse_page_index < coarse_page_count;
+ coarse_page_index++) {
+ u32 j;
+ struct tf_coarse_page_table *coarse_pg_table;
+
+ /* compute a virtual address with appropriate offset */
+ u32 buffer_offset_vaddr = buffer +
+ (coarse_page_index * TF_MAX_COARSE_PAGE_MAPPED_SIZE);
+ u32 pages_to_get;
+
+ /*
+ * Compute the number of pages left for this coarse page.
+ * Decrement page_count each time
+ */
+ pages_to_get = (page_count >>
+ TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT) ?
+ TF_DESCRIPTOR_TABLE_CAPACITY : page_count;
+ page_count -= pages_to_get;
+
+ /*
+ * Check if the coarse page has already been allocated
+ * If not, do it now
+ */
+ if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM)
+ || (shmem_desc->type ==
+ TF_SHMEM_TYPE_PM_HIBERNATE)) {
+ coarse_pg_table = tf_alloc_coarse_page_table(
+ alloc_context,
+ TF_PAGE_DESCRIPTOR_TYPE_NORMAL);
+
+ if (coarse_pg_table == NULL) {
+ dprintk(KERN_ERR
+ "tf_fill_descriptor_table(%p): "
+ "tf_alloc_coarse_page_table "
+ "failed for coarse page %d\n",
+ shmem_desc, coarse_page_index);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ shmem_desc->coarse_pg_table[coarse_page_index] =
+ coarse_pg_table;
+ } else {
+ coarse_pg_table =
+ shmem_desc->coarse_pg_table[coarse_page_index];
+ }
+
+ /*
+ * The page is not necessarily filled with zeroes.
+		 * Set the fault descriptors (each descriptor is 4 bytes long)
+ */
+ memset(coarse_pg_table->descriptors, 0x00,
+ TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
+
+ if (in_user_space) {
+ int pages;
+
+ /*
+ * TRICK: use pCoarsePageDescriptor->descriptors to
+ * hold the (struct page*) items before getting their
+ * physical address
+ */
+ down_read(&(current->mm->mmap_sem));
+ pages = internal_get_user_pages(
+ current,
+ current->mm,
+ buffer_offset_vaddr,
+ /*
+ * page_shift is cleared after retrieving first
+ * coarse page
+ */
+ (pages_to_get - page_shift),
+ (flags & TF_SHMEM_TYPE_WRITE) ? 1 : 0,
+ 0,
+ (struct page **) (coarse_pg_table->descriptors
+ + page_shift),
+ vmas);
+ up_read(&(current->mm->mmap_sem));
+
+ if ((pages <= 0) ||
+ (pages != (pages_to_get - page_shift))) {
+ dprintk(KERN_ERR "tf_fill_descriptor_table:"
+ " get_user_pages got %d pages while "
+ "trying to get %d pages!\n",
+ pages, pages_to_get - page_shift);
+ ret = -EFAULT;
+ goto error;
+ }
+
+ for (j = page_shift;
+ j < page_shift + pages;
+ j++) {
+ /* Get the actual L2 descriptors */
+ tf_get_l2_page_descriptor(
+ &coarse_pg_table->descriptors[j],
+ flags,
+ current->mm);
+ /*
+ * Reject Strongly-Ordered or Device Memory
+ */
+#define IS_STRONGLY_ORDERED_OR_DEVICE_MEM(x) \
+ ((((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_STRONGLY_ORDERED) || \
+ (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_SHARED_DEVICE) || \
+ (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_NON_SHARED_DEVICE))
+
+ if (IS_STRONGLY_ORDERED_OR_DEVICE_MEM(
+ coarse_pg_table->
+ descriptors[j])) {
+ dprintk(KERN_ERR
+ "tf_fill_descriptor_table:"
+						" descriptor 0x%08X uses "
+ "strongly-ordered or device "
+ "memory. Rejecting!\n",
+ coarse_pg_table->
+ descriptors[j]);
+ ret = -EFAULT;
+ goto error;
+ }
+ }
+ } else {
+ /* Kernel-space memory */
+ dprintk(KERN_INFO
+ "tf_fill_descriptor_table: "
+ "buffer starting at %p\n",
+ (void *)buffer_offset_vaddr);
+ for (j = page_shift; j < pages_to_get; j++) {
+ struct page *page;
+ void *addr =
+ (void *)(buffer_offset_vaddr +
+ (j - page_shift) * PAGE_SIZE);
+
+ if (is_vmalloc_addr(
+ (void *) buffer_offset_vaddr))
+ page = vmalloc_to_page(addr);
+ else
+ page = virt_to_page(addr);
+
+ if (page == NULL) {
+ dprintk(KERN_ERR
+ "tf_fill_descriptor_table: "
+ "cannot map %p (vmalloc) "
+ "to page\n",
+ addr);
+ ret = -EFAULT;
+ goto error;
+ }
+ coarse_pg_table->descriptors[j] = (u32)page;
+ get_page(page);
+
+ /* change coarse page "page address" */
+ tf_get_l2_page_descriptor(
+ &coarse_pg_table->descriptors[j],
+ flags,
+ &init_mm);
+ }
+ }
+
+ dmac_flush_range((void *)coarse_pg_table->descriptors,
+ (void *)(((u32)(coarse_pg_table->descriptors)) +
+ TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32)));
+
+ outer_clean_range(
+ __pa(coarse_pg_table->descriptors),
+ __pa(coarse_pg_table->descriptors) +
+ TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
+ wmb();
+
+ /* Update the coarse page table address */
+ descriptors[coarse_page_index] =
+ tf_get_l1_coarse_descriptor(
+ coarse_pg_table->descriptors);
+
+ /*
+ * The next coarse page has no page shift, reset the
+ * page_shift
+ */
+ page_shift = 0;
+ }
+
+ *descriptor_count = coarse_page_count;
+ shmem_desc->coarse_pg_table_count = coarse_page_count;
+
+#ifdef DEBUG_COARSE_TABLES
+	printk(KERN_DEBUG "tf_fill_descriptor_table - size=0x%08X "
+ "numberOfCoarsePages=%d\n", buffer_size,
+ shmem_desc->coarse_pg_table_count);
+ for (coarse_page_index = 0;
+ coarse_page_index < shmem_desc->coarse_pg_table_count;
+ coarse_page_index++) {
+ u32 j;
+ struct tf_coarse_page_table *coarse_page_table =
+ shmem_desc->coarse_pg_table[coarse_page_index];
+
+ printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
+ coarse_page_table,
+ coarse_page_table->descriptors,
+ coarse_page_index);
+ for (j = 0;
+ j < TF_DESCRIPTOR_TABLE_CAPACITY;
+ j += 8) {
+ int k;
+ printk(KERN_DEBUG " ");
+ for (k = j; k < j + 8; k++)
+ printk(KERN_DEBUG "0x%08X ",
+ coarse_page_table->descriptors[k]);
+ printk(KERN_DEBUG "\n");
+ }
+ }
+	printk(KERN_DEBUG "tf_fill_descriptor_table() - done\n\n");
+#endif
+
+ return 0;
+
+error:
+ tf_cleanup_shared_memory(
+ alloc_context,
+ shmem_desc,
+ 0);
+
+ return ret;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Standard communication operations
+ *----------------------------------------------------------------------------*/
+
+u8 *tf_get_description(struct tf_comm *comm)
+{
+ if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
+ return comm->l1_buffer->version_description;
+
+ return NULL;
+}
+
+/*
+ * Returns a non-zero value if the specified S-timeout has expired, zero
+ * otherwise.
+ *
+ * The placeholder referenced to by relative_timeout_jiffies gives the relative
+ * timeout from now in jiffies. It is set to zero if the S-timeout has expired,
+ * or to MAX_SCHEDULE_TIMEOUT if the S-timeout is infinite.
+ */
+static int tf_test_s_timeout(
+ u64 timeout,
+ signed long *relative_timeout_jiffies)
+{
+ struct timeval now;
+ u64 time64;
+
+ *relative_timeout_jiffies = 0;
+
+ /* immediate timeout */
+ if (timeout == TIME_IMMEDIATE)
+ return 1;
+
+ /* infinite timeout */
+ if (timeout == TIME_INFINITE) {
+ dprintk(KERN_DEBUG "tf_test_s_timeout: "
+ "timeout is infinite\n");
+ *relative_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+ return 0;
+ }
+
+ do_gettimeofday(&now);
+ time64 = now.tv_sec;
+ /* will not overflow as operations are done on 64bit values */
+ time64 = (time64 * 1000) + (now.tv_usec / 1000);
+
+ /* timeout expired */
+ if (time64 >= timeout) {
+ dprintk(KERN_DEBUG "tf_test_s_timeout: timeout expired\n");
+ return 1;
+ }
+
+ /*
+ * finite timeout, compute relative_timeout_jiffies
+ */
+ /* will not overflow as time64 < timeout */
+ timeout -= time64;
+
+ /* guarantee *relative_timeout_jiffies is a valid timeout */
+ if ((timeout >> 32) != 0)
+ *relative_timeout_jiffies = MAX_JIFFY_OFFSET;
+ else
+ *relative_timeout_jiffies =
+ msecs_to_jiffies((unsigned int) timeout);
+
+ dprintk(KERN_DEBUG "tf_test_s_timeout: timeout is 0x%lx\n",
+ *relative_timeout_jiffies);
+ return 0;
+}
+
+static void tf_copy_answers(struct tf_comm *comm)
+{
+ u32 first_answer;
+ u32 first_free_answer;
+ struct tf_answer_struct *answerStructureTemp;
+
+ if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
+ spin_lock(&comm->lock);
+ first_free_answer = tf_read_reg32(
+ &comm->l1_buffer->first_free_answer);
+ first_answer = tf_read_reg32(
+ &comm->l1_buffer->first_answer);
+
+ while (first_answer != first_free_answer) {
+ /* answer queue not empty */
+ union tf_answer sComAnswer;
+ struct tf_answer_header header;
+
+ /*
+ * the size of the command in words of 32bit, not in
+ * bytes
+ */
+ u32 command_size;
+ u32 i;
+ u32 *temp = (uint32_t *) &header;
+
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_answers(%p): "
+ "Read answers from L1\n",
+ current->pid, comm);
+
+ /* Read the answer header */
+ for (i = 0;
+ i < sizeof(struct tf_answer_header)/sizeof(u32);
+ i++)
+ temp[i] = comm->l1_buffer->answer_queue[
+ (first_answer + i) %
+ TF_S_ANSWER_QUEUE_CAPACITY];
+
+ /* Read the answer from the L1_Buffer*/
+ command_size = header.message_size +
+ sizeof(struct tf_answer_header)/sizeof(u32);
+ temp = (uint32_t *) &sComAnswer;
+ for (i = 0; i < command_size; i++)
+ temp[i] = comm->l1_buffer->answer_queue[
+ (first_answer + i) %
+ TF_S_ANSWER_QUEUE_CAPACITY];
+
+ answerStructureTemp = (struct tf_answer_struct *)
+ sComAnswer.header.operation_id;
+
+ tf_dump_answer(&sComAnswer);
+
+ memcpy(answerStructureTemp->answer, &sComAnswer,
+ command_size * sizeof(u32));
+ answerStructureTemp->answer_copied = true;
+
+ first_answer += command_size;
+ tf_write_reg32(&comm->l1_buffer->first_answer,
+ first_answer);
+ }
+ spin_unlock(&(comm->lock));
+ }
+}
+
+static void tf_copy_command(
+ struct tf_comm *comm,
+ union tf_command *command,
+ struct tf_connection *connection,
+ enum TF_COMMAND_STATE *command_status)
+{
+ if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
+ && (command != NULL)) {
+ /*
+ * Write the message in the message queue.
+ */
+
+ if (*command_status == TF_COMMAND_STATE_PENDING) {
+ u32 command_size;
+ u32 queue_words_count;
+ u32 i;
+ u32 first_free_command;
+ u32 first_command;
+
+ spin_lock(&comm->lock);
+
+ first_command = tf_read_reg32(
+ &comm->l1_buffer->first_command);
+ first_free_command = tf_read_reg32(
+ &comm->l1_buffer->first_free_command);
+
+ queue_words_count = first_free_command - first_command;
+ command_size = command->header.message_size +
+ sizeof(struct tf_command_header)/sizeof(u32);
+ if ((queue_words_count + command_size) <
+ TF_N_MESSAGE_QUEUE_CAPACITY) {
+ /*
+ * Command queue is not full.
+ * If the Command queue is full,
+ * the command will be copied at
+ * another iteration
+ * of the current function.
+ */
+
+ /*
+ * Change the conn state
+ */
+ if (connection == NULL)
+ goto copy;
+
+ spin_lock(&(connection->state_lock));
+
+ if ((connection->state ==
+ TF_CONN_STATE_NO_DEVICE_CONTEXT)
+ &&
+ (command->header.message_type ==
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
+
+ dprintk(KERN_INFO
+ "tf_copy_command(%p):"
+ "Conn state is DEVICE_CONTEXT_SENT\n",
+ connection);
+ connection->state =
+ TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT;
+ } else if ((connection->state !=
+ TF_CONN_STATE_VALID_DEVICE_CONTEXT)
+ &&
+ (command->header.message_type !=
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
+ /* The connection
+ * is no longer valid.
+ * We may not send any command on it,
+ * not even another
+ * DESTROY_DEVICE_CONTEXT.
+ */
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_command(%p): "
+					"Connection no longer valid. "
+ "ABORT\n",
+ current->pid, connection);
+ *command_status =
+ TF_COMMAND_STATE_ABORTED;
+ spin_unlock(
+ &(connection->state_lock));
+ spin_unlock(
+ &comm->lock);
+ return;
+ } else if (
+ (command->header.message_type ==
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) &&
+ (connection->state ==
+ TF_CONN_STATE_VALID_DEVICE_CONTEXT)
+ ) {
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_command(%p): "
+ "Conn state is "
+ "DESTROY_DEVICE_CONTEXT_SENT\n",
+ current->pid, connection);
+ connection->state =
+ TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT;
+ }
+ spin_unlock(&(connection->state_lock));
+copy:
+ /*
+ * Copy the command to L1 Buffer
+ */
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_command(%p): "
+ "Write Message in the queue\n",
+ current->pid, command);
+ tf_dump_command(command);
+
+ for (i = 0; i < command_size; i++)
+ comm->l1_buffer->command_queue[
+ (first_free_command + i) %
+ TF_N_MESSAGE_QUEUE_CAPACITY] =
+ ((uint32_t *) command)[i];
+
+ *command_status =
+ TF_COMMAND_STATE_SENT;
+ first_free_command += command_size;
+
+ tf_write_reg32(
+ &comm->
+ l1_buffer->first_free_command,
+ first_free_command);
+ }
+ spin_unlock(&comm->lock);
+ }
+ }
+}
+
+/*
+ * Sends the specified message through the specified communication channel.
+ *
+ * This function sends the command and waits for the answer
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static int tf_send_recv(struct tf_comm *comm,
+ union tf_command *command,
+ struct tf_answer_struct *answerStruct,
+ struct tf_connection *connection,
+ int bKillable
+ )
+{
+ int result;
+ u64 timeout;
+ signed long nRelativeTimeoutJiffies;
+ bool wait_prepared = false;
+ enum TF_COMMAND_STATE command_status = TF_COMMAND_STATE_PENDING;
+ DEFINE_WAIT(wait);
+#ifdef CONFIG_FREEZER
+ unsigned long saved_flags;
+#endif
+ dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p)\n",
+ current->pid, command);
+
+#ifdef CONFIG_FREEZER
+ saved_flags = current->flags;
+ current->flags |= PF_KTHREAD;
+#endif
+
+ /*
+ * Read all answers from the answer queue
+ */
+copy_answers:
+ tf_copy_answers(comm);
+
+ tf_copy_command(comm, command, connection, &command_status);
+
+ /*
+ * Notify all waiting threads
+ */
+ wake_up(&(comm->wait_queue));
+
+#ifdef CONFIG_FREEZER
+ if (unlikely(freezing(current))) {
+
+ dprintk(KERN_INFO
+ "Entering refrigerator.\n");
+ try_to_freeze();
+ dprintk(KERN_INFO
+ "Left refrigerator.\n");
+ goto copy_answers;
+ }
+#endif
+
+#ifndef CONFIG_PREEMPT
+ if (need_resched())
+ schedule();
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * Handle RPC (if any)
+ */
+ if (tf_rpc_execute(comm) == RPC_NON_YIELD)
+ goto schedule_secure_world;
+#endif
+
+ /*
+ * Join wait queue
+ */
+ /*dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p): Prepare to wait\n",
+ current->pid, command);*/
+ prepare_to_wait(&comm->wait_queue, &wait,
+ bKillable ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ wait_prepared = true;
+
+ /*
+ * Check if our answer is available
+ */
+ if (command_status == TF_COMMAND_STATE_ABORTED) {
+ /* Not waiting for an answer, return error code */
+ result = -EINTR;
+ dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+			"Command status is ABORTED. "
+ "Exit with 0x%x\n",
+ current->pid, result);
+ goto exit;
+ }
+ if (answerStruct->answer_copied) {
+ dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
+ "Received answer (type 0x%02X)\n",
+ current->pid,
+ answerStruct->answer->header.message_type);
+ result = 0;
+ goto exit;
+ }
+
+ /*
+ * Check if a signal is pending
+ */
+ if (bKillable && (sigkill_pending())) {
+ if (command_status == TF_COMMAND_STATE_PENDING)
+ /*Command was not sent. */
+ result = -EINTR;
+ else
+ /* Command was sent but no answer was received yet. */
+ result = -EIO;
+
+ dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+ "Signal Pending. Return error %d\n",
+ current->pid, result);
+ goto exit;
+ }
+
+ /*
+ * Check if secure world is schedulable. It is schedulable if at
+ * least one of the following conditions holds:
+ * + it is still initializing (TF_COMM_FLAG_L1_SHARED_ALLOCATED
+ * is not set);
+ * + there is a command in the queue;
+ * + the secure world timeout is zero.
+ */
+ if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
+ u32 first_free_command;
+ u32 first_command;
+ spin_lock(&comm->lock);
+ first_command = tf_read_reg32(
+ &comm->l1_buffer->first_command);
+ first_free_command = tf_read_reg32(
+ &comm->l1_buffer->first_free_command);
+ spin_unlock(&comm->lock);
+ tf_read_timeout(comm, &timeout);
+ if ((first_free_command == first_command) &&
+ (tf_test_s_timeout(timeout,
+ &nRelativeTimeoutJiffies) == 0))
+ /*
+ * If command queue is empty and if timeout has not
+ * expired secure world is not schedulable
+ */
+ goto wait;
+ }
+
+ finish_wait(&comm->wait_queue, &wait);
+ wait_prepared = false;
+
+ /*
+ * Yield to the Secure World
+ */
+#ifdef CONFIG_TF_ZEBRA
+schedule_secure_world:
+#endif
+
+ result = tf_schedule_secure_world(comm);
+ if (result < 0)
+ goto exit;
+ goto copy_answers;
+
+wait:
+ if (bKillable && (sigkill_pending())) {
+ if (command_status == TF_COMMAND_STATE_PENDING)
+ result = -EINTR; /* Command was not sent. */
+ else
+ /* Command was sent but no answer was received yet. */
+ result = -EIO;
+
+ dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+ "Signal Pending while waiting. Return error %d\n",
+ current->pid, result);
+ goto exit;
+ }
+
+ if (nRelativeTimeoutJiffies == MAX_SCHEDULE_TIMEOUT)
+ dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
+ "prepare to sleep infinitely\n", current->pid);
+ else
+ dprintk(KERN_INFO "tf_send_recv: "
+ "prepare to sleep 0x%lx jiffies\n",
+ nRelativeTimeoutJiffies);
+
+ /* go to sleep */
+ if (schedule_timeout(nRelativeTimeoutJiffies) == 0)
+ dprintk(KERN_INFO
+ "tf_send_recv: timeout expired\n");
+ else
+ dprintk(KERN_INFO
+ "tf_send_recv: signal delivered\n");
+
+ finish_wait(&comm->wait_queue, &wait);
+ wait_prepared = false;
+ goto copy_answers;
+
+exit:
+ if (wait_prepared) {
+ finish_wait(&comm->wait_queue, &wait);
+ wait_prepared = false;
+ }
+
+#ifdef CONFIG_FREEZER
+ current->flags &= ~(PF_KTHREAD);
+ current->flags |= (saved_flags & PF_KTHREAD);
+#endif
+
+ return result;
+}
+
+/*
+ * Sends the specified message through the specified communication channel.
+ *
+ * This function sends the message and waits for the corresponding answer
+ * It may return if a signal needs to be delivered.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+int tf_send_receive(struct tf_comm *comm,
+ union tf_command *command,
+ union tf_answer *answer,
+ struct tf_connection *connection,
+ bool bKillable)
+{
+ int error;
+ struct tf_answer_struct answerStructure;
+#ifdef CONFIG_SMP
+ long ret_affinity;
+ cpumask_t saved_cpu_mask;
+ cpumask_t local_cpu_mask = CPU_MASK_NONE;
+#endif
+
+ answerStructure.answer = answer;
+ answerStructure.answer_copied = false;
+
+ if (command != NULL)
+ command->header.operation_id = (u32) &answerStructure;
+
+ dprintk(KERN_INFO "tf_send_receive\n");
+
+#ifdef CONFIG_TF_ZEBRA
+ if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
+ dprintk(KERN_ERR "tf_send_receive(%p): "
+ "Secure world not started\n", comm);
+
+ return -EFAULT;
+ }
+#endif
+
+ if (test_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags)) != 0) {
+ dprintk(KERN_DEBUG
+ "tf_send_receive: Flag Terminating is set\n");
+ return 0;
+ }
+
+#ifdef CONFIG_SMP
+ cpu_set(0, local_cpu_mask);
+ cpumask_copy(&saved_cpu_mask, tsk_cpus_allowed(current));
+ ret_affinity = sched_setaffinity(0, &local_cpu_mask);
+ if (ret_affinity != 0)
+ dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity);
+#endif
+
+ /*
+ * Send the command
+ */
+ error = tf_send_recv(comm,
+ command, &answerStructure, connection, bKillable);
+
+ if (!bKillable && sigkill_pending()) {
+ if ((command->header.message_type ==
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) &&
+ (answer->create_device_context.error_code ==
+ S_SUCCESS)) {
+
+ /*
+ * CREATE_DEVICE_CONTEXT was interrupted.
+ */
+ dprintk(KERN_INFO "tf_send_receive: "
+ "sending DESTROY_DEVICE_CONTEXT\n");
+ answerStructure.answer = answer;
+ answerStructure.answer_copied = false;
+
+ command->header.message_type =
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+ command->header.message_size =
+ (sizeof(struct
+ tf_command_destroy_device_context) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+ command->header.operation_id =
+ (u32) &answerStructure;
+ command->destroy_device_context.device_context =
+ answer->create_device_context.
+ device_context;
+
+ goto destroy_context;
+ }
+ }
+
+ if (error == 0) {
+ /*
+ * tf_send_recv returned Success.
+ */
+ if (command->header.message_type ==
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) {
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+ } else if (command->header.message_type ==
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+ }
+ } else if (error == -EINTR) {
+ /*
+ * No command was sent, return failure.
+ */
+ dprintk(KERN_ERR
+ "tf_send_receive: "
+ "tf_send_recv failed (error %d) !\n",
+ error);
+ } else if (error == -EIO) {
+ /*
+ * A command was sent but its answer is still pending.
+ */
+
+ /* means bKillable is true */
+ dprintk(KERN_ERR
+ "tf_send_receive: "
+			"tf_send_recv interrupted (error %d). "
+			"Send DESTROY_DEVICE_CONTEXT.\n", error);
+
+ /* Send the DESTROY_DEVICE_CONTEXT. */
+ answerStructure.answer = answer;
+ answerStructure.answer_copied = false;
+
+ command->header.message_type =
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+ command->header.message_size =
+ (sizeof(struct tf_command_destroy_device_context) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+ command->header.operation_id =
+ (u32) &answerStructure;
+ command->destroy_device_context.device_context =
+ connection->device_context;
+
+ error = tf_send_recv(comm,
+ command, &answerStructure, connection, false);
+ if (error == -EINTR) {
+ /*
+ * Another thread already sent
+ * DESTROY_DEVICE_CONTEXT.
+ * We must still wait for the answer
+ * to the original command.
+ */
+ command = NULL;
+ goto destroy_context;
+ } else {
+ /* An answer was received.
+ * Check if it is the answer
+ * to the DESTROY_DEVICE_CONTEXT.
+ */
+ spin_lock(&comm->lock);
+ if (answer->header.message_type !=
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
+ answerStructure.answer_copied = false;
+ }
+ spin_unlock(&comm->lock);
+ if (!answerStructure.answer_copied) {
+ /* Answer to DESTROY_DEVICE_CONTEXT
+ * was not yet received.
+ * Wait for the answer.
+ */
+ dprintk(KERN_INFO
+					"[pid=%d] tf_send_receive: "
+					"Answer to DESTROY_DEVICE_CONTEXT "
+					"not yet received. Retry\n",
+ current->pid);
+ command = NULL;
+ goto destroy_context;
+ }
+ }
+ }
+
+ dprintk(KERN_INFO "tf_send_receive(): Message answer ready\n");
+ goto exit;
+
+destroy_context:
+ error = tf_send_recv(comm,
+ command, &answerStructure, connection, false);
+
+ /*
+ * tf_send_recv cannot return an error because
+ * it's not killable and not within a connection
+ */
+ BUG_ON(error != 0);
+
+ /* Reset the state, so a new CREATE DEVICE CONTEXT can be sent */
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+
+exit:
+
+#ifdef CONFIG_SMP
+ ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
+ if (ret_affinity != 0)
+ dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity);
+#endif
+ return error;
+}
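+
+/*
+ * Illustrative usage sketch (comment only, not driver code): a caller
+ * builds a command, then lets tf_send_receive() block until the matching
+ * answer arrives. See tf_create_device_context() in tf_conn.c for a real
+ * caller following this pattern:
+ *
+ *	union tf_command command;
+ *	union tf_answer answer;
+ *	int error;
+ *
+ *	command.header.message_type = TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT;
+ *	command.header.message_size = ...;
+ *	command.header.operation_id = (u32) &answer;
+ *	error = tf_send_receive(comm, &command, &answer, connection, true);
+ *	if (error == 0)
+ *		... check answer.create_device_context.error_code ...
+ */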
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+
+/*
+ * Handles all the power management calls.
+ * The operation is the type of power management
+ * operation to be performed.
+ *
+ * This routine only returns if a failure occurred or if
+ * the requested power management operation is "resume".
+ * "Hibernate" and "Shutdown" are expected to block in the
+ * corresponding SMC to the Secure World.
+ */
+int tf_power_management(struct tf_comm *comm,
+ enum TF_POWER_OPERATION operation)
+{
+ u32 status;
+ int error = 0;
+
+ dprintk(KERN_INFO "tf_power_management(%d)\n", operation);
+
+#ifdef CONFIG_TF_ZEBRA
+ if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
+ dprintk(KERN_INFO "tf_power_management(%p): "
+ "succeeded (not started)\n", comm);
+
+ return 0;
+ }
+#endif
+
+ status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
+ & TF_STATUS_POWER_STATE_MASK)
+ >> TF_STATUS_POWER_STATE_SHIFT);
+
+ switch (operation) {
+ case TF_POWER_OPERATION_SHUTDOWN:
+ switch (status) {
+ case TF_POWER_MODE_ACTIVE:
+ error = tf_pm_shutdown(comm);
+
+ if (error) {
+ dprintk(KERN_ERR "tf_power_management(): "
+ "Failed with error code 0x%08x\n",
+ error);
+ goto error;
+ }
+ break;
+
+ default:
+ goto not_allowed;
+ }
+ break;
+
+ case TF_POWER_OPERATION_HIBERNATE:
+ switch (status) {
+ case TF_POWER_MODE_ACTIVE:
+ error = tf_pm_hibernate(comm);
+
+ if (error) {
+ dprintk(KERN_ERR "tf_power_management(): "
+ "Failed with error code 0x%08x\n",
+ error);
+ goto error;
+ }
+ break;
+
+ default:
+ goto not_allowed;
+ }
+ break;
+
+ case TF_POWER_OPERATION_RESUME:
+ error = tf_pm_resume(comm);
+
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_power_management(): "
+ "Failed with error code 0x%08x\n",
+ error);
+ goto error;
+ }
+ break;
+ }
+
+ dprintk(KERN_INFO "tf_power_management(): succeeded\n");
+ return 0;
+
+not_allowed:
+ dprintk(KERN_ERR "tf_power_management(): "
+ "Power command not allowed in current "
+ "Secure World state %d\n", status);
+ error = -ENOTTY;
+error:
+ return error;
+}
+
diff --git a/security/tf_driver/tf_comm.h b/security/tf_driver/tf_comm.h
new file mode 100644
index 000000000000..8921dc1d1be0
--- /dev/null
+++ b/security/tf_driver/tf_comm.h
@@ -0,0 +1,202 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_COMM_H__
+#define __TF_COMM_H__
+
+#include "tf_defs.h"
+#include "tf_protocol.h"
+
+/*----------------------------------------------------------------------------
+ * Misc
+ *----------------------------------------------------------------------------*/
+
+void tf_set_current_time(struct tf_comm *comm);
+
+/*
+ * Atomic accesses to 32-bit variables in the L1 Shared buffer
+ */
+static inline u32 tf_read_reg32(const u32 *comm_buffer)
+{
+ u32 result;
+
+ __asm__ __volatile__("@ tf_read_reg32\n"
+ "ldrex %0, [%1]\n"
+ : "=&r" (result)
+ : "r" (comm_buffer)
+ );
+
+ return result;
+}
+
+static inline void tf_write_reg32(void *comm_buffer, u32 value)
+{
+ u32 tmp;
+
+ __asm__ __volatile__("@ tf_write_reg32\n"
+ "1: ldrex %0, [%2]\n"
+ " strex %0, %1, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b"
+ : "=&r" (tmp)
+ : "r" (value), "r" (comm_buffer)
+ : "cc"
+ );
+}
+
+/*
+ * Atomic accesses to 64-bit variables in the L1 Shared buffer
+ */
+static inline u64 tf_read_reg64(void *comm_buffer)
+{
+ u64 result;
+
+ __asm__ __volatile__("@ tf_read_reg64\n"
+ "ldrexd %0, [%1]\n"
+ : "=&r" (result)
+ : "r" (comm_buffer)
+ );
+
+ return result;
+}
+
+static inline void tf_write_reg64(void *comm_buffer, u64 value)
+{
+ u64 tmp;
+
+ __asm__ __volatile__("@ tf_write_reg64\n"
+ "1: ldrexd %0, [%2]\n"
+ " strexd %0, %1, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b"
+ : "=&r" (tmp)
+ : "r" (value), "r" (comm_buffer)
+ : "cc"
+ );
+}
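+
+/*
+ * Note: tf_read_reg32()/tf_write_reg32() above rely on the ARM exclusive
+ * monitor (ldrex/strex) to get single-copy atomic accesses to fields that
+ * the Secure World may update concurrently. Illustrative usage sketch
+ * (comment only), mirroring the command queue handling in tf_comm_tz.c:
+ *
+ *	u32 first_free_command =
+ *		tf_read_reg32(&comm->l1_buffer->first_free_command);
+ *	... copy the command into the queue slot ...
+ *	tf_write_reg32(&comm->l1_buffer->first_free_command,
+ *		first_free_command + command_size);
+ */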
+
+/*----------------------------------------------------------------------------
+ * SMC operations
+ *----------------------------------------------------------------------------*/
+
+/* RPC return values */
+#define RPC_NO 0x00 /* No RPC to execute */
+#define RPC_YIELD 0x01 /* Yield RPC */
+#define RPC_NON_YIELD 0x02 /* non-Yield RPC */
+
+int tf_rpc_execute(struct tf_comm *comm);
+
+/*----------------------------------------------------------------------------
+ * Shared memory related operations
+ *----------------------------------------------------------------------------*/
+
+#define L1_DESCRIPTOR_FAULT (0x00000000)
+#define L2_DESCRIPTOR_FAULT (0x00000000)
+
+#define L2_DESCRIPTOR_ADDR_MASK (0xFFFFF000)
+
+#define DESCRIPTOR_V13_12_MASK (0x3 << PAGE_SHIFT)
+#define DESCRIPTOR_V13_12_GET(a) ((a & DESCRIPTOR_V13_12_MASK) >> PAGE_SHIFT)
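+
+/*
+ * Our reading: bits v[13:12] of the virtual address are carried in the
+ * descriptor so that the Secure World can map the page at a virtual
+ * address of the same page colour, keeping cache aliases coherent on
+ * VIPT-cached ARM cores. The protocol specification is authoritative here.
+ */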
+
+struct tf_coarse_page_table *tf_alloc_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ u32 type);
+
+void tf_free_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_coarse_page_table *coarse_pg_table,
+ int force);
+
+void tf_init_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context);
+
+void tf_release_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context);
+
+struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor);
+
+u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm);
+
+void tf_cleanup_shared_memory(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup);
+
+int tf_fill_descriptor_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 buffer,
+ struct vm_area_struct **vmas,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 buffer_size,
+ u32 *buffer_start_offset,
+ bool in_user_space,
+ u32 flags,
+ u32 *descriptor_count);
+
+/*----------------------------------------------------------------------------
+ * Standard communication operations
+ *----------------------------------------------------------------------------*/
+
+int tf_schedule_secure_world(struct tf_comm *comm);
+
+int tf_send_receive(
+ struct tf_comm *comm,
+ union tf_command *command,
+ union tf_answer *answer,
+ struct tf_connection *connection,
+ bool bKillable);
+
+
+/**
+ * Get a pointer to the Secure World description.
+ * This points directly into the L1 shared buffer
+ * and is valid only once the communication has
+ * been initialized.
+ **/
+u8 *tf_get_description(struct tf_comm *comm);
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+enum TF_POWER_OPERATION {
+ TF_POWER_OPERATION_HIBERNATE = 1,
+ TF_POWER_OPERATION_SHUTDOWN = 2,
+ TF_POWER_OPERATION_RESUME = 3,
+};
+
+int tf_pm_hibernate(struct tf_comm *comm);
+int tf_pm_resume(struct tf_comm *comm);
+int tf_pm_shutdown(struct tf_comm *comm);
+
+int tf_power_management(struct tf_comm *comm,
+ enum TF_POWER_OPERATION operation);
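+
+/*
+ * Illustrative call sequence (comment only): platform suspend/resume code
+ * would typically issue
+ *
+ *	tf_power_management(comm, TF_POWER_OPERATION_HIBERNATE);
+ *	... platform enters the low-power state ...
+ *	tf_power_management(comm, TF_POWER_OPERATION_RESUME);
+ *
+ * The actual hook wiring is platform-specific and not shown here.
+ */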
+
+
+/*----------------------------------------------------------------------------
+ * Communication initialization and termination
+ *----------------------------------------------------------------------------*/
+
+int tf_init(struct tf_comm *comm);
+
+void tf_terminate(struct tf_comm *comm);
+
+
+#endif /* __TF_COMM_H__ */
diff --git a/security/tf_driver/tf_comm_tz.c b/security/tf_driver/tf_comm_tz.c
new file mode 100644
index 000000000000..0f36209add7a
--- /dev/null
+++ b/security/tf_driver/tf_comm_tz.c
@@ -0,0 +1,911 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * Copyright (C) 2011-2012 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <linux/version.h>
+#include <asm/cputype.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
+
+#include <trace/events/nvsecurity.h>
+
+#include "tf_defs.h"
+#include "tf_comm.h"
+#include "tf_protocol.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+
+/*
+ * Structure common to all SMC operations
+ */
+struct tf_generic_smc {
+ u32 reg0;
+ u32 reg1;
+ u32 reg2;
+ u32 reg3;
+ u32 reg4;
+};
+
+/*----------------------------------------------------------------------------
+ * SMC operations
+ *----------------------------------------------------------------------------*/
+
+static inline void tf_smc_generic_call(
+ struct tf_generic_smc *generic_smc)
+{
+#ifdef CONFIG_SMP
+ long ret;
+ cpumask_t saved_cpu_mask;
+ cpumask_t local_cpu_mask = CPU_MASK_NONE;
+
+ cpu_set(0, local_cpu_mask);
+ cpumask_copy(&saved_cpu_mask, tsk_cpus_allowed(current));
+ ret = sched_setaffinity(0, &local_cpu_mask);
+ if (ret != 0)
+ dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret);
+#endif
+
+ trace_smc_generic_call(NVSEC_SMC_START);
+
+ __asm__ volatile(
+ "mov r0, %2\n"
+ "mov r1, %3\n"
+ "mov r2, %4\n"
+ "mov r3, %5\n"
+ "mov r4, %6\n"
+ ".word 0xe1600070 @ SMC 0\n"
+ "mov %0, r0\n"
+ "mov %1, r1\n"
+ : "=r" (generic_smc->reg0), "=r" (generic_smc->reg1)
+ : "r" (generic_smc->reg0), "r" (generic_smc->reg1),
+ "r" (generic_smc->reg2), "r" (generic_smc->reg3),
+ "r" (generic_smc->reg4)
+ : "r0", "r1", "r2", "r3", "r4");
+
+ trace_smc_generic_call(NVSEC_SMC_DONE);
+
+#ifdef CONFIG_SMP
+ ret = sched_setaffinity(0, &saved_cpu_mask);
+ if (ret != 0)
+ dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret);
+#endif
+}
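+
+/*
+ * The wrapper above marshals reg0..reg4 into r0..r4, issues the SMC
+ * instruction (hand-encoded as .word 0xe1600070 so the file also builds
+ * with assemblers that do not know the smc mnemonic) and reads the results
+ * back from r0/r1. For example, tf_smc_get_protocol_version() below passes
+ * the service identifier in reg0 and receives the version in reg1.
+ */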
+
+/*
+ * Calls the get protocol version SMC.
+ * Fills *protocol_version with the version number returned by the
+ * SMC.
+ */
+static inline void tf_smc_get_protocol_version(u32 *protocol_version)
+{
+ struct tf_generic_smc generic_smc;
+
+ generic_smc.reg0 = TF_SMC_GET_PROTOCOL_VERSION;
+ generic_smc.reg1 = 0;
+ generic_smc.reg2 = 0;
+ generic_smc.reg3 = 0;
+ generic_smc.reg4 = 0;
+
+ tf_smc_generic_call(&generic_smc);
+ *protocol_version = generic_smc.reg1;
+}
+
+
+/*
+ * Calls the init SMC with the specified parameters.
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static inline int tf_smc_init(u32 shared_page_descriptor)
+{
+ struct tf_generic_smc generic_smc;
+
+ generic_smc.reg0 = TF_SMC_INIT;
+ /* Descriptor for the layer 1 shared buffer */
+ generic_smc.reg1 = shared_page_descriptor;
+ generic_smc.reg2 = 0;
+ generic_smc.reg3 = 0;
+ generic_smc.reg4 = 0;
+
+ tf_smc_generic_call(&generic_smc);
+ if (generic_smc.reg0 != S_SUCCESS)
+ printk(KERN_ERR "tf_smc_init:"
+ " r0=0x%08X upon return (expected 0x%08X)!\n",
+ generic_smc.reg0,
+ S_SUCCESS);
+
+ return generic_smc.reg0;
+}
+
+
+/*
+ * Calls the reset irq SMC.
+ */
+static inline void tf_smc_reset_irq(void)
+{
+ struct tf_generic_smc generic_smc;
+
+ generic_smc.reg0 = TF_SMC_RESET_IRQ;
+ generic_smc.reg1 = 0;
+ generic_smc.reg2 = 0;
+ generic_smc.reg3 = 0;
+ generic_smc.reg4 = 0;
+
+ tf_smc_generic_call(&generic_smc);
+}
+
+
+/*
+ * Calls the WAKE_UP SMC.
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static inline int tf_smc_wake_up(u32 l1_shared_buffer_descriptor,
+ u32 shared_mem_start_offset,
+ u32 shared_mem_size)
+{
+ struct tf_generic_smc generic_smc;
+
+ generic_smc.reg0 = TF_SMC_WAKE_UP;
+ generic_smc.reg1 = shared_mem_start_offset;
+ /* long form command */
+ generic_smc.reg2 = shared_mem_size | 0x80000000;
+ generic_smc.reg3 = l1_shared_buffer_descriptor;
+ generic_smc.reg4 = 0;
+
+ tf_smc_generic_call(&generic_smc);
+
+ if (generic_smc.reg0 != S_SUCCESS)
+ printk(KERN_ERR "tf_smc_wake_up:"
+ " r0=0x%08X upon return (expected 0x%08X)!\n",
+ generic_smc.reg0,
+ S_SUCCESS);
+
+ return generic_smc.reg0;
+}
+
+/*
+ * Calls the N-Yield SMC.
+ */
+static inline void tf_smc_nyield(void)
+{
+ struct tf_generic_smc generic_smc;
+
+ generic_smc.reg0 = TF_SMC_N_YIELD;
+ generic_smc.reg1 = 0;
+ generic_smc.reg2 = 0;
+ generic_smc.reg3 = 0;
+ generic_smc.reg4 = 0;
+
+ tf_smc_generic_call(&generic_smc);
+}
+
+#ifdef CONFIG_SECURE_TRACES
+static void tf_print_secure_traces(struct tf_comm *comm)
+{
+ spin_lock(&(comm->lock));
+ if (comm->l1_buffer->traces_status != 0) {
+ if (comm->l1_buffer->traces_status > 1)
+ pr_info("TF : traces lost...\n");
+ pr_info("TF : %s", comm->l1_buffer->traces_buffer);
+ comm->l1_buffer->traces_status = 0;
+ }
+ spin_unlock(&(comm->lock));
+}
+#endif
+
+/* Yields the Secure World */
+int tf_schedule_secure_world(struct tf_comm *comm)
+{
+ tf_set_current_time(comm);
+
+ /* yield to the Secure World */
+ tf_smc_nyield();
+
+#ifdef CONFIG_SECURE_TRACES
+ tf_print_secure_traces(comm);
+#endif
+
+ return 0;
+}
+
+/*
+ * Returns the L2 init descriptor for the specified kernel page.
+ */
+
+#define L2_INIT_DESCRIPTOR_BASE (0x00000003)
+#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4)
+
+static u32 tf_get_l2init_descriptor(u32 vaddr)
+{
+ struct page *page;
+ u32 paddr;
+ u32 descriptor;
+
+ descriptor = L2_INIT_DESCRIPTOR_BASE;
+
+ /* get physical address and add to descriptor */
+ page = virt_to_page(vaddr);
+ paddr = page_to_phys(page);
+ descriptor |= (paddr & L2_DESCRIPTOR_ADDR_MASK);
+
+ /* Add virtual address v[13:12] bits to descriptor */
+ descriptor |= (DESCRIPTOR_V13_12_GET(vaddr)
+ << L2_INIT_DESCRIPTOR_V13_12_SHIFT);
+
+ descriptor |= tf_get_l2_descriptor_common(vaddr, &init_mm);
+
+
+ return descriptor;
+}
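+
+/*
+ * The returned descriptor packs, in a single u32: the small-page base bits
+ * (L2_INIT_DESCRIPTOR_BASE), the physical page address, the v[13:12]
+ * colour bits and the common attribute bits from init_mm. For instance
+ * (values illustrative), a page at physical 0x80001000 yields 0x80001003
+ * plus the colour and attribute bits.
+ */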
+
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Free the memory used by the W3B buffer for the specified comm.
+ * This function does nothing if no W3B buffer is allocated for the device.
+ */
+static inline void tf_free_w3b(struct tf_comm *comm)
+{
+ tf_cleanup_shared_memory(
+ &(comm->w3b_cpt_alloc_context),
+ &(comm->w3b_shmem_desc),
+ 0);
+
+ tf_release_coarse_page_table_allocator(&(comm->w3b_cpt_alloc_context));
+
+ internal_vfree((void *)comm->w3b);
+ comm->w3b = 0;
+ comm->w3b_shmem_size = 0;
+ clear_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags));
+}
+
+
+/*
+ * Allocates the W3B buffer for the specified comm.
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static inline int tf_allocate_w3b(struct tf_comm *comm)
+{
+ int error;
+ u32 flags;
+ u32 config_flag_s;
+ u32 *w3b_descriptors;
+ u32 w3b_descriptor_count;
+ u32 w3b_current_size;
+
+ config_flag_s = tf_read_reg32(&comm->l1_buffer->config_flag_s);
+
+retry:
+ if ((test_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags))) == 0) {
+ /*
+ * Initialize the shared memory for the W3B
+ */
+ tf_init_coarse_page_table_allocator(
+ &comm->w3b_cpt_alloc_context);
+ } else {
+ /*
+ * The W3B is allocated but do we have to reallocate a bigger
+ * one?
+ */
+ /* Check H bit */
+ if ((config_flag_s & (1<<4)) != 0) {
+ /* The size of the W3B may change after SMC_INIT */
+ /* Read the current value */
+ w3b_current_size = tf_read_reg32(
+ &comm->l1_buffer->w3b_size_current_s);
+ if (comm->w3b_shmem_size > w3b_current_size)
+ return 0;
+
+ tf_free_w3b(comm);
+ goto retry;
+ } else {
+ return 0;
+ }
+ }
+
+ /* check H bit */
+ if ((config_flag_s & (1<<4)) != 0)
+ /* The size of the W3B may change after SMC_INIT */
+ /* Read the current value */
+ comm->w3b_shmem_size = tf_read_reg32(
+ &comm->l1_buffer->w3b_size_current_s);
+ else
+ comm->w3b_shmem_size = tf_read_reg32(
+ &comm->l1_buffer->w3b_size_max_s);
+
+ comm->w3b = (u32) internal_vmalloc(comm->w3b_shmem_size);
+ if (comm->w3b == 0) {
+ printk(KERN_ERR "tf_allocate_w3b():"
+ " Out of memory for W3B buffer (%u bytes)!\n",
+ (unsigned int)(comm->w3b_shmem_size));
+ error = -ENOMEM;
+ goto error;
+ }
+
+ /* initialize the w3b_shmem_desc structure */
+ comm->w3b_shmem_desc.type = TF_SHMEM_TYPE_PM_HIBERNATE;
+ INIT_LIST_HEAD(&(comm->w3b_shmem_desc.list));
+
+ flags = (TF_SHMEM_TYPE_READ | TF_SHMEM_TYPE_WRITE);
+
+ /* directly point to the L1 shared buffer W3B descriptors */
+ w3b_descriptors = comm->l1_buffer->w3b_descriptors;
+
+ /*
+ * tf_fill_descriptor_table uses the following parameter as an
+ * IN/OUT
+ */
+
+ error = tf_fill_descriptor_table(
+ &(comm->w3b_cpt_alloc_context),
+ &(comm->w3b_shmem_desc),
+ comm->w3b,
+ NULL,
+ w3b_descriptors,
+ comm->w3b_shmem_size,
+ &(comm->w3b_shmem_offset),
+ false,
+ flags,
+ &w3b_descriptor_count);
+ if (error != 0) {
+ printk(KERN_ERR "tf_allocate_w3b():"
+ " tf_fill_descriptor_table failed with "
+ "error code 0x%08x!\n",
+ error);
+ goto error;
+ }
+
+ set_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags));
+
+ /* successful completion */
+ return 0;
+
+error:
+ tf_free_w3b(comm);
+
+ return error;
+}
+
+/*
+ * Perform a Secure World shutdown operation.
+ * The routine does not return if the operation succeeds;
+ * it returns an appropriate error code if
+ * the operation fails.
+ */
+int tf_pm_shutdown(struct tf_comm *comm)
+{
+#ifdef CONFIG_TFN
+ /* this function is useless for the TEGRA product */
+ return 0;
+#else
+ int error;
+ union tf_command command;
+ union tf_answer answer;
+
+ dprintk(KERN_INFO "tf_pm_shutdown()\n");
+
+ memset(&command, 0, sizeof(command));
+
+ command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
+ command.header.message_size =
+ (sizeof(struct tf_command_management) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+
+ command.management.command = TF_MANAGEMENT_SHUTDOWN;
+
+ error = tf_send_receive(
+ comm,
+ &command,
+ &answer,
+ NULL,
+ false);
+
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_pm_shutdown(): "
+ "tf_send_receive failed (error %d)!\n",
+ error);
+ return error;
+ }
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+ if (answer.header.error_code != 0)
+ dprintk(KERN_ERR "tf_driver: shutdown failed.\n");
+ else
+ dprintk(KERN_INFO "tf_driver: shutdown succeeded.\n");
+#endif
+
+ return answer.header.error_code;
+#endif
+}
+
+
+/*
+ * Perform a Secure World hibernate operation.
+ * The routine does not return if the operation succeeds;
+ * it returns an appropriate error code if
+ * the operation fails.
+ */
+int tf_pm_hibernate(struct tf_comm *comm)
+{
+#ifdef CONFIG_TFN
+ /* this function is useless for the TEGRA product */
+ return 0;
+#else
+ int error;
+ union tf_command command;
+ union tf_answer answer;
+ u32 first_command;
+ u32 first_free_command;
+
+ dprintk(KERN_INFO "tf_pm_hibernate()\n");
+
+ error = tf_allocate_w3b(comm);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_pm_hibernate(): "
+ "tf_allocate_w3b failed (error %d)!\n",
+ error);
+ return error;
+ }
+
+ /*
+ * As the polling thread is already hibernating, we
+	 * must send the message and receive the answer ourselves.
+ */
+
+ /* build the "prepare to hibernate" message */
+ command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
+ command.management.command = TF_MANAGEMENT_HIBERNATE;
+ /* Long Form Command */
+ command.management.shared_mem_descriptors[0] = 0;
+ command.management.shared_mem_descriptors[1] = 0;
+ command.management.w3b_size =
+ comm->w3b_shmem_size | 0x80000000;
+ command.management.w3b_start_offset =
+ comm->w3b_shmem_offset;
+ command.header.operation_id = (u32) &answer;
+
+ tf_dump_command(&command);
+
+ /* find a slot to send the message in */
+
+	/* AFY: why not use the function tf_send_receive? We are
+ * duplicating a lot of subtle code here. And it's not going to be
+ * tested because power management is currently not supported by the
+ * secure world. */
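+	/*
+	 * Note on the queue layout (our reading of the protocol):
+	 * first_command and first_free_command are free-running word
+	 * counters; the producer writes at index first_free_command %
+	 * TF_N_MESSAGE_QUEUE_CAPACITY and the queue is considered full
+	 * once the counters drift TF_N_MESSAGE_QUEUE_CAPACITY words
+	 * apart.
+	 */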
+ for (;;) {
+ int queue_words_count, command_size;
+
+ spin_lock(&(comm->lock));
+
+ first_command = tf_read_reg32(
+ &comm->l1_buffer->first_command);
+ first_free_command = tf_read_reg32(
+ &comm->l1_buffer->first_free_command);
+
+ queue_words_count = first_free_command - first_command;
+ command_size = command.header.message_size
+ + sizeof(struct tf_command_header);
+ if ((queue_words_count + command_size) <
+ TF_N_MESSAGE_QUEUE_CAPACITY) {
+ /* Command queue is not full */
+ memcpy(&comm->l1_buffer->command_queue[
+ first_free_command %
+ TF_N_MESSAGE_QUEUE_CAPACITY],
+ &command,
+ command_size * sizeof(u32));
+
+ tf_write_reg32(&comm->l1_buffer->first_free_command,
+ first_free_command + command_size);
+
+ spin_unlock(&(comm->lock));
+ break;
+ }
+
+ spin_unlock(&(comm->lock));
+ (void)tf_schedule_secure_world(comm);
+ }
+
+ /* now wait for the answer, dispatching other answers */
+ while (1) {
+ u32 first_answer;
+ u32 first_free_answer;
+
+ /* check all the answers */
+ first_free_answer = tf_read_reg32(
+ &comm->l1_buffer->first_free_answer);
+ first_answer = tf_read_reg32(
+ &comm->l1_buffer->first_answer);
+
+ if (first_answer != first_free_answer) {
+ int bFoundAnswer = 0;
+
+ do {
+ /* answer queue not empty */
+ union tf_answer tmp_answer;
+ struct tf_answer_header header;
+				/* size of the command in 32-bit words */
+ int command_size;
+
+ /* get the message_size */
+ memcpy(&header,
+ &comm->l1_buffer->answer_queue[
+ first_answer %
+ TF_S_ANSWER_QUEUE_CAPACITY],
+ sizeof(struct tf_answer_header));
+ command_size = header.message_size +
+ sizeof(struct tf_answer_header);
+
+ /*
+ * NOTE: message_size is the number of words
+ * following the first word
+ */
+ memcpy(&tmp_answer,
+ &comm->l1_buffer->answer_queue[
+ first_answer %
+ TF_S_ANSWER_QUEUE_CAPACITY],
+ command_size * sizeof(u32));
+
+ tf_dump_answer(&tmp_answer);
+
+ if (tmp_answer.header.operation_id ==
+ (u32) &answer) {
+ /*
+ * this is the answer to the "prepare to
+ * hibernate" message
+ */
+ memcpy(&answer,
+ &tmp_answer,
+ command_size * sizeof(u32));
+
+ bFoundAnswer = 1;
+ tf_write_reg32(
+ &comm->l1_buffer->first_answer,
+ first_answer + command_size);
+ break;
+ } else {
+ /*
+ * this is a standard message answer,
+ * dispatch it
+ */
+ struct tf_answer_struct
+ *answerStructure;
+
+ answerStructure =
+ (struct tf_answer_struct *)
+ tmp_answer.header.operation_id;
+
+ memcpy(answerStructure->answer,
+ &tmp_answer,
+ command_size * sizeof(u32));
+
+ answerStructure->answer_copied = true;
+ }
+
+ tf_write_reg32(
+ &comm->l1_buffer->first_answer,
+ first_answer + command_size);
+ } while (first_answer != first_free_answer);
+
+ if (bFoundAnswer)
+ break;
+ }
+
+ /*
+		 * Since the Secure World is at least processing the "prepare
+		 * to hibernate" message, its timeout must be immediate, so
+		 * there is no need to check the timeout and schedule() the
+		 * current thread.
+ */
+ (void)tf_schedule_secure_world(comm);
+ } /* while (1) */
+
+ printk(KERN_INFO "tf_driver: hibernate.\n");
+ return 0;
+#endif
+}
+
+
+/*
+ * Perform a Secure World resume operation.
+ * The routine returns once the Secure World is active again
+ * or if an error occurs during the "resume" process.
+ */
+int tf_pm_resume(struct tf_comm *comm)
+{
+#ifdef CONFIG_TFN
+ /* this function is useless for the TEGRA product */
+ return 0;
+#else
+ int error;
+ u32 status;
+
+ dprintk(KERN_INFO "tf_pm_resume()\n");
+
+ error = tf_smc_wake_up(
+ tf_get_l2init_descriptor((u32)comm->l1_buffer),
+ comm->w3b_shmem_offset,
+ comm->w3b_shmem_size);
+
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_pm_resume(): "
+ "tf_smc_wake_up failed (error %d)!\n",
+ error);
+ return error;
+ }
+
+ status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
+ & TF_STATUS_POWER_STATE_MASK)
+ >> TF_STATUS_POWER_STATE_SHIFT);
+
+ while ((status != TF_POWER_MODE_ACTIVE)
+ && (status != TF_POWER_MODE_PANIC)) {
+ tf_smc_nyield();
+
+ status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
+ & TF_STATUS_POWER_STATE_MASK)
+ >> TF_STATUS_POWER_STATE_SHIFT);
+
+ /*
+ * As this may last quite a while, call the kernel scheduler to
+		 * As this may last quite a while, call the kernel scheduler
+		 * to hand over the CPU to other operations.
+ schedule();
+ }
+
+ switch (status) {
+ case TF_POWER_MODE_ACTIVE:
+ break;
+
+ case TF_POWER_MODE_PANIC:
+ dprintk(KERN_ERR "tf_pm_resume(): "
+ "Secure World POWER_MODE_PANIC!\n");
+ return -EINVAL;
+
+ default:
+ dprintk(KERN_ERR "tf_pm_resume(): "
+ "unexpected Secure World POWER_MODE (%d)!\n", status);
+ return -EINVAL;
+ }
+
+ dprintk(KERN_INFO "tf_pm_resume() succeeded\n");
+ return 0;
+#endif
+}
+
+/*----------------------------------------------------------------------------
+ * Communication initialization and termination
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Handles the software interrupts issued by the Secure World.
+ */
+static irqreturn_t tf_soft_int_handler(int irq, void *dev_id)
+{
+ struct tf_comm *comm = (struct tf_comm *) dev_id;
+
+ if (comm->l1_buffer == NULL)
+ return IRQ_NONE;
+
+ if ((tf_read_reg32(&comm->l1_buffer->status_s) &
+ TF_STATUS_P_MASK) == 0)
+ /* interrupt not issued by the Trusted Foundations Software */
+ return IRQ_NONE;
+
+ tf_smc_reset_irq();
+
+ /* signal N_SM_EVENT */
+ wake_up(&comm->wait_queue);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Initializes the communication with the Secure World.
+ * The L1 shared buffer is allocated and the Secure World
+ * is yielded for the first time.
+ * Returns successfully once the communication with
+ * the Secure World is up and running.
+ *
+ * Returns 0 upon success or an appropriate error code
+ * upon failure.
+ */
+int tf_init(struct tf_comm *comm)
+{
+ int error;
+ struct page *buffer_page;
+ u32 protocol_version;
+
+ dprintk(KERN_INFO "tf_init()\n");
+
+ spin_lock_init(&(comm->lock));
+ comm->flags = 0;
+ comm->l1_buffer = NULL;
+ init_waitqueue_head(&(comm->wait_queue));
+
+ /*
+	 * Check that the Secure World protocol version is the expected one.
+ */
+ tf_smc_get_protocol_version(&protocol_version);
+
+ if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
+ != TF_S_PROTOCOL_MAJOR_VERSION) {
+ printk(KERN_ERR "tf_init():"
+ " Unsupported Secure World Major Version "
+ "(0x%02X, expected 0x%02X)!\n",
+ GET_PROTOCOL_MAJOR_VERSION(protocol_version),
+ TF_S_PROTOCOL_MAJOR_VERSION);
+ error = -EIO;
+ goto error;
+ }
+
+ /*
+ * Register the software interrupt handler if required to.
+ */
+ if (comm->soft_int_irq != -1) {
+ dprintk(KERN_INFO "tf_init(): "
+ "Registering software interrupt handler (IRQ %d)\n",
+ comm->soft_int_irq);
+
+ error = request_irq(comm->soft_int_irq,
+ tf_soft_int_handler,
+ IRQF_SHARED,
+ TF_DEVICE_BASE_NAME,
+ comm);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_init(): "
+ "request_irq failed for irq %d (error %d)\n",
+ comm->soft_int_irq, error);
+ goto error;
+ }
+ set_bit(TF_COMM_FLAG_IRQ_REQUESTED, &(comm->flags));
+ }
+
+ /*
+ * Allocate and initialize the L1 shared buffer.
+ */
+ comm->l1_buffer = (void *) internal_get_zeroed_page(GFP_KERNEL);
+ if (comm->l1_buffer == NULL) {
+ printk(KERN_ERR "tf_init():"
+ " get_zeroed_page failed for L1 shared buffer!\n");
+ error = -ENOMEM;
+ goto error;
+ }
+
+ /*
+ * Ensure the page storing the L1 shared buffer is mapped.
+ */
+ buffer_page = virt_to_page(comm->l1_buffer);
+ trylock_page(buffer_page);
+
+ dprintk(KERN_INFO "tf_init(): "
+ "L1 shared buffer allocated at virtual:%p, "
+ "physical:%p (page:%p)\n",
+ comm->l1_buffer,
+ (void *)virt_to_phys(comm->l1_buffer),
+ buffer_page);
+
+ set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags));
+
+ /*
+ * Init SMC
+ */
+ error = tf_smc_init(
+ tf_get_l2init_descriptor((u32)comm->l1_buffer));
+ if (error != S_SUCCESS) {
+ dprintk(KERN_ERR "tf_init(): "
+ "tf_smc_init failed (error 0x%08X)!\n",
+ error);
+ goto error;
+ }
+
+ /*
+	 * Check whether the interrupts are actually enabled.
+	 * If not, remove the IRQ handler.
+ */
+ if ((tf_read_reg32(&comm->l1_buffer->config_flag_s) &
+ TF_CONFIG_FLAG_S) == 0) {
+ if (test_and_clear_bit(TF_COMM_FLAG_IRQ_REQUESTED,
+ &(comm->flags)) != 0) {
+ dprintk(KERN_INFO "tf_init(): "
+ "Interrupts not used, unregistering "
+ "softint (IRQ %d)\n",
+ comm->soft_int_irq);
+
+ free_irq(comm->soft_int_irq, comm);
+ }
+ } else {
+ if (test_bit(TF_COMM_FLAG_IRQ_REQUESTED,
+ &(comm->flags)) == 0) {
+ /*
+ * Interrupts are enabled in the Secure World, but not
+			 * handled by the driver.
+ */
+ dprintk(KERN_ERR "tf_init(): "
+ "soft_interrupt argument not provided\n");
+ error = -EINVAL;
+ goto error;
+ }
+ }
+
+ /*
+ * Successful completion.
+ */
+
+ /* yield for the first time */
+ (void)tf_schedule_secure_world(comm);
+
+ dprintk(KERN_INFO "tf_init(): Success\n");
+ return S_SUCCESS;
+
+error:
+ /*
+ * Error handling.
+ */
+ dprintk(KERN_INFO "tf_init(): Failure (error %d)\n",
+ error);
+ tf_terminate(comm);
+ return error;
+}
+
+
+/*
+ * Attempt to terminate the communication with the Secure World.
+ * The L1 shared buffer is freed.
+ * Calling this routine definitively terminates the communication
+ * with the Secure World: there is no way to inform the Secure World of a new
+ * L1 shared buffer to be used once it has been initialized.
+ */
+void tf_terminate(struct tf_comm *comm)
+{
+ dprintk(KERN_INFO "tf_terminate()\n");
+
+ set_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags));
+
+ if ((test_bit(TF_COMM_FLAG_W3B_ALLOCATED,
+ &(comm->flags))) != 0) {
+ dprintk(KERN_INFO "tf_terminate(): "
+ "Freeing the W3B buffer...\n");
+ tf_free_w3b(comm);
+ }
+
+ if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
+ &(comm->flags))) != 0) {
+ __clear_page_locked(virt_to_page(comm->l1_buffer));
+ internal_free_page((unsigned long) comm->l1_buffer);
+ }
+
+ if ((test_bit(TF_COMM_FLAG_IRQ_REQUESTED,
+ &(comm->flags))) != 0) {
+ dprintk(KERN_INFO "tf_terminate(): "
+ "Unregistering softint (IRQ %d)\n",
+ comm->soft_int_irq);
+ free_irq(comm->soft_int_irq, comm);
+ }
+}
diff --git a/security/tf_driver/tf_conn.c b/security/tf_driver/tf_conn.c
new file mode 100644
index 000000000000..6808f5485b27
--- /dev/null
+++ b/security/tf_driver/tf_conn.c
@@ -0,0 +1,1675 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#include "s_version.h"
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_comm.h"
+#include "tf_conn.h"
+
+#ifdef CONFIG_TF_ZEBRA
+#include "tf_zebra.h"
+#include "tf_crypto.h"
+#endif
+
+#ifdef CONFIG_ANDROID
+#define TF_PRIVILEGED_UID_GID 1000 /* Android system AID */
+#else
+#define TF_PRIVILEGED_UID_GID 0
+#endif
+
+/*----------------------------------------------------------------------------
+ * Management of the shared memory blocks.
+ *
+ * Shared memory blocks are the blocks registered through
+ * the commands REGISTER_SHARED_MEMORY and POWER_MANAGEMENT
+ *----------------------------------------------------------------------------*/
+
+/**
+ * Unmaps a shared memory block.
+ **/
+void tf_unmap_shmem(
+ struct tf_connection *connection,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup)
+{
+ /* check shmem_desc contains a descriptor */
+ if (shmem_desc == NULL)
+ return;
+
+ dprintk(KERN_DEBUG "tf_unmap_shmem(%p)\n", shmem_desc);
+
+retry:
+ mutex_lock(&(connection->shmem_mutex));
+ if (atomic_read(&shmem_desc->ref_count) > 1) {
+ /*
+		 * Shared memory still in use; wait for the other operations
+		 * to complete before actually unmapping it.
+ */
+ dprintk(KERN_INFO "Descriptor in use\n");
+ mutex_unlock(&(connection->shmem_mutex));
+ schedule();
+ goto retry;
+ }
+
+ tf_cleanup_shared_memory(
+ &(connection->cpt_alloc_context),
+ shmem_desc,
+ full_cleanup);
+
+ list_del(&(shmem_desc->list));
+
+ if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
+ (full_cleanup != 0)) {
+ internal_kfree(shmem_desc);
+
+ atomic_dec(&(connection->shmem_count));
+ } else {
+ /*
+		 * This is a preallocated shared memory; add it to the free
+		 * list. Since the device context is unmapped last, it is
+		 * always the first element of the free list if no
+		 * device context has been created.
+ */
+ shmem_desc->block_identifier = 0;
+ list_add(&(shmem_desc->list), &(connection->free_shmem_list));
+ }
+
+ mutex_unlock(&(connection->shmem_mutex));
+}
+
+
+/**
+ * Find the first available slot for a new block of shared memory
+ * and map the user buffer.
+ * Update the descriptors to L1 descriptors.
+ * Update the buffer_start_offset and buffer_size fields.
+ * shmem_desc is updated to the mapped shared memory descriptor.
+ **/
+int tf_map_shmem(
+ struct tf_connection *connection,
+ u32 buffer,
+ /* flags for read-write access rights on the memory */
+ u32 flags,
+ bool in_user_space,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 *buffer_start_offset,
+ u32 buffer_size,
+ struct tf_shmem_desc **shmem_desc,
+ u32 *descriptor_count)
+{
+ struct tf_shmem_desc *desc = NULL;
+ int error;
+
+ dprintk(KERN_INFO "tf_map_shmem(%p, %p, flags = 0x%08x)\n",
+ connection,
+ (void *) buffer,
+ flags);
+
+ /*
+	 * Temporary check added to reject kernel stack buffers
+ */
+ if (!in_user_space) {
+ if (object_is_on_stack((void *)buffer) != 0) {
+ dprintk(KERN_ERR
+ "tf_map_shmem: "
+ "kernel stack buffers "
+ "(addr=0x%08X) "
+ "are not supported",
+ buffer);
+ error = -ENOSYS;
+ goto error;
+ }
+ }
+
+ mutex_lock(&(connection->shmem_mutex));
+
+ /*
+	 * Check that the list of free shared memory
+	 * descriptors is not empty.
+ */
+ if (list_empty(&(connection->free_shmem_list))) {
+ if (atomic_read(&(connection->shmem_count)) ==
+ TF_SHMEM_MAX_COUNT) {
+ printk(KERN_ERR "tf_map_shmem(%p):"
+ " maximum shared memories already registered\n",
+ connection);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ /* no descriptor available, allocate a new one */
+
+ desc = (struct tf_shmem_desc *) internal_kmalloc(
+ sizeof(*desc), GFP_KERNEL);
+ if (desc == NULL) {
+ printk(KERN_ERR "tf_map_shmem(%p):"
+ " failed to allocate descriptor\n",
+ connection);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ /* Initialize the structure */
+ desc->type = TF_SHMEM_TYPE_REGISTERED_SHMEM;
+ atomic_set(&desc->ref_count, 1);
+ INIT_LIST_HEAD(&(desc->list));
+
+ atomic_inc(&(connection->shmem_count));
+ } else {
+ /* take the first free shared memory descriptor */
+ desc = list_first_entry(&(connection->free_shmem_list),
+ struct tf_shmem_desc, list);
+ list_del(&(desc->list));
+ }
+
+ /* Add the descriptor to the used list */
+ list_add(&(desc->list), &(connection->used_shmem_list));
+
+ error = tf_fill_descriptor_table(
+ &(connection->cpt_alloc_context),
+ desc,
+ buffer,
+ connection->vmas,
+ descriptors,
+ buffer_size,
+ buffer_start_offset,
+ in_user_space,
+ flags,
+ descriptor_count);
+
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_map_shmem(%p):"
+ " tf_fill_descriptor_table failed with error "
+ "code %d!\n",
+ connection,
+ error);
+ goto error;
+ }
+ desc->client_buffer = (u8 *) buffer;
+
+ /*
+ * Successful completion.
+ */
+ *shmem_desc = desc;
+ mutex_unlock(&(connection->shmem_mutex));
+ dprintk(KERN_DEBUG "tf_map_shmem: success\n");
+ return 0;
+
+
+ /*
+ * Error handling.
+ */
+error:
+ mutex_unlock(&(connection->shmem_mutex));
+ dprintk(KERN_ERR "tf_map_shmem: failure with error code %d\n",
+ error);
+
+ tf_unmap_shmem(
+ connection,
+ desc,
+ 0);
+
+ return error;
+}
+
+
+
+/*
+ * This function is a copy of the find_vma() function
+ * from Linux kernel 2.6.15, with some fixes:
+ *  - the memory block may end on vm_end
+ *  - check that the full memory block is inside the memory area
+ *  - guarantee that NULL is returned if no memory area is found
+ */
+struct vm_area_struct *tf_find_vma(struct mm_struct *mm,
+ unsigned long addr, unsigned long size)
+{
+ struct vm_area_struct *vma = NULL;
+
+ dprintk(KERN_INFO
+ "tf_find_vma addr=0x%lX size=0x%lX\n", addr, size);
+
+ if (mm) {
+ /* Check the cache first. */
+ /* (Cache hit rate is typically around 35%.) */
+ vma = mm->mmap_cache;
+ if (!(vma && vma->vm_end >= (addr+size) &&
+ vma->vm_start <= addr)) {
+ struct rb_node *rb_node;
+
+ rb_node = mm->mm_rb.rb_node;
+ vma = NULL;
+
+ while (rb_node) {
+ struct vm_area_struct *vma_tmp;
+
+ vma_tmp = rb_entry(rb_node,
+ struct vm_area_struct, vm_rb);
+
+ dprintk(KERN_INFO
+					"vma_tmp->vm_start=0x%lX "
+ "vma_tmp->vm_end=0x%lX\n",
+ vma_tmp->vm_start,
+ vma_tmp->vm_end);
+
+ if (vma_tmp->vm_end >= (addr+size)) {
+ vma = vma_tmp;
+ if (vma_tmp->vm_start <= addr)
+ break;
+
+ rb_node = rb_node->rb_left;
+ } else {
+ rb_node = rb_node->rb_right;
+ }
+ }
+
+ if (vma)
+ mm->mmap_cache = vma;
+ if (rb_node == NULL)
+ vma = NULL;
+ }
+ }
+ return vma;
+}
+
+int tf_validate_shmem_and_flags(
+ u32 shmem,
+ u32 shmem_size,
+ u32 flags)
+{
+ struct vm_area_struct *vma;
+ u32 chunk;
+
+ if (shmem_size == 0)
+ /* This is always valid */
+ return 0;
+
+ if ((shmem + shmem_size) < shmem)
+ /* Overflow */
+ return -EINVAL;
+
+ down_read(&current->mm->mmap_sem);
+
+ /*
+ * When looking for a memory address, split buffer into chunks of
+ * size=PAGE_SIZE.
+ */
+ chunk = PAGE_SIZE - (shmem & (PAGE_SIZE-1));
+ if (chunk > shmem_size)
+ chunk = shmem_size;
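+
+	/*
+	 * Worked example (illustrative, assuming 4 KiB pages): for
+	 * shmem = 0x1234 and shmem_size = 0x2000, the first chunk covers
+	 * 0x1234..0x1fff (0xdcc bytes) and whole pages follow, so each
+	 * page the buffer touches is checked against a VMA exactly once.
+	 */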
+
+ do {
+ vma = tf_find_vma(current->mm, shmem, chunk);
+
+ if (vma == NULL) {
+ dprintk(KERN_ERR "%s: area not found\n", __func__);
+ goto error;
+ }
+
+ if (flags & TF_SHMEM_TYPE_READ)
+ if (!(vma->vm_flags & VM_READ)) {
+ dprintk(KERN_ERR "%s: no read permission\n",
+ __func__);
+ goto error;
+ }
+ if (flags & TF_SHMEM_TYPE_WRITE)
+ if (!(vma->vm_flags & VM_WRITE)) {
+ dprintk(KERN_ERR "%s: no write permission\n",
+ __func__);
+ goto error;
+ }
+
+ shmem_size -= chunk;
+ shmem += chunk;
+ chunk = (shmem_size <= PAGE_SIZE ?
+ shmem_size : PAGE_SIZE);
+ } while (shmem_size != 0);
+
+ up_read(&current->mm->mmap_sem);
+ return 0;
+
+error:
+ up_read(&current->mm->mmap_sem);
+ return -EFAULT;
+}
+
+
+static int tf_map_temp_shmem(struct tf_connection *connection,
+ struct tf_command_param_temp_memref *temp_memref,
+ u32 param_type,
+ struct tf_shmem_desc **shmem_desc)
+{
+ u32 flags;
+ u32 error = S_SUCCESS;
+ bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
+
+ dprintk(KERN_INFO "tf_map_temp_shmem(%p, "
+ "0x%08x[size=0x%08x], offset=0x%08x)\n",
+ connection,
+ temp_memref->descriptor,
+ temp_memref->size,
+ temp_memref->offset);
+
+ switch (param_type) {
+ case TF_PARAM_TYPE_MEMREF_TEMP_INPUT:
+ flags = TF_SHMEM_TYPE_READ;
+ break;
+ case TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT:
+ flags = TF_SHMEM_TYPE_WRITE;
+ break;
+ case TF_PARAM_TYPE_MEMREF_TEMP_INOUT:
+ flags = TF_SHMEM_TYPE_WRITE | TF_SHMEM_TYPE_READ;
+ break;
+ default:
+ error = -EINVAL;
+ goto error;
+ }
+
+ if (temp_memref->descriptor == 0) {
+ /* NULL tmpref */
+ temp_memref->offset = 0;
+ *shmem_desc = NULL;
+ } else if ((temp_memref->descriptor != 0) &&
+ (temp_memref->size == 0)) {
+ /* Empty tmpref */
+ temp_memref->offset = temp_memref->descriptor;
+ temp_memref->descriptor = 0;
+ temp_memref->size = 0;
+ *shmem_desc = NULL;
+ } else {
+ /* Map the temp shmem block */
+
+ u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
+ u32 descriptor_count;
+
+ if (in_user_space) {
+ error = tf_validate_shmem_and_flags(
+ temp_memref->descriptor,
+ temp_memref->size,
+ flags);
+ if (error != 0)
+ goto error;
+ }
+
+ error = tf_map_shmem(
+ connection,
+ temp_memref->descriptor,
+ flags,
+ in_user_space,
+ shared_mem_descriptors,
+ &(temp_memref->offset),
+ temp_memref->size,
+ shmem_desc,
+ &descriptor_count);
+ temp_memref->descriptor = shared_mem_descriptors[0];
+ }
+
+error:
+ return error;
+}
+
+/*
+ * Clean up a list of shared memory descriptors.
+ */
+static void tf_shared_memory_cleanup_list(
+ struct tf_connection *connection,
+ struct list_head *shmem_desc_list)
+{
+ while (!list_empty(shmem_desc_list)) {
+ struct tf_shmem_desc *shmem_desc;
+
+ shmem_desc = list_first_entry(shmem_desc_list,
+ struct tf_shmem_desc, list);
+
+ tf_unmap_shmem(connection, shmem_desc, 1);
+ }
+}
+
+
+/*
+ * Clean up the shared memory information in the connection.
+ * Releases all allocated pages.
+ */
+static void tf_cleanup_shared_memories(struct tf_connection *connection)
+{
+	/*
+	 * Clean up the lists of used and free descriptors. This is done
+	 * outside the mutex, because tf_unmap_shmem already takes it.
+	 */
+ tf_shared_memory_cleanup_list(connection,
+ &connection->used_shmem_list);
+ tf_shared_memory_cleanup_list(connection,
+ &connection->free_shmem_list);
+
+ mutex_lock(&(connection->shmem_mutex));
+
+ /* Free the Vmas page */
+ if (connection->vmas) {
+ internal_free_page((unsigned long) connection->vmas);
+ connection->vmas = NULL;
+ }
+
+ tf_release_coarse_page_table_allocator(
+ &(connection->cpt_alloc_context));
+
+ mutex_unlock(&(connection->shmem_mutex));
+}
+
+
+/*
+ * Initialize the shared memory in a connection.
+ * Allocates the minimum memory to be provided
+ * for shared memory management
+ */
+int tf_init_shared_memory(struct tf_connection *connection)
+{
+ int error;
+ int i;
+ int coarse_page_index;
+
+ /*
+	 * We only need to initialize special elements and attempt to allocate
+	 * the minimum number of shared memory descriptors we want to support.
+ */
+
+ mutex_init(&(connection->shmem_mutex));
+ INIT_LIST_HEAD(&(connection->free_shmem_list));
+ INIT_LIST_HEAD(&(connection->used_shmem_list));
+ atomic_set(&(connection->shmem_count), 0);
+
+ tf_init_coarse_page_table_allocator(
+ &(connection->cpt_alloc_context));
+
+
+ /*
+ * Preallocate 3 pages to increase the chances that a connection
+ * succeeds in allocating shared mem
+ */
+ for (i = 0;
+ i < 3;
+ i++) {
+ struct tf_shmem_desc *shmem_desc =
+ (struct tf_shmem_desc *) internal_kmalloc(
+ sizeof(*shmem_desc), GFP_KERNEL);
+
+ if (shmem_desc == NULL) {
+ printk(KERN_ERR "tf_init_shared_memory(%p):"
+ " failed to pre allocate descriptor %d\n",
+ connection,
+ i);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ for (coarse_page_index = 0;
+ coarse_page_index < TF_MAX_COARSE_PAGES;
+ coarse_page_index++) {
+ struct tf_coarse_page_table *coarse_pg_table;
+
+ coarse_pg_table = tf_alloc_coarse_page_table(
+ &(connection->cpt_alloc_context),
+ TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED);
+
+ if (coarse_pg_table == NULL) {
+ printk(KERN_ERR "tf_init_shared_memory(%p)"
+ ": descriptor %d coarse page %d - "
+ "tf_alloc_coarse_page_table() "
+ "failed\n",
+ connection,
+ i,
+ coarse_page_index);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ shmem_desc->coarse_pg_table[coarse_page_index] =
+ coarse_pg_table;
+ }
+ shmem_desc->coarse_pg_table_count = 0;
+
+ shmem_desc->type = TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM;
+ atomic_set(&shmem_desc->ref_count, 1);
+
+ /*
+		 * Add this preallocated descriptor to the list of free
+		 * descriptors. Keep the device-context-specific one at the
+		 * beginning of the list.
+ */
+ INIT_LIST_HEAD(&(shmem_desc->list));
+ list_add_tail(&(shmem_desc->list),
+ &(connection->free_shmem_list));
+ }
+
+ /* allocate memory for the vmas structure */
+ connection->vmas =
+ (struct vm_area_struct **) internal_get_zeroed_page(GFP_KERNEL);
+ if (connection->vmas == NULL) {
+ printk(KERN_ERR "tf_init_shared_memory(%p):"
+ " vmas - failed to get_zeroed_page\n",
+ connection);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ return 0;
+
+error:
+ tf_cleanup_shared_memories(connection);
+ return error;
+}
+
+/*----------------------------------------------------------------------------
+ * Connection operations to the Secure World
+ *----------------------------------------------------------------------------*/
+
+int tf_create_device_context(
+ struct tf_connection *connection)
+{
+ union tf_command command;
+ union tf_answer answer;
+ int error = 0;
+
+ dprintk(KERN_INFO "tf_create_device_context(%p)\n",
+ connection);
+
+ command.create_device_context.message_type =
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT;
+ command.create_device_context.message_size =
+ (sizeof(struct tf_command_create_device_context)
+ - sizeof(struct tf_command_header))/sizeof(u32);
+ command.create_device_context.operation_id = (u32) &answer;
+ command.create_device_context.device_context_id = (u32) connection;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ &command,
+ &answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer.create_device_context.error_code != S_SUCCESS))
+ goto error;
+
+ /*
+ * CREATE_DEVICE_CONTEXT succeeded,
+	 * store the device context handle and update the connection status.
+ */
+ connection->device_context =
+ answer.create_device_context.device_context;
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_create_device_context(%p):"
+ " device_context=0x%08x\n",
+ connection,
+ answer.create_device_context.device_context);
+ return 0;
+
+error:
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_create_device_context failed with "
+ "error %d\n", error);
+ } else {
+ /*
+ * We sent a DeviceCreateContext. The state is now
+		 * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT. It has to be
+		 * reset if we ever want to send a DeviceCreateContext again.
+ */
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+ dprintk(KERN_ERR "tf_create_device_context failed with "
+ "error_code 0x%08X\n",
+ answer.create_device_context.error_code);
+ if (answer.create_device_context.error_code ==
+ S_ERROR_OUT_OF_MEMORY)
+ error = -ENOMEM;
+ else
+ error = -EFAULT;
+ }
+
+ return error;
+}
+
+/* Check that the current application belongs to the
+ * requested GID */
+static bool tf_check_gid(gid_t requested_gid)
+{
+ if (requested_gid == current_egid()) {
+ return true;
+ } else {
+ u32 size;
+ u32 i;
+ /* Look in the supplementary GIDs */
+ get_group_info(GROUP_INFO);
+ size = GROUP_INFO->ngroups;
+ for (i = 0; i < size; i++)
+			if (requested_gid == GROUP_AT(GROUP_INFO, i))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Opens a client session to the Secure World
+ */
+int tf_open_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+ struct tf_shmem_desc *shmem_desc[4] = {NULL};
+ u32 i;
+
+ dprintk(KERN_INFO "tf_open_client_session(%p)\n", connection);
+
+ /*
+	 * Initialize the message size with no login data. This will be
+	 * adjusted later in the cases below.
+ */
+ command->open_client_session.message_size =
+ (sizeof(struct tf_command_open_client_session) - 20
+ - sizeof(struct tf_command_header))/4;
+
+ switch (command->open_client_session.login_type) {
+ case TF_LOGIN_PUBLIC:
+ /* Nothing to do */
+ break;
+
+ case TF_LOGIN_USER:
+ /*
+ * Send the EUID of the calling application in the login data.
+ * Update message size.
+ */
+ *(u32 *) &command->open_client_session.login_data =
+ current_euid();
+#ifndef CONFIG_ANDROID
+ command->open_client_session.login_type =
+ (u32) TF_LOGIN_USER_LINUX_EUID;
+#else
+ command->open_client_session.login_type =
+ (u32) TF_LOGIN_USER_ANDROID_EUID;
+#endif
+
+ /* Added one word */
+ command->open_client_session.message_size += 1;
+ break;
+
+ case TF_LOGIN_GROUP: {
+ /* Check requested GID */
+ gid_t requested_gid =
+ *(u32 *) command->open_client_session.login_data;
+
+ if (!tf_check_gid(requested_gid)) {
+ dprintk(KERN_ERR "tf_open_client_session(%p) "
+ "TF_LOGIN_GROUP: requested GID (0x%x) does "
+				"not match real eGID (0x%x) "
+				"or any of the supplementary GIDs\n",
+ connection, requested_gid, current_egid());
+ error = -EACCES;
+ goto error;
+ }
+#ifndef CONFIG_ANDROID
+ command->open_client_session.login_type =
+ TF_LOGIN_GROUP_LINUX_GID;
+#else
+ command->open_client_session.login_type =
+ TF_LOGIN_GROUP_ANDROID_GID;
+#endif
+
+ command->open_client_session.message_size += 1; /* GID */
+ break;
+ }
+
+#ifndef CONFIG_ANDROID
+ case TF_LOGIN_APPLICATION: {
+ /*
+ * Compute SHA-1 hash of the application fully-qualified path
+ * name. Truncate the hash to 16 bytes and send it as login
+ * data. Update message size.
+ */
+ u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+ error = tf_hash_application_path_and_data(pSHA1Hash,
+ NULL, 0);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_hash_application_path_and_data\n");
+ goto error;
+ }
+ memcpy(&command->open_client_session.login_data,
+ pSHA1Hash, 16);
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH;
+ /* 16 bytes */
+ command->open_client_session.message_size += 4;
+ break;
+ }
+#else
+ case TF_LOGIN_APPLICATION:
+ /*
+ * Send the real UID of the calling application in the login
+ * data. Update message size.
+ */
+ *(u32 *) &command->open_client_session.login_data =
+ current_uid();
+
+ command->open_client_session.login_type =
+ (u32) TF_LOGIN_APPLICATION_ANDROID_UID;
+
+ /* Added one word */
+ command->open_client_session.message_size += 1;
+ break;
+#endif
+
+#ifndef CONFIG_ANDROID
+ case TF_LOGIN_APPLICATION_USER: {
+ /*
+ * Compute SHA-1 hash of the concatenation of the application
+ * fully-qualified path name and the EUID of the calling
+ * application. Truncate the hash to 16 bytes and send it as
+ * login data. Update message size.
+ */
+ u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+ error = tf_hash_application_path_and_data(pSHA1Hash,
+ (u8 *) &(current_euid()), sizeof(current_euid()));
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_hash_application_path_and_data\n");
+ goto error;
+ }
+ memcpy(&command->open_client_session.login_data,
+ pSHA1Hash, 16);
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH;
+
+ /* 16 bytes */
+ command->open_client_session.message_size += 4;
+
+ break;
+ }
+#else
+ case TF_LOGIN_APPLICATION_USER:
+ /*
+ * Send the real UID and the EUID of the calling application in
+ * the login data. Update message size.
+ */
+ *(u32 *) &command->open_client_session.login_data =
+ current_uid();
+ *(u32 *) &command->open_client_session.login_data[4] =
+ current_euid();
+
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID;
+
+ /* Added two words */
+ command->open_client_session.message_size += 2;
+ break;
+#endif
+
+#ifndef CONFIG_ANDROID
+ case TF_LOGIN_APPLICATION_GROUP: {
+ /*
+ * Check requested GID. Compute SHA-1 hash of the concatenation
+ * of the application fully-qualified path name and the
+ * requested GID. Update message size
+ */
+ gid_t requested_gid;
+ u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+ requested_gid = *(u32 *) &command->open_client_session.
+ login_data;
+
+ if (!tf_check_gid(requested_gid)) {
+ dprintk(KERN_ERR "tf_open_client_session(%p) "
+ "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
+			"does not match real eGID (0x%x) "
+			"or any of the supplementary GIDs\n",
+ connection, requested_gid, current_egid());
+ error = -EACCES;
+ goto error;
+ }
+
+ error = tf_hash_application_path_and_data(pSHA1Hash,
+ &requested_gid, sizeof(u32));
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_hash_application_path_and_data\n");
+ goto error;
+ }
+
+ memcpy(&command->open_client_session.login_data,
+ pSHA1Hash, 16);
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH;
+
+ /* 16 bytes */
+ command->open_client_session.message_size += 4;
+ break;
+ }
+#else
+ case TF_LOGIN_APPLICATION_GROUP: {
+ /*
+ * Check requested GID. Send the real UID and the requested GID
+ * in the login data. Update message size.
+ */
+ gid_t requested_gid;
+
+ requested_gid = *(u32 *) &command->open_client_session.
+ login_data;
+
+ if (!tf_check_gid(requested_gid)) {
+ dprintk(KERN_ERR "tf_open_client_session(%p) "
+ "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
+ "does not match real eGID (0x%x)"
+ "or any of the supplementary GIDs\n",
+ connection, requested_gid, current_egid());
+ error = -EACCES;
+ goto error;
+ }
+
+ *(u32 *) &command->open_client_session.login_data =
+ current_uid();
+ *(u32 *) &command->open_client_session.login_data[4] =
+ requested_gid;
+
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID;
+
+ /* Added two words */
+ command->open_client_session.message_size += 2;
+
+ break;
+ }
+#endif
+
+ case TF_LOGIN_PRIVILEGED:
+ /* A privileged login may be performed only on behalf of the
+ kernel itself or on behalf of a process with euid=0 or
+ egid=0 or euid=system or egid=system. */
+ if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
+ dprintk(KERN_DEBUG "tf_open_client_session: "
+ "TF_LOGIN_PRIVILEGED for kernel API\n");
+ } else if ((current_euid() != TF_PRIVILEGED_UID_GID) &&
+ (current_egid() != TF_PRIVILEGED_UID_GID) &&
+ (current_euid() != 0) && (current_egid() != 0)) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ " user %d, group %d not allowed to open "
+ "session with TF_LOGIN_PRIVILEGED\n",
+ current_euid(), current_egid());
+ error = -EACCES;
+ goto error;
+ } else {
+ dprintk(KERN_DEBUG "tf_open_client_session: "
+ "TF_LOGIN_PRIVILEGED for %u:%u\n",
+ current_euid(), current_egid());
+ }
+ command->open_client_session.login_type =
+ TF_LOGIN_PRIVILEGED;
+ break;
+
+ case TF_LOGIN_AUTHENTICATION: {
+ /*
+ * Compute SHA-1 hash of the application binary
+ * Send this hash as the login data (20 bytes)
+ */
+
+ u8 *hash;
+ hash = &(command->open_client_session.login_data[0]);
+
+ error = tf_get_current_process_hash(hash);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_get_current_process_hash\n");
+ goto error;
+ }
+ command->open_client_session.login_type =
+ TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH;
+
+ /* 20 bytes */
+ command->open_client_session.message_size += 5;
+ break;
+ }
+
+ case TF_LOGIN_PRIVILEGED_KERNEL:
+ /* A kernel login may be performed only on behalf of the
+ kernel itself. */
+ if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
+ dprintk(KERN_DEBUG "tf_open_client_session: "
+ "TF_LOGIN_PRIVILEGED_KERNEL for kernel API\n");
+ } else {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ " user %d, group %d not allowed to open "
+ "session with TF_LOGIN_PRIVILEGED_KERNEL\n",
+ current_euid(), current_egid());
+ error = -EACCES;
+ goto error;
+ }
+ command->open_client_session.login_type =
+ TF_LOGIN_PRIVILEGED_KERNEL;
+ break;
+
+ default:
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "unknown login_type(%08X)\n",
+ command->open_client_session.login_type);
+ error = -EOPNOTSUPP;
+ goto error;
+ }
+
+ /* Map the temporary memory references */
+ for (i = 0; i < 4; i++) {
+ int param_type;
+ param_type = TF_GET_PARAM_TYPE(
+ command->open_client_session.param_types, i);
+ if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
+ TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
+ == TF_PARAM_TYPE_MEMREF_FLAG) {
+ /* Map temp mem ref */
+ error = tf_map_temp_shmem(connection,
+ &command->open_client_session.
+ params[i].temp_memref,
+ param_type,
+ &shmem_desc[i]);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "unable to map temporary memory block "
+ "(%08X)\n", error);
+ goto error;
+ }
+ }
+ }
+
+ /* Fill the handle of the Device Context */
+ command->open_client_session.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+error:
+ /* Unmap the temporary memory references */
+ for (i = 0; i < 4; i++)
+ if (shmem_desc[i] != NULL)
+ tf_unmap_shmem(connection, shmem_desc[i], 0);
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_open_client_session returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_open_client_session returns "
+ "error_code 0x%08X\n",
+ answer->open_client_session.error_code);
+
+ return error;
+}
+
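+/*
+ * Note on param_types: the mapping loop above (and its twin in
+ * tf_invoke_client_command) relies on param_types packing one 4-bit
+ * type nibble per parameter, which matches the
+ * "param_types &= ~((0xF) << (4*i))" manipulation later in this file.
+ * A minimal sketch of the unpacking, assuming that layout (the
+ * authoritative TF_GET_PARAM_TYPE definition lives in tf_protocol.h):
+ *
+ *   type = (param_types >> (4 * i)) & 0xF;
+ *   is_temp_memref =
+ *           (type & (TF_PARAM_TYPE_MEMREF_FLAG |
+ *                    TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
+ *           == TF_PARAM_TYPE_MEMREF_FLAG;
+ */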
+
+/*
+ * Closes a client session from the Secure World
+ */
+int tf_close_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+
+ dprintk(KERN_DEBUG "tf_close_client_session(%p)\n", connection);
+
+ command->close_client_session.message_size =
+ (sizeof(struct tf_command_close_client_session) -
+ sizeof(struct tf_command_header)) / 4;
+ command->close_client_session.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_close_client_session returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_close_client_session returns "
+ "error 0x%08X\n",
+ answer->close_client_session.error_code);
+
+ return error;
+}
+
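+/*
+ * Note on message_size: commands express their payload size in 32-bit
+ * words, excluding the common tf_command_header. A worked example with
+ * illustrative sizes (the real struct sizes come from tf_protocol.h):
+ * if the full command were 24 bytes and the header 16 bytes, then
+ * message_size = (24 - 16) / 4 = 2. Login data appended afterwards is
+ * counted in the same unit, e.g. "+= 4" for a 16-byte truncated SHA-1
+ * hash or "+= 1" for a single GID word.
+ */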
+
+/*
+ * Registers a shared memory to the Secure World
+ */
+int tf_register_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+ struct tf_shmem_desc *shmem_desc = NULL;
+ bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
+ struct tf_command_register_shared_memory *msg =
+ &command->register_shared_memory;
+
+ dprintk(KERN_INFO "tf_register_shared_memory(%p) "
+ "%p[0x%08X][0x%08x]\n",
+ connection,
+ (void *)msg->shared_mem_descriptors[0],
+ msg->shared_mem_size,
+ (u32)msg->memory_flags);
+
+ if (in_user_space) {
+ error = tf_validate_shmem_and_flags(
+ msg->shared_mem_descriptors[0],
+ msg->shared_mem_size,
+ (u32)msg->memory_flags);
+ if (error != 0)
+ goto error;
+ }
+
+ /* Initialize message_size with no descriptors */
+ msg->message_size
+ = (offsetof(struct tf_command_register_shared_memory,
+ shared_mem_descriptors) -
+ sizeof(struct tf_command_header)) / 4;
+
+ /* Map the shmem block and update the message */
+ if (msg->shared_mem_size == 0) {
+ /* Empty shared mem */
+ msg->shared_mem_start_offset = msg->shared_mem_descriptors[0];
+ } else {
+ u32 descriptor_count;
+ error = tf_map_shmem(
+ connection,
+ msg->shared_mem_descriptors[0],
+ msg->memory_flags,
+ in_user_space,
+ msg->shared_mem_descriptors,
+ &(msg->shared_mem_start_offset),
+ msg->shared_mem_size,
+ &shmem_desc,
+ &descriptor_count);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_register_shared_memory: "
+ "unable to map shared memory block\n");
+ goto error;
+ }
+ msg->message_size += descriptor_count;
+ }
+
+ /*
+	 * Write the correct device context handle and the address of the shared
+ * memory descriptor in the message
+ */
+ msg->device_context = connection->device_context;
+ msg->block_id = (u32)shmem_desc;
+
+ /* Send the updated message */
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer->register_shared_memory.error_code
+ != S_SUCCESS)) {
+ dprintk(KERN_ERR "tf_register_shared_memory: "
+ "operation failed. Unmap block\n");
+ goto error;
+ }
+
+ /* Saves the block handle returned by the secure world */
+ if (shmem_desc != NULL)
+ shmem_desc->block_identifier =
+ answer->register_shared_memory.block;
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_register_shared_memory(%p):"
+ " block_id=0x%08x block=0x%08x\n",
+ connection, msg->block_id,
+ answer->register_shared_memory.block);
+ return 0;
+
+ /* error completion */
+error:
+ tf_unmap_shmem(
+ connection,
+ shmem_desc,
+ 0);
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_register_shared_memory returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_register_shared_memory returns "
+ "error_code 0x%08X\n",
+ answer->register_shared_memory.error_code);
+
+ return error;
+}
+
+
+/*
+ * Releases a shared memory from the Secure World
+ */
+int tf_release_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+
+ dprintk(KERN_DEBUG "tf_release_shared_memory(%p)\n", connection);
+
+ command->release_shared_memory.message_size =
+ (sizeof(struct tf_command_release_shared_memory) -
+ sizeof(struct tf_command_header)) / 4;
+ command->release_shared_memory.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer->release_shared_memory.error_code != S_SUCCESS))
+ goto error;
+
+ /* Use block_id to get back the pointer to shmem_desc */
+ tf_unmap_shmem(
+ connection,
+ (struct tf_shmem_desc *)
+ answer->release_shared_memory.block_id,
+ 0);
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_release_shared_memory(%p):"
+ " block_id=0x%08x block=0x%08x\n",
+ connection, answer->release_shared_memory.block_id,
+ command->release_shared_memory.block);
+ return 0;
+
+
+error:
+ if (error != 0)
+ dprintk(KERN_ERR "tf_release_shared_memory returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_release_shared_memory returns "
+ "nChannelStatus 0x%08X\n",
+ answer->release_shared_memory.error_code);
+
+ return error;
+
+}
+
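+/*
+ * Note on block_id: registration hands the kernel descriptor pointer
+ * to the Secure World as an opaque identifier and release receives the
+ * same value back. A condensed sketch of the round-trip performed by
+ * the two functions above:
+ *
+ *   msg->block_id = (u32)shmem_desc;                 // register
+ *   ...
+ *   tf_unmap_shmem(connection,
+ *           (struct tf_shmem_desc *)
+ *                   answer->release_shared_memory.block_id, // release
+ *           0);
+ *
+ * This assumes kernel pointers fit in a u32, which holds for this
+ * 32-bit ARM driver but would require an indirection table on a 64-bit
+ * kernel.
+ */
+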
+/*
+ * Invokes a client command to the Secure World
+ */
+int tf_invoke_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+ struct tf_shmem_desc *shmem_desc[4] = {NULL};
+ int i;
+#ifdef CONFIG_TF_ION
+ struct ion_handle *new_handle = NULL;
+#endif /* CONFIG_TF_ION */
+
+ dprintk(KERN_INFO "tf_invoke_client_command(%p)\n", connection);
+
+	command->invoke_client_command.message_size =
+ (sizeof(struct tf_command_invoke_client_command) -
+ sizeof(struct tf_command_header)) / 4;
+
+#ifdef CONFIG_TF_ZEBRA
+ error = tf_crypto_try_shortcuted_update(connection,
+ (struct tf_command_invoke_client_command *) command,
+ (struct tf_answer_invoke_client_command *) answer);
+ if (error == 0)
+ return error;
+#endif
+
+ /* Map the tmprefs */
+ for (i = 0; i < 4; i++) {
+ int param_type = TF_GET_PARAM_TYPE(
+ command->invoke_client_command.param_types, i);
+
+ if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
+ TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
+ == TF_PARAM_TYPE_MEMREF_FLAG) {
+ /* A temporary memref: map it */
+ error = tf_map_temp_shmem(connection,
+ &command->invoke_client_command.
+ params[i].temp_memref,
+ param_type, &shmem_desc[i]);
+ if (error != 0) {
+ dprintk(KERN_ERR
+ "tf_invoke_client_command: "
+ "unable to map temporary memory "
+ "block\n (%08X)", error);
+ goto error;
+ }
+ }
+#ifdef CONFIG_TF_ION
+ else if (param_type == TF_PARAM_TYPE_MEMREF_ION_HANDLE) {
+ struct tf_command_invoke_client_command *invoke;
+ ion_phys_addr_t ion_addr;
+ size_t ion_len;
+ struct ion_buffer *buffer;
+
+ if (connection->ion_client == NULL) {
+ connection->ion_client = ion_client_create(
+ zebra_ion_device,
+ (1 << ION_HEAP_TYPE_CARVEOUT),
+ "tf");
+ }
+ if (connection->ion_client == NULL) {
+ dprintk(KERN_ERR "%s(%p): "
+ "unable to create ion client\n",
+ __func__, connection);
+ error = -EFAULT;
+ goto error;
+ }
+
+ invoke = &command->invoke_client_command;
+
+ dprintk(KERN_INFO "ion_handle %x",
+ invoke->params[i].value.a);
+ buffer = ion_share(connection->ion_client,
+ (struct ion_handle *)invoke->params[i].value.a);
+ if (buffer == NULL) {
+ dprintk(KERN_ERR "%s(%p): "
+ "unable to share ion handle\n",
+ __func__, connection);
+ error = -EFAULT;
+ goto error;
+ }
+
+ dprintk(KERN_INFO "ion_buffer %p", buffer);
+ new_handle = ion_import(connection->ion_client, buffer);
+ if (new_handle == NULL) {
+ dprintk(KERN_ERR "%s(%p): "
+ "unable to import ion buffer\n",
+ __func__, connection);
+ error = -EFAULT;
+ goto error;
+ }
+
+ dprintk(KERN_INFO "new_handle %x", new_handle);
+ error = ion_phys(connection->ion_client,
+ new_handle,
+ &ion_addr,
+ &ion_len);
+ if (error) {
+ dprintk(KERN_ERR
+ "%s: unable to convert ion handle "
+ "0x%08X (error code 0x%08X)\n",
+ __func__,
+ new_handle,
+ error);
+ error = -EINVAL;
+ goto error;
+ }
+ dprintk(KERN_INFO
+ "%s: handle=0x%08x phys_add=0x%08x length=0x%08x\n",
+ __func__, invoke->params[i].value.a, ion_addr, ion_len);
+
+ invoke->params[i].value.a = (u32) ion_addr;
+ invoke->params[i].value.b = (u32) ion_len;
+
+ invoke->param_types &= ~((0xF) << (4*i));
+ invoke->param_types |=
+ TF_PARAM_TYPE_VALUE_INPUT << (4*i);
+ }
+#endif /* CONFIG_TF_ION */
+ }
+
+ command->invoke_client_command.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(&connection->dev->sm, command,
+ answer, connection, true);
+
+error:
+#ifdef CONFIG_TF_ION
+ if (new_handle != NULL)
+ ion_free(connection->ion_client, new_handle);
+#endif /* CONFIG_TF_ION */
+	/* Unmap the temp mem refs */
+ for (i = 0; i < 4; i++) {
+ if (shmem_desc[i] != NULL) {
+ dprintk(KERN_INFO "tf_invoke_client_command: "
+ "UnMatemp_memref %d\n ", i);
+
+ tf_unmap_shmem(connection, shmem_desc[i], 0);
+ }
+ }
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_invoke_client_command returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_invoke_client_command returns "
+ "error_code 0x%08X\n",
+ answer->invoke_client_command.error_code);
+
+ return error;
+}
+
+
+/*
+ * Cancels a client command from the Secure World
+ */
+int tf_cancel_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+
+ dprintk(KERN_DEBUG "tf_cancel_client_command(%p)\n", connection);
+
+ command->cancel_client_operation.device_context =
+ connection->device_context;
+ command->cancel_client_operation.message_size =
+ (sizeof(struct tf_command_cancel_client_operation) -
+ sizeof(struct tf_command_header)) / 4;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer->cancel_client_operation.error_code != S_SUCCESS))
+ goto error;
+
+
+ /* successful completion */
+ return 0;
+
+error:
+ if (error != 0)
+ dprintk(KERN_ERR "tf_cancel_client_command returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_cancel_client_command returns "
+ "nChannelStatus 0x%08X\n",
+ answer->cancel_client_operation.error_code);
+
+ return error;
+}
+
+
+
+/*
+ * Destroys a device context from the Secure World
+ */
+int tf_destroy_device_context(
+ struct tf_connection *connection)
+{
+ int error;
+ /*
+	 * AFY: better to use the specialized tf_command_destroy_device_context
+	 * structure here: this would save stack space
+ */
+ union tf_command command;
+ union tf_answer answer;
+
+ dprintk(KERN_INFO "tf_destroy_device_context(%p)\n", connection);
+
+ BUG_ON(connection == NULL);
+
+ command.header.message_type = TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+ command.header.message_size =
+ (sizeof(struct tf_command_destroy_device_context) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+
+ /*
+	 * Fill in the device context handle.
+	 * It is guaranteed that the first shared memory descriptor describes
+ * the device context
+ */
+ command.destroy_device_context.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ &command,
+ &answer,
+ connection,
+ false);
+
+ if ((error != 0) ||
+ (answer.destroy_device_context.error_code != S_SUCCESS))
+ goto error;
+
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_destroy_device_context(%p)\n",
+ connection);
+ return 0;
+
+error:
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_destroy_device_context failed with "
+ "error %d\n", error);
+ } else {
+ dprintk(KERN_ERR "tf_destroy_device_context failed with "
+ "error_code 0x%08X\n",
+ answer.destroy_device_context.error_code);
+ if (answer.destroy_device_context.error_code ==
+ S_ERROR_OUT_OF_MEMORY)
+ error = -ENOMEM;
+ else
+ error = -EFAULT;
+ }
+
+ return error;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Connection initialization and cleanup operations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Opens a connection to the specified device.
+ *
+ * The placeholder referenced by connection is set to the address of the
+ * new connection; it is set to NULL upon failure.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+int tf_open(struct tf_device *dev,
+ struct file *file,
+ struct tf_connection **connection)
+{
+ int error;
+ struct tf_connection *conn = NULL;
+
+ dprintk(KERN_INFO "tf_open(%p, %p)\n", file, connection);
+
+ /*
+ * Allocate and initialize the conn.
+	 * kmalloc allocates only sizeof(*conn) bytes of virtual memory
+ */
+ conn = (struct tf_connection *) internal_kmalloc(sizeof(*conn),
+ GFP_KERNEL);
+ if (conn == NULL) {
+ printk(KERN_ERR "tf_open(): "
+ "Out of memory for conn!\n");
+ error = -ENOMEM;
+ goto error;
+ }
+
+ memset(conn, 0, sizeof(*conn));
+
+ conn->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ conn->dev = dev;
+ spin_lock_init(&(conn->state_lock));
+ atomic_set(&(conn->pending_op_count), 0);
+ INIT_LIST_HEAD(&(conn->list));
+
+ /*
+ * Initialize the shared memory
+ */
+ error = tf_init_shared_memory(conn);
+ if (error != 0)
+ goto error;
+
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * Initialize CUS specifics
+ */
+ tf_crypto_init_cus(conn);
+#endif
+
+ /*
+ * Attach the conn to the device.
+ */
+ spin_lock(&(dev->connection_list_lock));
+ list_add(&(conn->list), &(dev->connection_list));
+ spin_unlock(&(dev->connection_list_lock));
+
+ /*
+ * Successful completion.
+ */
+
+ *connection = conn;
+
+ dprintk(KERN_INFO "tf_open(): Success (conn=%p)\n", conn);
+ return 0;
+
+ /*
+ * Error handling.
+ */
+
+error:
+ dprintk(KERN_ERR "tf_open(): Failure (error %d)\n", error);
+ /* Deallocate the descriptor pages if necessary */
+ internal_kfree(conn);
+ *connection = NULL;
+ return error;
+}
+
+
+/*
+ * Closes the specified connection.
+ *
+ * Upon return, the connection has been destroyed and cannot be used anymore.
+ *
+ * This function does nothing if connection is set to NULL.
+ */
+void tf_close(struct tf_connection *connection)
+{
+ int error;
+ enum TF_CONN_STATE state;
+
+ dprintk(KERN_DEBUG "tf_close(%p)\n", connection);
+
+ if (connection == NULL)
+ return;
+
+ /*
+ * Assumption: Linux guarantees that no other operation is in progress
+ * and that no other operation will be started when close is called
+ */
+ BUG_ON(atomic_read(&(connection->pending_op_count)) != 0);
+
+ /*
+ * Exchange a Destroy Device Context message if needed.
+ */
+ spin_lock(&(connection->state_lock));
+ state = connection->state;
+ spin_unlock(&(connection->state_lock));
+ if (state == TF_CONN_STATE_VALID_DEVICE_CONTEXT) {
+ /*
+ * A DestroyDeviceContext operation was not performed. Do it
+ * now.
+ */
+ error = tf_destroy_device_context(connection);
+ if (error != 0)
+ /* avoid cleanup if destroy device context fails */
+ goto error;
+ }
+
+ /*
+ * Clean up the shared memory
+ */
+ tf_cleanup_shared_memories(connection);
+
+#ifdef CONFIG_TF_ION
+ if (connection->ion_client != NULL)
+ ion_client_destroy(connection->ion_client);
+#endif
+
+ spin_lock(&(connection->dev->connection_list_lock));
+ list_del(&(connection->list));
+ spin_unlock(&(connection->dev->connection_list_lock));
+
+ internal_kfree(connection);
+
+ return;
+
+error:
+ dprintk(KERN_DEBUG "tf_close(%p) failed with error code %d\n",
+ connection, error);
+}
diff --git a/security/tf_driver/tf_conn.h b/security/tf_driver/tf_conn.h
new file mode 100644
index 000000000000..8bed16f19d5f
--- /dev/null
+++ b/security/tf_driver/tf_conn.h
@@ -0,0 +1,106 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_CONN_H__
+#define __TF_CONN_H__
+
+#include "tf_defs.h"
+
+/*
+ * Returns a pointer to the connection referenced by the
+ * specified file.
+ */
+static inline struct tf_connection *tf_conn_from_file(
+ struct file *file)
+{
+ return file->private_data;
+}
+
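+/*
+ * Typical use from a file_operations callback (this mirrors the ioctl
+ * handler in tf_device.c):
+ *
+ *   struct tf_connection *connection = tf_conn_from_file(file);
+ *   BUG_ON(connection == NULL);
+ */
+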
+int tf_validate_shmem_and_flags(u32 shmem, u32 shmem_size, u32 flags);
+
+int tf_map_shmem(
+ struct tf_connection *connection,
+ u32 buffer,
+ /* flags for read-write access rights on the memory */
+ u32 flags,
+ bool in_user_space,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 *buffer_start_offset,
+ u32 buffer_size,
+ struct tf_shmem_desc **shmem_desc,
+ u32 *descriptor_count);
+
+void tf_unmap_shmem(
+ struct tf_connection *connection,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup);
+
+/*----------------------------------------------------------------------------
+ * Connection operations to the Secure World
+ *----------------------------------------------------------------------------*/
+
+int tf_create_device_context(
+ struct tf_connection *connection);
+
+int tf_destroy_device_context(
+ struct tf_connection *connection);
+
+int tf_open_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_close_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_register_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_release_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_invoke_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_cancel_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+/*----------------------------------------------------------------------------
+ * Connection initialization and cleanup operations
+ *----------------------------------------------------------------------------*/
+
+int tf_open(struct tf_device *dev,
+ struct file *file,
+ struct tf_connection **connection);
+
+void tf_close(
+ struct tf_connection *connection);
+
+
+#endif /* !defined(__TF_CONN_H__) */
diff --git a/security/tf_driver/tf_defs.h b/security/tf_driver/tf_defs.h
new file mode 100644
index 000000000000..7ec4978c63ef
--- /dev/null
+++ b/security/tf_driver/tf_defs.h
@@ -0,0 +1,547 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_DEFS_H__
+#define __TF_DEFS_H__
+
+#include <linux/atomic.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
+#include "tf_protocol.h"
+
+#ifdef CONFIG_TF_ION
+#include <linux/ion.h>
+#include <linux/omap_ion.h>
+#endif
+
+/*----------------------------------------------------------------------------*/
+
+#define SIZE_1KB 0x400
+
+/*
+ * Maximum number of shared memory blocks that can be registered in a connection
+ */
+#define TF_SHMEM_MAX_COUNT (64)
+
+/*
+ * Describes the possible types of shared memories
+ *
+ * TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM :
+ * The descriptor describes a registered shared memory.
+ * Its coarse pages are preallocated when initializing the
+ * connection
+ * TF_SHMEM_TYPE_REGISTERED_SHMEM :
+ * The descriptor describes a registered shared memory.
+ * Its coarse pages are not preallocated
+ * TF_SHMEM_TYPE_PM_HIBERNATE :
+ * The descriptor describes a power management shared memory.
+ */
+enum TF_SHMEM_TYPE {
+ TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM = 0,
+ TF_SHMEM_TYPE_REGISTERED_SHMEM,
+ TF_SHMEM_TYPE_PM_HIBERNATE,
+};
+
+
+/*
+ * This structure contains a pointer on a coarse page table
+ */
+struct tf_coarse_page_table {
+ /*
+ * Identifies the coarse page table descriptor in
+ * free_coarse_page_tables list
+ */
+ struct list_head list;
+
+ /*
+ * The address of the coarse page table
+ */
+ u32 *descriptors;
+
+ /*
+ * The address of the array containing this coarse page table
+ */
+ struct tf_coarse_page_table_array *parent;
+};
+
+
+#define TF_PAGE_DESCRIPTOR_TYPE_NORMAL 0
+#define TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED 1
+
+/*
+ * This structure describes an array of up to 4 coarse page tables
+ * allocated within a single 4KB page.
+ */
+struct tf_coarse_page_table_array {
+ /*
+ * identifies the element in the coarse_page_table_arrays list
+ */
+ struct list_head list;
+
+ /*
+ * Type of page descriptor
+ * can take any of TF_PAGE_DESCRIPTOR_TYPE_XXX value
+ */
+ u32 type;
+
+ struct tf_coarse_page_table coarse_page_tables[4];
+
+ /*
+ * A counter of the number of coarse pages currently used
+ * the max value should be 4 (one coarse page table is 1KB while one
+ * page is 4KB)
+ */
+ u8 ref_count;
+};
+
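+/*
+ * Sizing note: a coarse page table occupies 1KB (SIZE_1KB) while a
+ * page is 4KB, so one array packs exactly 4KB / 1KB = 4 tables and
+ * ref_count never exceeds 4. A hedged initialization sketch, assuming
+ * a get_zeroed_page() backing allocation (the real allocation code
+ * lives elsewhere in the driver):
+ *
+ *   u8 *page = (u8 *)get_zeroed_page(GFP_KERNEL);  // 4KB backing page
+ *   for (i = 0; i < 4; i++)
+ *           array->coarse_page_tables[i].descriptors =
+ *                   (u32 *)(page + i * SIZE_1KB);
+ */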
+
+/*
+ * This structure describes a list of coarse page table arrays
+ * with some of the coarse page tables free. It is used
+ * when the driver needs to allocate a new coarse page
+ * table.
+ */
+struct tf_coarse_page_table_allocation_context {
+ /*
+ * The spin lock protecting concurrent access to the structure.
+ */
+ spinlock_t lock;
+
+ /*
+ * The list of allocated coarse page table arrays
+ */
+ struct list_head coarse_page_table_arrays;
+
+ /*
+ * The list of free coarse page tables
+ */
+ struct list_head free_coarse_page_tables;
+};
+
+
+/*
+ * Fully describes a shared memory block
+ */
+struct tf_shmem_desc {
+ /*
+ * Identifies the shared memory descriptor in the list of free shared
+ * memory descriptors
+ */
+ struct list_head list;
+
+ /*
+ * Identifies the type of shared memory descriptor
+ */
+ enum TF_SHMEM_TYPE type;
+
+ /*
+ * The identifier of the block of shared memory, as returned by the
+ * Secure World.
+ * This identifier is block field of a REGISTER_SHARED_MEMORY answer
+ */
+ u32 block_identifier;
+
+ /* Client buffer */
+ u8 *client_buffer;
+
+	/* Up to TF_MAX_COARSE_PAGES coarse page table contexts */
+ struct tf_coarse_page_table *coarse_pg_table[TF_MAX_COARSE_PAGES];
+
+ u32 coarse_pg_table_count;
+
+ /* Reference counter */
+ atomic_t ref_count;
+};
+
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * This structure describes the communication with the Secure World
+ *
+ * Note that this driver supports only one instance of the Secure World
+ */
+struct tf_comm {
+ /*
+ * The spin lock protecting concurrent access to the structure.
+ */
+ spinlock_t lock;
+
+ /*
+ * Bit vector with the following possible flags:
+ * - TF_COMM_FLAG_IRQ_REQUESTED: If set, indicates that
+	 *   the IRQ has been successfully requested.
+ * - TF_COMM_FLAG_TERMINATING: If set, indicates that the
+ * communication with the Secure World is being terminated.
+ * Transmissions to the Secure World are not permitted
+ * - TF_COMM_FLAG_W3B_ALLOCATED: If set, indicates that the
+ * W3B buffer has been allocated.
+ *
+ * This bit vector must be accessed with the kernel's atomic bitwise
+ * operations.
+ */
+ unsigned long flags;
+
+ /*
+ * The virtual address of the L1 shared buffer.
+ */
+ struct tf_l1_shared_buffer *l1_buffer;
+
+ /*
+ * The wait queue the client threads are waiting on.
+ */
+ wait_queue_head_t wait_queue;
+
+#ifdef CONFIG_TF_TRUSTZONE
+ /*
+ * The interrupt line used by the Secure World.
+ */
+ int soft_int_irq;
+
+ /* ----- W3B ----- */
+ /* shared memory descriptor to identify the W3B */
+ struct tf_shmem_desc w3b_shmem_desc;
+
+ /* Virtual address of the kernel allocated shared memory */
+ u32 w3b;
+
+ /* offset of data in shared memory coarse pages */
+ u32 w3b_shmem_offset;
+
+ u32 w3b_shmem_size;
+
+ struct tf_coarse_page_table_allocation_context
+ w3b_cpt_alloc_context;
+#endif
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * The SE SDP can only be initialized once...
+ */
+ int se_initialized;
+
+ /*
+ * Lock to be held by a client when executing an RPC
+ */
+ struct mutex rpc_mutex;
+
+ /*
+ * Lock to protect concurrent accesses to DMA channels
+ */
+ struct mutex dma_mutex;
+#endif
+};
+
+
+#define TF_COMM_FLAG_IRQ_REQUESTED (0)
+#define TF_COMM_FLAG_PA_AVAILABLE (1)
+#define TF_COMM_FLAG_TERMINATING (2)
+#define TF_COMM_FLAG_W3B_ALLOCATED (3)
+#define TF_COMM_FLAG_L1_SHARED_ALLOCATED (4)
+
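+/*
+ * These values are bit numbers for the kernel's atomic bitops on
+ * tf_comm.flags, as required by the comment on that field. The read
+ * side below is taken from tf_started_show() in tf_device.c; the write
+ * side is a presumed sketch:
+ *
+ *   if (test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags))
+ *           ...;
+ *   set_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags);
+ */
+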
+/*----------------------------------------------------------------------------*/
+
+struct tf_device_stats {
+ atomic_t stat_pages_allocated;
+ atomic_t stat_memories_allocated;
+ atomic_t stat_pages_locked;
+};
+
+/*
+ * This structure describes the information about one device handled by the
+ * driver. Note that the driver supports only a single device; see the global
+ * variable g_tf_dev.
+ */
+struct tf_device {
+ /*
+ * The kernel object for the device
+ */
+ struct kobject kobj;
+
+ /*
+ * The device number for the device.
+ */
+ dev_t dev_number;
+
+ /*
+ * Interfaces the char device with the kernel.
+ */
+ struct cdev cdev;
+
+#ifdef CONFIG_TF_TEEC
+ struct cdev cdev_teec;
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+ struct cdev cdev_ctrl;
+
+ /*
+ * Globals for CUS
+ */
+ /* Current key handles loaded in HWAs */
+ u32 aes1_key_context;
+ u32 des_key_context;
+ bool sham1_is_public;
+
+ /* Object used to serialize HWA accesses */
+ struct semaphore aes1_sema;
+ struct semaphore des_sema;
+ struct semaphore sha_sema;
+
+ /*
+ * An aligned and correctly shaped pre-allocated buffer used for DMA
+ * transfers
+ */
+ u32 dma_buffer_length;
+ u8 *dma_buffer;
+ dma_addr_t dma_buffer_phys;
+
+ /* Workspace allocated at boot time and reserved to the Secure World */
+ u32 workspace_addr;
+ u32 workspace_size;
+
+ /*
+ * A Mutex to provide exclusive locking of the ioctl()
+ */
+ struct mutex dev_mutex;
+#endif
+
+ /*
+ * Communications with the SM.
+ */
+ struct tf_comm sm;
+
+ /*
+ * Lists the connections attached to this device. A connection is
+ * created each time a user space application "opens" a file descriptor
+ * on the driver
+ */
+ struct list_head connection_list;
+
+ /*
+ * The spin lock used to protect concurrent access to the connection
+ * list.
+ */
+ spinlock_t connection_list_lock;
+
+ struct tf_device_stats stats;
+};
+
+/*----------------------------------------------------------------------------*/
+/*
+ * This type describes a connection state.
+ * This is used to determine whether a message is valid or not.
+ *
+ * Messages are only valid in a certain device state.
+ * Messages may be invalidated between the start of the ioctl call and the
+ * moment the message is sent to the Secure World.
+ *
+ * TF_CONN_STATE_NO_DEVICE_CONTEXT :
+ * The connection has no DEVICE_CONTEXT created and no
+ * CREATE_DEVICE_CONTEXT being processed by the Secure World
+ * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT :
+ * The connection has a CREATE_DEVICE_CONTEXT being processed by the Secure
+ * World
+ * TF_CONN_STATE_VALID_DEVICE_CONTEXT :
+ * The connection has a DEVICE_CONTEXT created and no
+ * DESTROY_DEVICE_CONTEXT is being processed by the Secure World
+ * TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT :
+ * The connection has a DESTROY_DEVICE_CONTEXT being processed by the Secure
+ * World
+ */
+enum TF_CONN_STATE {
+ TF_CONN_STATE_NO_DEVICE_CONTEXT = 0,
+ TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT,
+ TF_CONN_STATE_VALID_DEVICE_CONTEXT,
+ TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT
+};
+
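+/*
+ * The state field is always sampled under state_lock; a minimal sketch
+ * of the pattern used by tf_close() in tf_conn.c:
+ *
+ *   spin_lock(&connection->state_lock);
+ *   state = connection->state;
+ *   spin_unlock(&connection->state_lock);
+ *   if (state == TF_CONN_STATE_VALID_DEVICE_CONTEXT)
+ *           tf_destroy_device_context(connection);
+ */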
+
+/*
+ * This type describes the status of the command.
+ *
+ * PENDING:
+ * The initial state; the command has not been sent yet.
+ * SENT:
+ * The command has been sent, we are waiting for an answer.
+ * ABORTED:
+ * The command cannot be sent because the device context is invalid.
+ * Note that this only covers the case where some other thread
+ * sent a DESTROY_DEVICE_CONTEXT command.
+ */
+enum TF_COMMAND_STATE {
+ TF_COMMAND_STATE_PENDING = 0,
+ TF_COMMAND_STATE_SENT,
+ TF_COMMAND_STATE_ABORTED
+};
+
+/*
+ * The origin of connection parameters such as login data and
+ * memory reference pointers.
+ *
+ * PROCESS: the calling process. All arguments must be validated.
+ * KERNEL: kernel code. All arguments can be trusted by this driver.
+ */
+enum TF_CONNECTION_OWNER {
+ TF_CONNECTION_OWNER_PROCESS = 0,
+ TF_CONNECTION_OWNER_KERNEL,
+};
+
+
+/*
+ * This structure describes a connection to the driver
+ * A connection is created each time an application opens a file descriptor on
+ * the driver
+ */
+struct tf_connection {
+ /*
+ * Identifies the connection in the list of the connections attached to
+ * the same device.
+ */
+ struct list_head list;
+
+ /*
+ * State of the connection.
+ */
+ enum TF_CONN_STATE state;
+
+ /*
+ * A pointer to the corresponding device structure
+ */
+ struct tf_device *dev;
+
+ /*
+ * A spinlock to use to access state
+ */
+ spinlock_t state_lock;
+
+ /*
+ * Counts the number of operations currently pending on the connection.
+ * (for debug only)
+ */
+ atomic_t pending_op_count;
+
+ /*
+ * A handle for the device context
+ */
+ u32 device_context;
+
+ /*
+ * Lists the used shared memory descriptors
+ */
+ struct list_head used_shmem_list;
+
+ /*
+ * Lists the free shared memory descriptors
+ */
+ struct list_head free_shmem_list;
+
+ /*
+ * A mutex to use to access this structure
+ */
+ struct mutex shmem_mutex;
+
+ /*
+ * Counts the number of shared memories registered.
+ */
+ atomic_t shmem_count;
+
+ /*
+	 * Array used to retrieve the memory properties (VMAs) when
+ * registering shared memory through REGISTER_SHARED_MEMORY
+ * messages
+ */
+ struct vm_area_struct **vmas;
+
+ /*
+ * coarse page table allocation context
+ */
+ struct tf_coarse_page_table_allocation_context cpt_alloc_context;
+
+ /* The origin of connection parameters such as login data and
+ memory reference pointers. */
+ enum TF_CONNECTION_OWNER owner;
+
+#ifdef CONFIG_TF_ZEBRA
+ /* Lists all the Cryptoki Update Shortcuts */
+ struct list_head shortcut_list;
+
+ /* Lock to protect concurrent accesses to shortcut_list */
+ spinlock_t shortcut_list_lock;
+#endif
+
+#ifdef CONFIG_TF_ION
+ struct ion_client *ion_client;
+#endif
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * The operation_id field of a message points to this structure.
+ * It is used to identify the thread that triggered the message transmission
+ * Whoever reads an answer can wake up that thread using the completion event
+ */
+struct tf_answer_struct {
+ bool answer_copied;
+ union tf_answer *answer;
+};
+
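+/*
+ * A hedged sketch of how the sending side presumably sets this up (the
+ * actual wiring lives in tf_comm.c, which also owns the wake-up):
+ *
+ *   struct tf_answer_struct answer_struct;
+ *
+ *   answer_struct.answer = answer;
+ *   answer_struct.answer_copied = false;
+ *   command->header.operation_id = (u32)&answer_struct;
+ *   // whoever reads the answer copies it into *answer, sets
+ *   // answer_copied and wakes the sending thread
+ */
+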
+/*----------------------------------------------------------------------------*/
+
+/**
+ * The ASCII C string representation of the base name of the devices managed by
+ * this driver.
+ */
+#define TF_DEVICE_BASE_NAME "tf_driver"
+
+
+/**
+ * The major and minor numbers of the registered character device driver.
+ * Only 1 instance of the driver is supported.
+ */
+#define TF_DEVICE_MINOR_NUMBER (0)
+
+struct tf_device *tf_get_device(void);
+
+#define CLEAN_CACHE_CFG_MASK (~0xC) /* 1111 0011 */
+
+/*----------------------------------------------------------------------------*/
+/*
+ * Kernel Differences
+ */
+
+#ifdef CONFIG_ANDROID
+#define GROUP_INFO get_current_groups()
+#else
+#define GROUP_INFO (current->group_info)
+#endif
+
+#endif /* !defined(__TF_DEFS_H__) */
diff --git a/security/tf_driver/tf_device.c b/security/tf_driver/tf_device.c
new file mode 100644
index 000000000000..9db0a41a6fde
--- /dev/null
+++ b/security/tf_driver/tf_device.c
@@ -0,0 +1,873 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * Copyright (C) 2011-2012 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/pm.h>
+#include <linux/syscore_ops.h>
+#include <linux/vmalloc.h>
+#include <linux/signal.h>
+#ifdef CONFIG_ANDROID
+#include <linux/device.h>
+#endif
+
+#include <trace/events/nvsecurity.h>
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+#include "tf_comm.h"
+#ifdef CONFIG_TF_ZEBRA
+#include <plat/cpu.h>
+#include "tf_zebra.h"
+#endif
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+#include "tf_crypto.h"
+#endif
+
+#include "s_version.h"
+
+/*----------------------------------------------------------------------------
+ * Forward Declarations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Creates and registers the device to be managed by the specified driver.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static int tf_device_register(void);
+
+
+/*
+ * Implements the device Open callback.
+ */
+static int tf_device_open(
+ struct inode *inode,
+ struct file *file);
+
+
+/*
+ * Implements the device Release callback.
+ */
+static int tf_device_release(
+ struct inode *inode,
+ struct file *file);
+
+
+/*
+ * Implements the device ioctl callback.
+ */
+static long tf_device_ioctl(
+ struct file *file,
+ unsigned int ioctl_num,
+ unsigned long ioctl_param);
+
+
+/*
+ * Implements the device shutdown callback.
+ */
+static void tf_device_shutdown(void);
+
+
+/*
+ * Implements the device suspend callback.
+ */
+static int tf_device_suspend(void);
+
+
+/*
+ * Implements the device resume callback.
+ */
+static void tf_device_resume(void);
+
+
+/*---------------------------------------------------------------------------
+ * Module Parameters
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The device major number used to register a unique character device driver.
+ * The default value is 122.
+ */
+static int device_major_number = 122;
+
+module_param(device_major_number, int, 0000);
+MODULE_PARM_DESC(device_major_number,
+ "The device major number used to register a unique character "
+ "device driver");
+
+#ifdef CONFIG_TF_TRUSTZONE
+/**
+ * The softint interrupt line used by the Secure World.
+ */
+static int soft_interrupt = -1;
+
+module_param(soft_interrupt, int, 0000);
+MODULE_PARM_DESC(soft_interrupt,
+ "The softint interrupt line used by the Secure world");
+#endif
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+unsigned tf_debug_level = UINT_MAX;
+module_param_named(debug, tf_debug_level, uint, 0644);
+#endif
+
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+char *tf_integrity_hmac_sha256_expected_value;
+module_param_named(hmac_sha256, tf_integrity_hmac_sha256_expected_value,
+ charp, 0444);
+
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+unsigned tf_fault_injection_mask;
+module_param_named(fault, tf_fault_injection_mask, uint, 0644);
+#endif
+
+int tf_self_test_blkcipher_align;
+module_param_named(post_align, tf_self_test_blkcipher_align, int, 0644);
+int tf_self_test_blkcipher_use_vmalloc;
+module_param_named(post_vmalloc, tf_self_test_blkcipher_use_vmalloc, int, 0644);
+#endif
+
+#ifdef CONFIG_ANDROID
+static struct class *tf_class;
+#endif
+
+/*----------------------------------------------------------------------------
+ * Global Variables
+ *----------------------------------------------------------------------------*/
+
+/*
+ * tf_driver character device definitions.
+ * read and write methods are not defined
+ * and will return an error if used by user space
+ */
+static const struct file_operations g_tf_device_file_ops = {
+ .owner = THIS_MODULE,
+ .open = tf_device_open,
+ .release = tf_device_release,
+ .unlocked_ioctl = tf_device_ioctl,
+ .llseek = no_llseek,
+};
+
+
+static struct syscore_ops g_tf_device_syscore_ops = {
+ .shutdown = tf_device_shutdown,
+ .suspend = tf_device_suspend,
+ .resume = tf_device_resume,
+};
+
+/* The single device supported by this driver */
+static struct tf_device g_tf_dev;
+
+/*----------------------------------------------------------------------------
+ * Implementations
+ *----------------------------------------------------------------------------*/
+
+struct tf_device *tf_get_device(void)
+{
+ return &g_tf_dev;
+}
+
+/*
+ * sysfs entries
+ */
+struct tf_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct tf_device *, char *);
+ ssize_t (*store)(struct tf_device *, const char *, size_t);
+};
+
+/*
+ * sysfs entry showing allocation stats
+ */
+static ssize_t info_show(struct tf_device *dev, char *buf)
+{
+ struct tf_device_stats *dev_stats = &dev->stats;
+
+ return snprintf(buf, PAGE_SIZE,
+ "stat.memories.allocated: %d\n"
+ "stat.pages.allocated: %d\n"
+ "stat.pages.locked: %d\n",
+ atomic_read(&dev_stats->stat_memories_allocated),
+ atomic_read(&dev_stats->stat_pages_allocated),
+ atomic_read(&dev_stats->stat_pages_locked));
+}
+static struct tf_sysfs_entry tf_info_entry = __ATTR_RO(info);
+
+#ifdef CONFIG_TF_ZEBRA
+/*
+ * sysfs entry showing whether secure world is up and running
+ */
+static ssize_t tf_started_show(struct tf_device *dev, char *buf)
+{
+ int tf_started = test_bit(TF_COMM_FLAG_PA_AVAILABLE,
+ &dev->sm.flags);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", tf_started ? "yes" : "no");
+}
+static struct tf_sysfs_entry tf_started_entry =
+ __ATTR_RO(tf_started);
+
+static ssize_t workspace_addr_show(struct tf_device *dev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", dev->workspace_addr);
+}
+static struct tf_sysfs_entry tf_workspace_addr_entry =
+ __ATTR_RO(workspace_addr);
+
+static ssize_t workspace_size_show(struct tf_device *dev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", dev->workspace_size);
+}
+static struct tf_sysfs_entry tf_workspace_size_entry =
+ __ATTR_RO(workspace_size);
+#endif
+
+static ssize_t tf_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *page)
+{
+ struct tf_sysfs_entry *entry = container_of(attr, struct tf_sysfs_entry,
+ attr);
+ struct tf_device *dev = container_of(kobj, struct tf_device, kobj);
+
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(dev, page);
+}
+
+static ssize_t tf_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t length)
+{
+ struct tf_sysfs_entry *entry = container_of(attr, struct tf_sysfs_entry,
+ attr);
+ struct tf_device *dev = container_of(kobj, struct tf_device, kobj);
+
+ if (!entry->store)
+ return -EIO;
+
+ return entry->store(dev, page, length);
+}
+
+static void tf_kobj_release(struct kobject *kobj) {}
+
+static struct attribute *tf_default_attrs[] = {
+ &tf_info_entry.attr,
+#ifdef CONFIG_TF_ZEBRA
+ &tf_started_entry.attr,
+ &tf_workspace_addr_entry.attr,
+ &tf_workspace_size_entry.attr,
+#endif
+ NULL,
+};
+static const struct sysfs_ops tf_sysfs_ops = {
+ .show = tf_attr_show,
+ .store = tf_attr_store,
+};
+static struct kobj_type tf_ktype = {
+ .release = tf_kobj_release,
+ .sysfs_ops = &tf_sysfs_ops,
+ .default_attrs = tf_default_attrs
+};
+
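+/*
+ * Adding another attribute follows the same pattern; a hypothetical
+ * example (the "foo" name and its contents are illustrative only):
+ *
+ *   static ssize_t foo_show(struct tf_device *dev, char *buf)
+ *   {
+ *           return snprintf(buf, PAGE_SIZE, "%d\n",
+ *                   atomic_read(&dev->stats.stat_pages_locked));
+ *   }
+ *   static struct tf_sysfs_entry tf_foo_entry = __ATTR_RO(foo);
+ *
+ * &tf_foo_entry.attr would then be added to tf_default_attrs[];
+ * tf_attr_show() dispatches to it through container_of().
+ */
+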
+/*----------------------------------------------------------------------------*/
+
+#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
+static char *smc_mem;
+module_param(smc_mem, charp, S_IRUGO);
+#endif
+
+/*
+ * First routine called when the kernel module is loaded
+ */
+static int __init tf_device_register(void)
+{
+ int error;
+ struct tf_device *dev = &g_tf_dev;
+
+ dprintk(KERN_INFO "tf_device_register()\n");
+
+ /*
+ * Initialize the device
+ */
+ dev->dev_number = MKDEV(device_major_number,
+ TF_DEVICE_MINOR_NUMBER);
+ cdev_init(&dev->cdev, &g_tf_device_file_ops);
+ dev->cdev.owner = THIS_MODULE;
+
+ INIT_LIST_HEAD(&dev->connection_list);
+ spin_lock_init(&dev->connection_list_lock);
+
+#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
+ error = (*tf_comm_early_init)();
+ if (error)
+ goto module_early_init_failed;
+
+ error = tf_device_mshield_init(smc_mem);
+ if (error)
+ goto mshield_init_failed;
+
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+ error = tf_crypto_hmac_module_init();
+ if (error)
+ goto hmac_init_failed;
+
+ error = tf_self_test_register_device();
+ if (error)
+ goto self_test_register_device_failed;
+#endif
+#endif
+
+ /* register the sysfs object driver stats */
+ error = kobject_init_and_add(&dev->kobj, &tf_ktype, NULL, "%s",
+ TF_DEVICE_BASE_NAME);
+ if (error) {
+ printk(KERN_ERR "tf_device_register(): "
+ "kobject_init_and_add failed (error %d)!\n", error);
+ kobject_put(&dev->kobj);
+ goto kobject_init_and_add_failed;
+ }
+
+ /*
+ * Register the system device.
+ */
+ register_syscore_ops(&g_tf_device_syscore_ops);
+
+ /*
+ * Register the char device.
+ */
+ printk(KERN_INFO "Registering char device %s (%u:%u)\n",
+ TF_DEVICE_BASE_NAME,
+ MAJOR(dev->dev_number),
+ MINOR(dev->dev_number));
+ error = register_chrdev_region(dev->dev_number, 1,
+ TF_DEVICE_BASE_NAME);
+ if (error != 0) {
+ printk(KERN_ERR "tf_device_register():"
+ " register_chrdev_region failed (error %d)!\n",
+ error);
+ goto register_chrdev_region_failed;
+ }
+
+ error = cdev_add(&dev->cdev, dev->dev_number, 1);
+ if (error != 0) {
+ printk(KERN_ERR "tf_device_register(): "
+ "cdev_add failed (error %d)!\n",
+ error);
+ goto cdev_add_failed;
+ }
+
+ /*
+ * Initialize the communication with the Secure World.
+ */
+#ifdef CONFIG_TF_TRUSTZONE
+ dev->sm.soft_int_irq = soft_interrupt;
+#endif
+ error = tf_init(&g_tf_dev.sm);
+ if (error != S_SUCCESS) {
+ dprintk(KERN_ERR "tf_device_register(): "
+ "tf_init failed (error %d)!\n",
+ error);
+ goto init_failed;
+ }
+
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+ error = tf_self_test_post_init(&(g_tf_dev.kobj));
+ /* N.B. error > 0 indicates a POST failure, which will not
+ prevent the module from loading. */
+ if (error < 0) {
+ dprintk(KERN_ERR "tf_device_register(): "
+ "tf_self_test_post_vectors failed (error %d)!\n",
+ error);
+ goto post_failed;
+ }
+#endif
+
+#ifdef CONFIG_ANDROID
+ tf_class = class_create(THIS_MODULE, TF_DEVICE_BASE_NAME);
+ device_create(tf_class, NULL,
+ dev->dev_number,
+ NULL, TF_DEVICE_BASE_NAME);
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * Initializes the /dev/tf_ctrl device node.
+ */
+ error = tf_ctrl_device_register();
+ if (error)
+ goto ctrl_failed;
+#endif
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+ address_cache_property((unsigned long) &tf_device_register);
+#endif
+ /*
+ * Successful completion.
+ */
+
+ dprintk(KERN_INFO "tf_device_register(): Success\n");
+ return 0;
+
+ /*
+ * Error: undo all operations in the reverse order
+ */
+#ifdef CONFIG_TF_ZEBRA
+ctrl_failed:
+#endif
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+ tf_self_test_post_exit();
+post_failed:
+#endif
+init_failed:
+ cdev_del(&dev->cdev);
+cdev_add_failed:
+ unregister_chrdev_region(dev->dev_number, 1);
+register_chrdev_region_failed:
+ unregister_syscore_ops(&g_tf_device_syscore_ops);
+kobject_init_and_add_failed:
+ kobject_del(&g_tf_dev.kobj);
+
+#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+ tf_self_test_unregister_device();
+self_test_register_device_failed:
+ tf_crypto_hmac_module_exit();
+hmac_init_failed:
+#endif
+ tf_device_mshield_exit();
+mshield_init_failed:
+module_early_init_failed:
+#endif
+ dprintk(KERN_INFO "tf_device_register(): Failure (error %d)\n",
+ error);
+ return error;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_open(struct inode *inode, struct file *file)
+{
+ int error;
+ struct tf_device *dev = &g_tf_dev;
+ struct tf_connection *connection = NULL;
+
+ dprintk(KERN_INFO "tf_device_open(%u:%u, %p)\n",
+ imajor(inode), iminor(inode), file);
+
+ /* Dummy lseek for non-seekable driver */
+ error = nonseekable_open(inode, file);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "nonseekable_open failed (error %d)!\n",
+ file, error);
+ goto error;
+ }
+
+#ifndef CONFIG_ANDROID
+ /*
+	 * Check file flags. We only authorize O_RDWR access.
+ */
+ if (file->f_flags != O_RDWR) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "Invalid access mode %u\n",
+ file, file->f_flags);
+ error = -EACCES;
+ goto error;
+ }
+#endif
+
+ /*
+ * Open a new connection.
+ */
+
+ error = tf_open(dev, file, &connection);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "tf_open failed (error %d)!\n",
+ file, error);
+ goto error;
+ }
+
+ file->private_data = connection;
+
+ /*
+	 * Send the CreateDeviceContext command to the Secure World
+ */
+ error = tf_create_device_context(connection);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "tf_create_device_context failed (error %d)!\n",
+ file, error);
+ goto error1;
+ }
+
+ /*
+ * Successful completion.
+ */
+
+ dprintk(KERN_INFO "tf_device_open(%p): Success (connection=%p)\n",
+ file, connection);
+ return 0;
+
+ /*
+ * Error handling.
+ */
+
+error1:
+ tf_close(connection);
+error:
+ dprintk(KERN_INFO "tf_device_open(%p): Failure (error %d)\n",
+ file, error);
+ return error;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_release(struct inode *inode, struct file *file)
+{
+ struct tf_connection *connection;
+
+ dprintk(KERN_INFO "tf_device_release(%u:%u, %p)\n",
+ imajor(inode), iminor(inode), file);
+
+ connection = tf_conn_from_file(file);
+ tf_close(connection);
+
+ dprintk(KERN_INFO "tf_device_release(%p): Success\n", file);
+ return 0;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static long tf_device_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ int result = S_SUCCESS;
+ struct tf_connection *connection;
+ union tf_command command;
+ struct tf_command_header header;
+ union tf_answer answer;
+ u32 command_size;
+ u32 answer_size;
+ void *user_answer;
+
+ dprintk(KERN_INFO "tf_device_ioctl(%p, %u, %p)\n",
+ file, ioctl_num, (void *) ioctl_param);
+
+ switch (ioctl_num) {
+ case IOCTL_TF_GET_VERSION:
+ /* ioctl is asking for the driver interface version */
+ result = TF_DRIVER_INTERFACE_VERSION;
+ goto exit;
+
+#ifdef CONFIG_TF_ION
+ case IOCTL_TF_ION_REGISTER: {
+ int ion_register;
+ /* ioctl is asking to register an ion handle */
+ if (copy_from_user(&ion_register,
+ (int *) ioctl_param,
+ sizeof(int))) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "copy_from_user failed\n",
+ file);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ connection = tf_conn_from_file(file);
+ BUG_ON(connection == NULL);
+
+ /* Initialize ION connection */
+ if (connection->ion_client == NULL) {
+ connection->ion_client = ion_client_create(
+ zebra_ion_device,
+ (1 << ION_HEAP_TYPE_CARVEOUT),
+ "tf");
+ }
+
+ if (connection->ion_client == NULL) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "unable to create ion client\n",
+ file);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ /*
+		 * TODO: We should use a reference count on this handle so
+		 * that it cannot be unregistered while it is in use.
+ */
+ return (long)ion_import_fd(connection->ion_client, ion_register);
+ }
+
+ case IOCTL_TF_ION_UNREGISTER: {
+ int ion_register;
+ /* ioctl is asking to unregister an ion handle */
+
+ if (copy_from_user(&ion_register,
+ (int *) ioctl_param,
+ sizeof(int))) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "copy_from_user failed\n",
+ file);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ connection = tf_conn_from_file(file);
+ BUG_ON(connection == NULL);
+
+ if (connection->ion_client == NULL) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "ion client does not exist\n",
+ file);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ ion_free(connection->ion_client,
+ (struct ion_handle *) ion_register);
+
+ return S_SUCCESS;
+ }
+#endif
+
+ case IOCTL_TF_EXCHANGE:
+ /*
+ * ioctl is asking to perform a message exchange with the Secure
+ * Module
+ */
+
+ /*
+ * Make a local copy of the data from the user application
+ * This routine checks the data is readable
+ *
+ * Get the header first.
+ */
+ if (copy_from_user(&header,
+ (struct tf_command_header *)ioctl_param,
+ sizeof(struct tf_command_header))) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Cannot access ioctl parameter %p\n",
+ file, (void *) ioctl_param);
+ result = -EFAULT;
+ goto exit;
+ }
+
+		/* size in u32 (32-bit) words */
+ command_size = header.message_size +
+ sizeof(struct tf_command_header)/sizeof(u32);
+ if (command_size > sizeof(command)/sizeof(u32)) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Buffer overflow: too many bytes to copy %d\n",
+ file, command_size);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ if (copy_from_user(&command,
+ (union tf_command *)ioctl_param,
+ command_size * sizeof(u32))) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Cannot access ioctl parameter %p\n",
+ file, (void *) ioctl_param);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ connection = tf_conn_from_file(file);
+ BUG_ON(connection == NULL);
+
+ /*
+ * The answer memory space address is in the operation_id field
+ */
+ user_answer = (void *) command.header.operation_id;
+
+ atomic_inc(&(connection->pending_op_count));
+
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Sending message type 0x%08x\n",
+ file, command.header.message_type);
+
+ switch (command.header.message_type) {
+ case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+ result = tf_open_client_session(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+ result = tf_close_client_session(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+ result = tf_register_shared_memory(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+ result = tf_release_shared_memory(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+ trace_invoke_client_command(NVSEC_INVOKE_CMD_START);
+ result = tf_invoke_client_command(connection,
+ &command, &answer);
+ trace_invoke_client_command(NVSEC_INVOKE_CMD_DONE);
+ break;
+
+ case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+ result = tf_cancel_client_command(connection,
+ &command, &answer);
+ break;
+
+ default:
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Incorrect message type (0x%08x)!\n",
+ connection, command.header.message_type);
+ result = -EOPNOTSUPP;
+ break;
+ }
+
+ atomic_dec(&(connection->pending_op_count));
+
+ if (result != 0) {
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Operation returning error code 0x%08x)!\n",
+ file, result);
+ goto exit;
+ }
+
+ /*
+ * Copy the answer back to the user space application.
+ * The driver does not check this field, only copy back to user
+ * space the data handed over by Secure World
+ */
+ answer_size = answer.header.message_size +
+ sizeof(struct tf_answer_header)/sizeof(u32);
+ if (copy_to_user(user_answer,
+ &answer, answer_size * sizeof(u32))) {
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Failed to copy back the full command "
+ "answer to %p\n", file, user_answer);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_device_ioctl(%p): Success\n", file);
+ break;
+
+	case IOCTL_TF_GET_DESCRIPTION: {
+		/* ioctl asking for the version information buffer */
+		struct tf_version_information_buffer *info_buffer;
+
+		dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION:(%p, %u, %p)\n",
+			file, ioctl_num, (void *) ioctl_param);
+
+		info_buffer =
+			((struct tf_version_information_buffer *) ioctl_param);
+
+		dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION1: "
+			"driver_description=\"%.64s\"\n", S_VERSION_STRING);
+
+		if (copy_to_user(info_buffer->driver_description,
+				S_VERSION_STRING,
+				strlen(S_VERSION_STRING) + 1)) {
+			dprintk(KERN_ERR "tf_device_ioctl(%p): "
+				"Failed to copy back the driver description "
+				"to %p\n",
+				file, info_buffer->driver_description);
+			result = -EFAULT;
+			goto exit;
+		}
+
+		dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION2: "
+			"secure_world_description=\"%.64s\"\n",
+			tf_get_description(&g_tf_dev.sm));
+
+		if (copy_to_user(info_buffer->secure_world_description,
+				tf_get_description(&g_tf_dev.sm),
+				TF_DESCRIPTION_BUFFER_LENGTH)) {
+			dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+				"Failed to copy back the secure world "
+				"description to %p\n",
+				file, info_buffer->secure_world_description);
+			result = -EFAULT;
+			goto exit;
+		}
+		break;
+	}
+
+ default:
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Unknown IOCTL code 0x%08x!\n",
+ file, ioctl_num);
+ result = -EOPNOTSUPP;
+ goto exit;
+ }
+
+exit:
+ return result;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static void tf_device_shutdown(void)
+{
+	if (tf_power_management(&g_tf_dev.sm,
+			TF_POWER_OPERATION_SHUTDOWN) < 0)
+		dprintk(KERN_ERR "tf_device_shutdown failed\n");
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_suspend(void)
+{
+ dprintk(KERN_INFO "tf_device_suspend: Enter\n");
+ return tf_power_management(&g_tf_dev.sm,
+ TF_POWER_OPERATION_HIBERNATE);
+}
+
+
+/*----------------------------------------------------------------------------*/
+
+static void tf_device_resume(void)
+{
+	if (tf_power_management(&g_tf_dev.sm,
+			TF_POWER_OPERATION_RESUME) < 0)
+		dprintk(KERN_ERR "tf_device_resume failed\n");
+}
+
+
+/*----------------------------------------------------------------------------*/
+
+module_init(tf_device_register);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Trusted Logic S.A.");
diff --git a/security/tf_driver/tf_protocol.h b/security/tf_driver/tf_protocol.h
new file mode 100644
index 000000000000..cd2300df6e02
--- /dev/null
+++ b/security/tf_driver/tf_protocol.h
@@ -0,0 +1,699 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_PROTOCOL_H__
+#define __TF_PROTOCOL_H__
+
+/*----------------------------------------------------------------------------
+ *
+ * This header file defines the structure used in the SChannel Protocol.
+ * See your Product Reference Manual for a specification of the SChannel
+ * protocol.
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The driver interface version returned by the version ioctl
+ */
+#define TF_DRIVER_INTERFACE_VERSION 0x04000000
+
+/*
+ * Protocol version handling
+ */
+#define TF_S_PROTOCOL_MAJOR_VERSION (0x06)
+#define GET_PROTOCOL_MAJOR_VERSION(a) ((a) >> 24)
+#define GET_PROTOCOL_MINOR_VERSION(a) (((a) >> 16) & 0xFF)
+
+/*
+ * The S flag of the config_flag_s register.
+ */
+#define TF_CONFIG_FLAG_S (1 << 3)
+
+/*
+ * The TimeSlot field of the sync_serial_n register.
+ */
+#define TF_SYNC_SERIAL_TIMESLOT_N (1)
+
+/*
+ * status_s related defines.
+ */
+#define TF_STATUS_P_MASK (0x00000001)
+#define TF_STATUS_POWER_STATE_SHIFT (3)
+#define TF_STATUS_POWER_STATE_MASK (0x1F << TF_STATUS_POWER_STATE_SHIFT)
+
+/*
+ * Possible power states of the POWER_STATE field of the status_s register
+ */
+#define TF_POWER_MODE_COLD_BOOT (0)
+#define TF_POWER_MODE_WARM_BOOT (1)
+#define TF_POWER_MODE_ACTIVE (3)
+#define TF_POWER_MODE_READY_TO_SHUTDOWN (5)
+#define TF_POWER_MODE_READY_TO_HIBERNATE (7)
+#define TF_POWER_MODE_WAKEUP (8)
+#define TF_POWER_MODE_PANIC (15)
+
+/*
+ * Possible command values for MANAGEMENT commands
+ */
+#define TF_MANAGEMENT_HIBERNATE (1)
+#define TF_MANAGEMENT_SHUTDOWN (2)
+#define TF_MANAGEMENT_PREPARE_FOR_CORE_OFF (3)
+#define TF_MANAGEMENT_RESUME_FROM_CORE_OFF (4)
+
+/*
+ * The capacity of the Normal Word message queue, in number of slots.
+ */
+#define TF_N_MESSAGE_QUEUE_CAPACITY (512)
+
+/*
+ * The capacity of the Secure World message answer queue, in number of slots.
+ */
+#define TF_S_ANSWER_QUEUE_CAPACITY (256)
+
+/*
+ * The value of the S-timeout register indicating an infinite timeout.
+ */
+#define TF_S_TIMEOUT_0_INFINITE (0xFFFFFFFF)
+#define TF_S_TIMEOUT_1_INFINITE (0xFFFFFFFF)
+
+/*
+ * The value of the S-timeout register indicating an immediate timeout.
+ */
+#define TF_S_TIMEOUT_0_IMMEDIATE (0x0)
+#define TF_S_TIMEOUT_1_IMMEDIATE (0x0)
+
+/*
+ * Identifies the get protocol version SMC.
+ */
+#define TF_SMC_GET_PROTOCOL_VERSION (0xFFFFFFFB)
+
+/*
+ * Identifies the init SMC.
+ */
+#define TF_SMC_INIT (0xFFFFFFFF)
+
+/*
+ * Identifies the reset irq SMC.
+ */
+#define TF_SMC_RESET_IRQ (0xFFFFFFFE)
+
+/*
+ * Identifies the SET_W3B SMC.
+ */
+#define TF_SMC_WAKE_UP (0xFFFFFFFD)
+
+/*
+ * Identifies the STOP SMC.
+ */
+#define TF_SMC_STOP (0xFFFFFFFC)
+
+/*
+ * Identifies the n-yield SMC.
+ */
+#define TF_SMC_N_YIELD (0x00000003)
+
+
+/* Possible stop commands for SMC_STOP */
+#define SCSTOP_HIBERNATE (0xFFFFFFE1)
+#define SCSTOP_SHUTDOWN (0xFFFFFFE2)
+
+/*
+ * Representation of a UUID.
+ */
+struct tf_uuid {
+ u32 time_low;
+ u16 time_mid;
+ u16 time_hi_and_version;
+ u8 clock_seq_and_node[8];
+};
+
+
+/**
+ * Command parameters.
+ */
+struct tf_command_param_value {
+ u32 a;
+ u32 b;
+};
+
+struct tf_command_param_temp_memref {
+ u32 descriptor; /* data pointer for exchange message.*/
+ u32 size;
+ u32 offset;
+};
+
+struct tf_command_param_memref {
+ u32 block;
+ u32 size;
+ u32 offset;
+};
+
+union tf_command_param {
+ struct tf_command_param_value value;
+ struct tf_command_param_temp_memref temp_memref;
+ struct tf_command_param_memref memref;
+};
+
+/**
+ * Answer parameters.
+ */
+struct tf_answer_param_value {
+ u32 a;
+ u32 b;
+};
+
+struct tf_answer_param_size {
+ u32 _ignored;
+ u32 size;
+};
+
+union tf_answer_param {
+ struct tf_answer_param_size size;
+ struct tf_answer_param_value value;
+};
+
+/*
+ * Descriptor tables capacity
+ */
+#define TF_MAX_W3B_COARSE_PAGES (2)
+/* TF_MAX_COARSE_PAGES is the number of level 1 descriptors (describing
+ * 1MB each) that can be shared with the secure world in a single registered
+ * shared memory block. It must be kept in sync with
+ * SCHANNEL6_MAX_DESCRIPTORS_PER_REGISTERED_SHARED_MEM in the SChannel
+ * protocol spec. */
+#define TF_MAX_COARSE_PAGES 128
+#define TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT (8)
+#define TF_DESCRIPTOR_TABLE_CAPACITY \
+ (1 << TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT)
+#define TF_DESCRIPTOR_TABLE_CAPACITY_MASK \
+ (TF_DESCRIPTOR_TABLE_CAPACITY - 1)
+/* Each shared-memory coarse page table can map up to 1MB */
+#define TF_MAX_COARSE_PAGE_MAPPED_SIZE \
+ (PAGE_SIZE * TF_DESCRIPTOR_TABLE_CAPACITY)
+/* Shared memories cannot exceed 8MB */
+#define TF_MAX_SHMEM_SIZE \
+ (TF_MAX_COARSE_PAGE_MAPPED_SIZE << 3)
+
+/*
+ * Buffer size for version description fields
+ */
+#define TF_DESCRIPTION_BUFFER_LENGTH 64
+
+/*
+ * Shared memory type flags.
+ */
+#define TF_SHMEM_TYPE_READ (0x00000001)
+#define TF_SHMEM_TYPE_WRITE (0x00000002)
+
+/*
+ * Shared mem flags
+ */
+#define TF_SHARED_MEM_FLAG_INPUT 1
+#define TF_SHARED_MEM_FLAG_OUTPUT 2
+#define TF_SHARED_MEM_FLAG_INOUT 3
+
+
+/*
+ * Parameter types
+ */
+#define TF_PARAM_TYPE_NONE 0x0
+#define TF_PARAM_TYPE_VALUE_INPUT 0x1
+#define TF_PARAM_TYPE_VALUE_OUTPUT 0x2
+#define TF_PARAM_TYPE_VALUE_INOUT 0x3
+#define TF_PARAM_TYPE_MEMREF_TEMP_INPUT 0x5
+#define TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT 0x6
+#define TF_PARAM_TYPE_MEMREF_TEMP_INOUT 0x7
+#define TF_PARAM_TYPE_MEMREF_ION_HANDLE 0xB
+#define TF_PARAM_TYPE_MEMREF_INPUT 0xD
+#define TF_PARAM_TYPE_MEMREF_OUTPUT 0xE
+#define TF_PARAM_TYPE_MEMREF_INOUT 0xF
+
+#define TF_PARAM_TYPE_MEMREF_FLAG 0x4
+#define TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG 0x8
+
+
+#define TF_MAKE_PARAM_TYPES(t0, t1, t2, t3) \
+ ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
+#define TF_GET_PARAM_TYPE(t, i) (((t) >> (4 * (i))) & 0xF)
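+/*
+ * For example, an operation with an input value in slot 0 and an in/out
+ * temporary memref in slot 1 is encoded as
+ *   TF_MAKE_PARAM_TYPES(TF_PARAM_TYPE_VALUE_INPUT,
+ *                       TF_PARAM_TYPE_MEMREF_TEMP_INOUT,
+ *                       TF_PARAM_TYPE_NONE, TF_PARAM_TYPE_NONE) == 0x0071
+ * and decodes with TF_GET_PARAM_TYPE(0x0071, 1) == 0x7.
+ */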
+
+/*
+ * Login types.
+ */
+#define TF_LOGIN_PUBLIC 0x00000000
+#define TF_LOGIN_USER 0x00000001
+#define TF_LOGIN_GROUP 0x00000002
+#define TF_LOGIN_APPLICATION 0x00000004
+#define TF_LOGIN_APPLICATION_USER 0x00000005
+#define TF_LOGIN_APPLICATION_GROUP 0x00000006
+#define TF_LOGIN_AUTHENTICATION 0x80000000
+#define TF_LOGIN_PRIVILEGED 0x80000002
+
+/* Login variants */
+
+#define TF_LOGIN_VARIANT(main_type, os, variant) \
+ ((main_type) | (1 << 27) | ((os) << 16) | ((variant) << 8))
+
+#define TF_LOGIN_GET_MAIN_TYPE(type) \
+ ((type) & ~TF_LOGIN_VARIANT(0, 0xFF, 0xFF))
+
+#define TF_LOGIN_OS_ANY 0x00
+#define TF_LOGIN_OS_LINUX 0x01
+#define TF_LOGIN_OS_ANDROID 0x04
+
+/* OS-independent variants */
+#define TF_LOGIN_USER_NONE \
+ TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_GROUP_NONE \
+ TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_APPLICATION_USER_NONE \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_AUTHENTICATION, TF_LOGIN_OS_ANY, 0x01)
+#define TF_LOGIN_PRIVILEGED_KERNEL \
+ TF_LOGIN_VARIANT(TF_LOGIN_PRIVILEGED, TF_LOGIN_OS_ANY, 0x01)
+
+/* Linux variants */
+#define TF_LOGIN_USER_LINUX_EUID \
+ TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_GROUP_LINUX_GID \
+ TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_LINUX, 0x01)
+
+/* Android variants */
+#define TF_LOGIN_USER_ANDROID_EUID \
+ TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_GROUP_ANDROID_GID \
+ TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_APPLICATION_ANDROID_UID \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANDROID, \
+ 0x01)
+#define TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_ANDROID, \
+ 0x01)
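+
+/*
+ * For example, TF_LOGIN_USER_LINUX_EUID expands to
+ *   0x00000001 | (1 << 27) | (0x01 << 16) | (0x01 << 8) == 0x08010101,
+ * and TF_LOGIN_GET_MAIN_TYPE(0x08010101) == TF_LOGIN_USER.
+ */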
+
+/*
+ * return origins
+ */
+#define TF_ORIGIN_COMMS 2
+#define TF_ORIGIN_TEE 3
+#define TF_ORIGIN_TRUSTED_APP 4
+/*
+ * The message types.
+ */
+#define TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT 0x02
+#define TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT 0xFD
+#define TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY 0xF7
+#define TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY 0xF9
+#define TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION 0xF0
+#define TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION 0xF2
+#define TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND 0xF5
+#define TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND 0xF4
+#define TF_MESSAGE_TYPE_MANAGEMENT 0xFE
+
+
+/*
+ * The SChannel error codes.
+ */
+#define S_SUCCESS 0x00000000
+#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C
+
+
+struct tf_command_header {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info;
+ u32 operation_id;
+};
+
+struct tf_answer_header {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info;
+ u32 operation_id;
+ u32 error_code;
+};
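+
+/*
+ * In both headers, message_size counts the message payload in 32-bit
+ * words, excluding the header itself (two words for commands, three
+ * for answers); see the size computations in tf_device.c.
+ */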
+
+/*
+ * CREATE_DEVICE_CONTEXT command message.
+ */
+struct tf_command_create_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 device_context_id;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT answer message.
+ */
+struct tf_answer_create_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ /* an opaque Normal World identifier for the device context */
+ u32 device_context;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT command message.
+ */
+struct tf_command_destroy_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 device_context;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT answer message.
+ */
+struct tf_answer_destroy_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ u32 device_context_id;
+};
+
+/*
+ * OPEN_CLIENT_SESSION command message.
+ */
+struct tf_command_open_client_session {
+ u8 message_size;
+ u8 message_type;
+ u16 param_types;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 cancellation_id;
+ u64 timeout;
+ struct tf_uuid destination_uuid;
+ union tf_command_param params[4];
+ u32 login_type;
+ /*
+ * Size = 0 for public, [16] for group identification, [20] for
+ * authentication
+ */
+ u8 login_data[20];
+};
+
+/*
+ * OPEN_CLIENT_SESSION answer message.
+ */
+struct tf_answer_open_client_session {
+ u8 message_size;
+ u8 message_type;
+ u8 error_origin;
+ u8 __reserved;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ u32 client_session;
+ union tf_answer_param answers[4];
+};
+
+/*
+ * CLOSE_CLIENT_SESSION command message.
+ */
+struct tf_command_close_client_session {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 client_session;
+};
+
+/*
+ * CLOSE_CLIENT_SESSION answer message.
+ */
+struct tf_answer_close_client_session {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+};
+
+
+/*
+ * REGISTER_SHARED_MEMORY command message
+ */
+struct tf_command_register_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 memory_flags;
+ u32 operation_id;
+ u32 device_context;
+ u32 block_id;
+ u32 shared_mem_size;
+ u32 shared_mem_start_offset;
+ u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
+};
+
+/*
+ * REGISTER_SHARED_MEMORY answer message.
+ */
+struct tf_answer_register_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ u32 block;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY command message.
+ */
+struct tf_command_release_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 block;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY answer message.
+ */
+struct tf_answer_release_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 error_code;
+ u32 block_id;
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command message.
+ */
+struct tf_command_invoke_client_command {
+ u8 message_size;
+ u8 message_type;
+ u16 param_types;
+ u32 operation_id;
+ u32 device_context;
+ u32 client_session;
+ u64 timeout;
+ u32 cancellation_id;
+ u32 client_command_identifier;
+ union tf_command_param params[4];
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command answer.
+ */
+struct tf_answer_invoke_client_command {
+ u8 message_size;
+ u8 message_type;
+ u8 error_origin;
+ u8 __reserved;
+ u32 operation_id;
+ u32 error_code;
+ union tf_answer_param answers[4];
+};
+
+/*
+ * CANCEL_CLIENT_OPERATION command message.
+ */
+struct tf_command_cancel_client_operation {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 client_session;
+ u32 cancellation_id;
+};
+
+struct tf_answer_cancel_client_operation {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 error_code;
+};
+
+/*
+ * MANAGEMENT command message.
+ */
+struct tf_command_management {
+ u8 message_size;
+ u8 message_type;
+ u16 command;
+ u32 operation_id;
+ u32 w3b_size;
+ u32 w3b_start_offset;
+ u32 shared_mem_descriptors[1];
+};
+
+/*
+ * POWER_MANAGEMENT answer message.
+ * The message does not carry any message-specific parameters,
+ * so there is no need for a dedicated answer structure.
+ */
+
+/*
+ * Structure for L2 messages
+ */
+union tf_command {
+ struct tf_command_header header;
+ struct tf_command_create_device_context create_device_context;
+ struct tf_command_destroy_device_context destroy_device_context;
+ struct tf_command_open_client_session open_client_session;
+ struct tf_command_close_client_session close_client_session;
+ struct tf_command_register_shared_memory register_shared_memory;
+ struct tf_command_release_shared_memory release_shared_memory;
+ struct tf_command_invoke_client_command invoke_client_command;
+ struct tf_command_cancel_client_operation cancel_client_operation;
+ struct tf_command_management management;
+};
+
+/*
+ * Structure for any L2 answer
+ */
+
+union tf_answer {
+ struct tf_answer_header header;
+ struct tf_answer_create_device_context create_device_context;
+ struct tf_answer_open_client_session open_client_session;
+ struct tf_answer_close_client_session close_client_session;
+ struct tf_answer_register_shared_memory register_shared_memory;
+ struct tf_answer_release_shared_memory release_shared_memory;
+ struct tf_answer_invoke_client_command invoke_client_command;
+ struct tf_answer_destroy_device_context destroy_device_context;
+ struct tf_answer_cancel_client_operation cancel_client_operation;
+};
+
+/* Structure of the Communication Buffer */
+struct tf_l1_shared_buffer {
+ #ifdef CONFIG_TF_ZEBRA
+ u32 exit_code;
+ u32 l1_shared_buffer_descr;
+ u32 backing_store_addr;
+ u32 backext_storage_addr;
+ u32 workspace_addr;
+ u32 workspace_size;
+ u32 conf_descriptor;
+ u32 conf_size;
+ u32 conf_offset;
+ u32 protocol_version;
+ u32 rpc_command;
+ u32 rpc_status;
+ u8 reserved1[16];
+ #else
+ u32 config_flag_s;
+ u32 w3b_size_max_s;
+ u32 reserved0;
+ u32 w3b_size_current_s;
+ u8 reserved1[48];
+ #endif
+ u8 version_description[TF_DESCRIPTION_BUFFER_LENGTH];
+ u32 status_s;
+ u32 reserved2;
+ u32 sync_serial_n;
+ u32 sync_serial_s;
+ u64 time_n[2];
+ u64 timeout_s[2];
+ u32 first_command;
+ u32 first_free_command;
+ u32 first_answer;
+ u32 first_free_answer;
+ u32 w3b_descriptors[128];
+ #ifdef CONFIG_TF_ZEBRA
+ u8 rpc_trace_buffer[140];
+ u8 rpc_cus_buffer[180];
+ #elif defined(CONFIG_SECURE_TRACES)
+ u32 traces_status;
+ u8 traces_buffer[140];
+ u8 reserved3[176];
+ #else
+ u8 reserved3[320];
+ #endif
+ u32 command_queue[TF_N_MESSAGE_QUEUE_CAPACITY];
+ u32 answer_queue[TF_S_ANSWER_QUEUE_CAPACITY];
+};
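+
+/*
+ * Note: first_command/first_free_command and first_answer/
+ * first_free_answer are free-running slot indices into command_queue
+ * and answer_queue; producers and consumers presumably wrap them
+ * modulo TF_N_MESSAGE_QUEUE_CAPACITY and TF_S_ANSWER_QUEUE_CAPACITY.
+ */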
+
+
+/*
+ * tf_version_information_buffer structure description
+ * Describes the version buffer handed over from user space to kernel
+ * space. The driver fills it in during an IOCTL_TF_GET_DESCRIPTION
+ * ioctl and hands it back to user space.
+ */
+struct tf_version_information_buffer {
+ u8 driver_description[65];
+ u8 secure_world_description[65];
+};
+
+
+/* The IOCTLs the driver supports */
+#include <linux/ioctl.h>
+
+#define IOCTL_TF_GET_VERSION _IO('z', 0)
+#define IOCTL_TF_EXCHANGE _IOWR('z', 1, union tf_command)
+#define IOCTL_TF_GET_DESCRIPTION _IOR('z', 2, \
+ struct tf_version_information_buffer)
+#ifdef CONFIG_TF_ION
+#define IOCTL_TF_ION_REGISTER _IOR('z', 254, int)
+#define IOCTL_TF_ION_UNREGISTER _IOR('z', 255, int)
+#endif
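+
+/*
+ * Illustrative user-space usage sketch (the device node path is
+ * hypothetical; error handling elided):
+ *
+ *	int fd = open("/dev/tf_driver", O_RDWR);
+ *	union tf_command cmd;
+ *	... fill in cmd.header and the message body ...
+ *	ioctl(fd, IOCTL_TF_EXCHANGE, &cmd);
+ */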
+
+#endif /* !defined(__TF_PROTOCOL_H__) */
diff --git a/security/tf_driver/tf_teec.c b/security/tf_driver/tf_teec.c
new file mode 100644
index 000000000000..4b772215665d
--- /dev/null
+++ b/security/tf_driver/tf_teec.c
@@ -0,0 +1,618 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifdef CONFIG_TF_TEEC
+
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_comm.h"
+#include "tf_conn.h"
+#include "tf_teec.h"
+
+#include "tee_client_api.h"
+
+#define TF_COMMAND_BYTES(cmd) \
+ (sizeof(cmd) - sizeof(struct tf_command_header))
+#define TF_COMMAND_SIZE(cmd) \
+ (TF_COMMAND_BYTES(cmd) / sizeof(u32))
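+
+/*
+ * For example, struct tf_command_close_client_session is 16 bytes, so
+ * TF_COMMAND_BYTES gives 8 (the payload after the 8-byte header) and
+ * TF_COMMAND_SIZE gives 2, the value stored in message_size.
+ */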
+
+/* Associate TEEC errors to POSIX/Linux errors. The matching is somewhat
+ arbitrary but one-to-one for supported error codes. */
+int TEEC_decode_error(TEEC_Result ret)
+{
+ switch (ret) {
+ case TEEC_SUCCESS: return 0;
+ case TEEC_ERROR_GENERIC: return -EIO;
+ case TEEC_ERROR_ACCESS_DENIED: return -EPERM;
+ case TEEC_ERROR_CANCEL: return -ECANCELED;
+ case TEEC_ERROR_ACCESS_CONFLICT: return -EBUSY;
+ case TEEC_ERROR_EXCESS_DATA: return -E2BIG;
+ case TEEC_ERROR_BAD_FORMAT: return -EDOM;
+ case TEEC_ERROR_BAD_PARAMETERS: return -EINVAL;
+ case TEEC_ERROR_BAD_STATE: return -EBADFD;
+ case TEEC_ERROR_ITEM_NOT_FOUND: return -ENOENT;
+ case TEEC_ERROR_NOT_IMPLEMENTED: return -EPROTONOSUPPORT;
+ case TEEC_ERROR_NOT_SUPPORTED: return -ENOSYS;
+ case TEEC_ERROR_NO_DATA: return -ENODATA;
+ case TEEC_ERROR_OUT_OF_MEMORY: return -ENOMEM;
+ case TEEC_ERROR_BUSY: return -EAGAIN;
+ case TEEC_ERROR_COMMUNICATION: return -EPIPE;
+ case TEEC_ERROR_SECURITY: return -ECONNABORTED;
+ case TEEC_ERROR_SHORT_BUFFER: return -EFBIG;
+ default: return -EIO;
+ }
+}
+EXPORT_SYMBOL(TEEC_decode_error);
+
+/* Associate POSIX/Linux errors to TEEC errors. The matching is somewhat
+ arbitrary, but TEEC_encode_error(TEEC_decode_error(x))==x for supported
+ error codes. */
+TEEC_Result TEEC_encode_error(int err)
+{
+	if (err >= 0)
+		return TEEC_SUCCESS;
+
+	switch (err) {
+ case -EIO: return TEEC_ERROR_GENERIC;
+ case -EPERM: return TEEC_ERROR_ACCESS_DENIED;
+ case -ECANCELED: return TEEC_ERROR_CANCEL;
+ case -EBUSY: return TEEC_ERROR_ACCESS_CONFLICT;
+ case -E2BIG: return TEEC_ERROR_EXCESS_DATA;
+ case -EDOM: return TEEC_ERROR_BAD_FORMAT;
+ case -EINVAL: return TEEC_ERROR_BAD_PARAMETERS;
+ case -EBADFD: return TEEC_ERROR_BAD_STATE;
+ case -ENOENT: return TEEC_ERROR_ITEM_NOT_FOUND;
+ case -EPROTONOSUPPORT: return TEEC_ERROR_NOT_IMPLEMENTED;
+ case -ENOSYS: return TEEC_ERROR_NOT_SUPPORTED;
+ case -ENODATA: return TEEC_ERROR_NO_DATA;
+ case -ENOMEM: return TEEC_ERROR_OUT_OF_MEMORY;
+ case -EAGAIN: return TEEC_ERROR_BUSY;
+ case -EPIPE: return TEEC_ERROR_COMMUNICATION;
+ case -ECONNABORTED: return TEEC_ERROR_SECURITY;
+ case -EFBIG: return TEEC_ERROR_SHORT_BUFFER;
+ default: return TEEC_ERROR_GENERIC;
+ }
+}
+EXPORT_SYMBOL(TEEC_encode_error);
+
+/* Encode a TEEC time limit into an SChannel time limit. */
+static u64 TEEC_encode_timeout(const TEEC_TimeLimit *timeLimit)
+{
+ if (timeLimit == NULL)
+ return (u64)-1;
+ else
+ return *timeLimit;
+}
+
+/* Convert a timeout into a time limit in our internal format. */
+void TEEC_GetTimeLimit(TEEC_Context *sContext,
+ uint32_t nTimeout, /*ms from now*/
+ TEEC_TimeLimit *sTimeLimit)
+{
+ /*Use the kernel time as the TEE time*/
+ struct timeval now;
+ do_gettimeofday(&now);
+ *sTimeLimit =
+ ((TEEC_TimeLimit)now.tv_sec * 1000 +
+ now.tv_usec / 1000 +
+ nTimeout);
+}
+EXPORT_SYMBOL(TEEC_GetTimeLimit);
+
+/* TF_PARAM_TYPE_MEMREF_FLAG and TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG are
+   inherited from tf_protocol.h, included above. */
+#define TF_PARAM_TYPE_INPUT_FLAG 0x1
+#define TF_PARAM_TYPE_OUTPUT_FLAG 0x2
+
+/* Update the type of a whole memref with the direction deduced from
+ the INPUT and OUTPUT flags of the memref. */
+static void TEEC_encode_whole_memref_flags(u16 *param_types,
+ unsigned i,
+ u32 flags)
+{
+ if (flags & TEEC_MEM_INPUT)
+ *param_types |= TF_PARAM_TYPE_INPUT_FLAG << (4*i);
+ if (flags & TEEC_MEM_OUTPUT)
+ *param_types |= TF_PARAM_TYPE_OUTPUT_FLAG << (4*i);
+}
+
+/* Encode the parameters and type of an operation from the TEE API format
+ into an SChannel message. */
+void TEEC_encode_parameters(u16 *param_types,
+ union tf_command_param *params,
+ TEEC_Operation *operation)
+{
+ unsigned i;
+ if (operation == NULL) {
+ *param_types = 0;
+ return;
+ }
+ *param_types = operation->paramTypes;
+ for (i = 0; i < 4; i++) {
+ unsigned ty = TF_GET_PARAM_TYPE(operation->paramTypes, i);
+ TEEC_Parameter *op = operation->params + i;
+ if (ty & TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG) {
+ TEEC_SharedMemory *sm = op->memref.parent;
+ params[i].memref.block = sm->imp._block;
+ if (ty == TEEC_MEMREF_WHOLE) {
+ TEEC_encode_whole_memref_flags(param_types, i,
+ sm->flags);
+ params[i].memref.size = sm->size;
+ params[i].memref.offset = 0;
+ } else {
+ params[i].memref.size = op->memref.size;
+ params[i].memref.offset = op->memref.offset;
+ }
+ } else if (ty & TF_PARAM_TYPE_MEMREF_FLAG) {
+ /* Set up what tf_map_temp_shmem (called by
+ tf_open_client_session and
+ tf_invoke_client_command) expects:
+ .descriptor and .offset to both be set to the
+ address of the buffer. */
+ u32 address = (u32)op->tmpref.buffer;
+ params[i].temp_memref.descriptor = address;
+ params[i].temp_memref.size = op->tmpref.size;
+ params[i].temp_memref.offset = address;
+ } else if (ty & TF_PARAM_TYPE_INPUT_FLAG) {
+ params[i].value.a = op->value.a;
+ params[i].value.b = op->value.b;
+ } else {
+ /* output-only value or none, so nothing to do */
+ }
+ }
+}
+
+/* Decode updated parameters from an SChannel answer into the TEE API format. */
+void TEEC_decode_parameters(union tf_answer_param *params,
+ TEEC_Operation *operation)
+{
+ unsigned i;
+
+ if (operation == NULL)
+ return;
+
+ for (i = 0; i < 4; i++) {
+ unsigned ty = TF_GET_PARAM_TYPE(operation->paramTypes, i);
+ TEEC_Parameter *op = operation->params + i;
+ if (!(ty & TF_PARAM_TYPE_OUTPUT_FLAG)) {
+ /* input-only or none, so nothing to do */
+ } else if (ty & TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG) {
+ op->memref.size = params[i].size.size;
+ } else if (ty & TF_PARAM_TYPE_MEMREF_FLAG) {
+ op->tmpref.size = params[i].size.size;
+ } else {
+ op->value.a = params[i].value.a;
+ op->value.b = params[i].value.b;
+ }
+ }
+}
+
+/* Start a potentially-cancellable operation. */
+void TEEC_start_operation(TEEC_Context *context,
+ TEEC_Session *session,
+ TEEC_Operation *operation)
+{
+ if (operation != NULL) {
+ operation->imp._pSession = session;
+ /* Flush the assignment to imp._pSession, so that
+ RequestCancellation can read that field if started==1. */
+ barrier();
+ operation->started = 1;
+ }
+}
+
+/* Mark a potentially-cancellable operation as finished. */
+void TEEC_finish_operation(TEEC_Operation *operation)
+{
+ if (operation != NULL) {
+ operation->started = 2;
+ barrier();
+ }
+}
+
+
+
+TEEC_Result TEEC_InitializeContext(const char *name,
+ TEEC_Context *context)
+{
+ int error;
+ struct tf_connection *connection = NULL;
+
+ error = tf_open(tf_get_device(), NULL, &connection);
+ if (error != 0) {
+ dprintk(KERN_ERR "TEEC_InitializeContext(%s): "
+ "tf_open failed (error %d)!\n",
+ (name == NULL ? "(null)" : name), error);
+ goto error;
+ }
+ BUG_ON(connection == NULL);
+ connection->owner = TF_CONNECTION_OWNER_KERNEL;
+
+ error = tf_create_device_context(connection);
+ if (error != 0) {
+ dprintk(KERN_ERR "TEEC_InitializeContext(%s): "
+ "tf_create_device_context failed (error %d)!\n",
+ (name == NULL ? "(null)" : name), error);
+ goto error;
+ }
+
+ context->imp._connection = connection;
+ /*spin_lock_init(&context->imp._operations_lock);*/
+	return TEEC_SUCCESS;
+
+error:
+ tf_close(connection);
+ return TEEC_encode_error(error);
+}
+EXPORT_SYMBOL(TEEC_InitializeContext);
+
+void TEEC_FinalizeContext(TEEC_Context *context)
+{
+ struct tf_connection *connection = context->imp._connection;
+	dprintk(KERN_DEBUG "TEEC_FinalizeContext: connection=%p\n",
+		connection);
+ tf_close(connection);
+ context->imp._connection = NULL;
+}
+EXPORT_SYMBOL(TEEC_FinalizeContext);
+
+TEEC_Result TEEC_RegisterSharedMemory(TEEC_Context *context,
+ TEEC_SharedMemory *sharedMem)
+{
+ union tf_command command_message = { { 0, } };
+ struct tf_command_register_shared_memory *cmd =
+ &command_message.register_shared_memory;
+ union tf_answer answer_message;
+ struct tf_answer_register_shared_memory *ans =
+ &answer_message.register_shared_memory;
+ TEEC_Result ret;
+ memset(&sharedMem->imp, 0, sizeof(sharedMem->imp));
+
+ cmd->message_size = TF_COMMAND_SIZE(*cmd);
+ cmd->message_type = TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY;
+ cmd->memory_flags = sharedMem->flags;
+ cmd->operation_id = (u32)&answer_message;
+ cmd->device_context = (u32)context;
+ /*cmd->block_id will be set by tf_register_shared_memory*/
+ cmd->shared_mem_size = sharedMem->size;
+ cmd->shared_mem_start_offset = 0;
+ cmd->shared_mem_descriptors[0] = (u32)sharedMem->buffer;
+
+ ret = TEEC_encode_error(
+ tf_register_shared_memory(context->imp._connection,
+ &command_message,
+ &answer_message));
+ if (ret == TEEC_SUCCESS)
+ ret = ans->error_code;
+
+ if (ret == S_SUCCESS) {
+ sharedMem->imp._context = context;
+ sharedMem->imp._block = ans->block;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(TEEC_RegisterSharedMemory);
+
+#define TEEC_POINTER_TO_ZERO_SIZED_BUFFER ((void *)0x010)
+
+TEEC_Result TEEC_AllocateSharedMemory(TEEC_Context *context,
+ TEEC_SharedMemory *sharedMem)
+{
+ TEEC_Result ret;
+	dprintk(KERN_DEBUG "TEEC_AllocateSharedMemory: requested=%lu\n",
+		(unsigned long)sharedMem->size);
+ if (sharedMem->size == 0) {
+ /* Allocating 0 bytes must return a non-NULL pointer, but the
+ pointer doesn't need to be to memory that is mapped
+ anywhere. So we return a pointer into an unmapped page. */
+ sharedMem->buffer = TEEC_POINTER_TO_ZERO_SIZED_BUFFER;
+ } else {
+ sharedMem->buffer = internal_vmalloc(sharedMem->size);
+ if (sharedMem->buffer == NULL) {
+			dprintk(KERN_INFO "TEEC_AllocateSharedMemory: could "
+				"not allocate %lu bytes\n",
+				(unsigned long)sharedMem->size);
+ return TEEC_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ ret = TEEC_RegisterSharedMemory(context, sharedMem);
+ if (ret == TEEC_SUCCESS) {
+ sharedMem->imp._allocated = 1;
+ } else {
+ internal_vfree(sharedMem->buffer);
+ sharedMem->buffer = NULL;
+ memset(&sharedMem->imp, 0, sizeof(sharedMem->imp));
+ }
+ return ret;
+}
+EXPORT_SYMBOL(TEEC_AllocateSharedMemory);
+
+void TEEC_ReleaseSharedMemory(TEEC_SharedMemory *sharedMem)
+{
+ TEEC_Context *context = sharedMem->imp._context;
+ union tf_command command_message = { { 0, } };
+ struct tf_command_release_shared_memory *cmd =
+ &command_message.release_shared_memory;
+ union tf_answer answer_message;
+
+ cmd->message_size = TF_COMMAND_SIZE(*cmd);
+ cmd->message_type = TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY;
+ cmd->operation_id = (u32)&answer_message;
+ cmd->device_context = (u32)context;
+ cmd->block = sharedMem->imp._block;
+
+ tf_release_shared_memory(context->imp._connection,
+ &command_message,
+ &answer_message);
+ if (sharedMem->imp._allocated) {
+ if (sharedMem->buffer != TEEC_POINTER_TO_ZERO_SIZED_BUFFER)
+ internal_vfree(sharedMem->buffer);
+ sharedMem->buffer = NULL;
+ sharedMem->size = 0;
+ }
+ memset(&sharedMem->imp, 0, sizeof(sharedMem->imp));
+}
+EXPORT_SYMBOL(TEEC_ReleaseSharedMemory);
+
+TEEC_Result TEEC_OpenSessionEx(TEEC_Context *context,
+ TEEC_Session *session,
+ const TEEC_TimeLimit *timeLimit,
+ const TEEC_UUID * destination,
+ u32 connectionMethod,
+ void *connectionData,
+ TEEC_Operation *operation,
+ u32 *errorOrigin)
+{
+ union tf_command command_message = { { 0, } };
+ struct tf_command_open_client_session *cmd =
+ &command_message.open_client_session;
+ union tf_answer answer_message = { { 0, } };
+ struct tf_answer_open_client_session *ans =
+ &answer_message.open_client_session;
+ TEEC_Result ret;
+
+ /* Note that we set the message size to the whole size of the
+ structure. tf_open_client_session will adjust it down
+ to trim the unnecessary portion of the login_data field. */
+ cmd->message_size = TF_COMMAND_SIZE(*cmd);
+ cmd->message_type = TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION;
+ cmd->operation_id = (u32)&answer_message;
+ cmd->device_context = (u32)context;
+ cmd->cancellation_id = (u32)operation;
+ cmd->timeout = TEEC_encode_timeout(timeLimit);
+ memcpy(&cmd->destination_uuid, destination,
+ sizeof(cmd->destination_uuid));
+ cmd->login_type = connectionMethod;
+ TEEC_encode_parameters(&cmd->param_types, cmd->params, operation);
+
+ switch (connectionMethod) {
+ case TEEC_LOGIN_PRIVILEGED:
+ case TEEC_LOGIN_PUBLIC:
+ break;
+ case TEEC_LOGIN_APPLICATION:
+ case TEEC_LOGIN_USER:
+ case TEEC_LOGIN_USER_APPLICATION:
+ case TEEC_LOGIN_GROUP:
+ case TEEC_LOGIN_GROUP_APPLICATION:
+ default:
+ return TEEC_ERROR_NOT_IMPLEMENTED;
+ }
+
+ TEEC_start_operation(context, session, operation);
+
+ ret = TEEC_encode_error(
+ tf_open_client_session(context->imp._connection,
+ &command_message,
+ &answer_message));
+
+ TEEC_finish_operation(operation);
+ TEEC_decode_parameters(ans->answers, operation);
+ if (errorOrigin != NULL) {
+ *errorOrigin = (ret == TEEC_SUCCESS ?
+ ans->error_origin :
+ TEEC_ORIGIN_COMMS);
+ }
+
+ if (ret == TEEC_SUCCESS)
+ ret = ans->error_code;
+
+ if (ret == S_SUCCESS) {
+ session->imp._client_session = ans->client_session;
+ session->imp._context = context;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(TEEC_OpenSessionEx);
+
+TEEC_Result TEEC_OpenSession(TEEC_Context *context,
+ TEEC_Session *session,
+ const TEEC_UUID * destination,
+ u32 connectionMethod,
+ void *connectionData,
+ TEEC_Operation *operation,
+ u32 *errorOrigin)
+{
+ return TEEC_OpenSessionEx(context, session,
+ NULL, /*timeLimit*/
+ destination,
+ connectionMethod, connectionData,
+ operation, errorOrigin);
+}
+EXPORT_SYMBOL(TEEC_OpenSession);
+
+void TEEC_CloseSession(TEEC_Session *session)
+{
+ if (session != NULL) {
+ TEEC_Context *context = session->imp._context;
+ union tf_command command_message = { { 0, } };
+ struct tf_command_close_client_session *cmd =
+ &command_message.close_client_session;
+ union tf_answer answer_message;
+
+ cmd->message_size = TF_COMMAND_SIZE(*cmd);
+ cmd->message_type = TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION;
+ cmd->operation_id = (u32)&answer_message;
+ cmd->device_context = (u32)context;
+ cmd->client_session = session->imp._client_session;
+
+ tf_close_client_session(context->imp._connection,
+ &command_message,
+ &answer_message);
+
+ session->imp._client_session = 0;
+ session->imp._context = NULL;
+ }
+}
+EXPORT_SYMBOL(TEEC_CloseSession);
+
+TEEC_Result TEEC_InvokeCommandEx(TEEC_Session *session,
+ const TEEC_TimeLimit *timeLimit,
+ u32 commandID,
+ TEEC_Operation *operation,
+ u32 *errorOrigin)
+{
+ TEEC_Context *context = session->imp._context;
+ union tf_command command_message = { { 0, } };
+ struct tf_command_invoke_client_command *cmd =
+ &command_message.invoke_client_command;
+ union tf_answer answer_message = { { 0, } };
+ struct tf_answer_invoke_client_command *ans =
+ &answer_message.invoke_client_command;
+ TEEC_Result ret;
+
+ cmd->message_size = TF_COMMAND_SIZE(*cmd);
+ cmd->message_type = TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND;
+ cmd->operation_id = (u32)&answer_message;
+ cmd->device_context = (u32)context;
+ cmd->client_session = session->imp._client_session;
+ cmd->timeout = TEEC_encode_timeout(timeLimit);
+ cmd->cancellation_id = (u32)operation;
+ cmd->client_command_identifier = commandID;
+ TEEC_encode_parameters(&cmd->param_types, cmd->params, operation);
+
+ TEEC_start_operation(context, session, operation);
+
+ ret = TEEC_encode_error(
+ tf_invoke_client_command(context->imp._connection,
+ &command_message,
+ &answer_message));
+
+ TEEC_finish_operation(operation);
+ TEEC_decode_parameters(ans->answers, operation);
+ if (errorOrigin != NULL) {
+ *errorOrigin = (ret == TEEC_SUCCESS ?
+ ans->error_origin :
+ TEEC_ORIGIN_COMMS);
+ }
+
+ if (ret == TEEC_SUCCESS)
+ ret = ans->error_code;
+ return ret;
+}
+EXPORT_SYMBOL(TEEC_InvokeCommandEx);
+
+TEEC_Result TEEC_InvokeCommand(TEEC_Session *session,
+ u32 commandID,
+ TEEC_Operation *operation,
+ u32 *errorOrigin)
+{
+ return TEEC_InvokeCommandEx(session,
+ NULL, /*timeLimit*/
+ commandID,
+ operation, errorOrigin);
+}
+EXPORT_SYMBOL(TEEC_InvokeCommand);
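+
+/*
+ * Illustrative in-kernel usage sketch. The UUID and command identifier
+ * below are hypothetical placeholders; error handling is elided:
+ *
+ *	TEEC_Context context;
+ *	TEEC_Session session;
+ *	TEEC_Operation op = { 0 };
+ *	static const TEEC_UUID example_uuid = { 0 };
+ *
+ *	TEEC_InitializeContext(NULL, &context);
+ *	TEEC_OpenSession(&context, &session, &example_uuid,
+ *			 TEEC_LOGIN_PUBLIC, NULL, NULL, NULL);
+ *	op.paramTypes = TF_MAKE_PARAM_TYPES(TF_PARAM_TYPE_VALUE_INOUT,
+ *		TF_PARAM_TYPE_NONE, TF_PARAM_TYPE_NONE, TF_PARAM_TYPE_NONE);
+ *	op.params[0].value.a = 42;
+ *	TEEC_InvokeCommand(&session, 1, &op, NULL);
+ *	TEEC_CloseSession(&session);
+ *	TEEC_FinalizeContext(&context);
+ */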
+
+TEEC_Result TEEC_send_cancellation_message(TEEC_Context *context,
+ u32 client_session,
+ u32 cancellation_id)
+{
+ union tf_command command_message = { { 0, } };
+ struct tf_command_cancel_client_operation *cmd =
+ &command_message.cancel_client_operation;
+ union tf_answer answer_message = { { 0, } };
+ struct tf_answer_cancel_client_operation *ans =
+ &answer_message.cancel_client_operation;
+ TEEC_Result ret;
+
+ cmd->message_size = TF_COMMAND_SIZE(*cmd);
+ cmd->message_type = TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND;
+ cmd->operation_id = (u32)&answer_message;
+ cmd->device_context = (u32)context;
+ cmd->client_session = client_session;
+ cmd->cancellation_id = cancellation_id;
+
+ ret = TEEC_encode_error(
+ tf_cancel_client_command(context->imp._connection,
+ &command_message,
+ &answer_message));
+
+ if (ret == TEEC_SUCCESS)
+ ret = ans->error_code;
+ return ret;
+}
+
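+/*
+ * The started field acts as a small state machine: 0 means the operation
+ * structure is not yet initialized, 1 means TEEC_start_operation has
+ * published imp._pSession and the operation is in flight, 2 means
+ * TEEC_finish_operation has marked it complete. The barriers in those
+ * helpers order the transitions against this reader.
+ */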
+void TEEC_RequestCancellation(TEEC_Operation *operation)
+{
+ TEEC_Result ret;
+ while (1) {
+ u32 state = operation->started;
+ switch (state) {
+ case 0: /*The operation data structure isn't initialized yet*/
+ break;
+
+ case 1: /*operation is in progress in the client*/
+ ret = TEEC_send_cancellation_message(
+ operation->imp._pSession->imp._context,
+ operation->imp._pSession->imp._client_session,
+ (u32)operation);
+ if (ret == TEEC_SUCCESS) {
+ /*The cancellation was successful*/
+ return;
+ }
+ /* The command has either not reached the secure world
+ yet or has completed already. Either way, retry. */
+ break;
+
+ case 2: /*operation has completed already*/
+ return;
+ }
+ /* Since we're busy-waiting for the operation to be started
+ or finished, yield. */
+ schedule();
+ }
+}
+EXPORT_SYMBOL(TEEC_RequestCancellation);
+
+#endif /* defined(CONFIG_TF_TEEC) */
diff --git a/security/tf_driver/tf_teec.h b/security/tf_driver/tf_teec.h
new file mode 100644
index 000000000000..28b32878f800
--- /dev/null
+++ b/security/tf_driver/tf_teec.h
@@ -0,0 +1,33 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_TEEC_H__
+#define __TF_TEEC_H__
+
+#ifdef CONFIG_TF_TEEC
+
+#include "tf_defs.h"
+#include "tee_client_api.h"
+
+TEEC_Result TEEC_encode_error(int err);
+int TEEC_decode_error(TEEC_Result ret);
+
+#endif /* defined(CONFIG_TF_TEEC) */
+
+#endif /* !defined(__TF_TEEC_H__) */
diff --git a/security/tf_driver/tf_util.c b/security/tf_driver/tf_util.c
new file mode 100644
index 000000000000..b5b24497e138
--- /dev/null
+++ b/security/tf_driver/tf_util.c
@@ -0,0 +1,1138 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * Copyright (C) 2011-2012 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/mman.h>
+#include "tf_util.h"
+
+/*----------------------------------------------------------------------------
+ * Debug printing routines
+ *----------------------------------------------------------------------------*/
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+
+void tf_trace_array(const char *fun, const char *msg,
+ const void *ptr, size_t len)
+{
+	char hex[511];
+	bool ell = (len > sizeof(hex)/2);
+	unsigned lim = (ell ? sizeof(hex)/2 : len);
+	unsigned i;
+	for (i = 0; i < lim; i++)
+		sprintf(hex + 2 * i, "%02x", ((unsigned char *)ptr)[i]);
+	pr_info("%s: %s[%zu] = %s%s\n",
+		fun, msg, len, hex, ell ? "..." : "");
+}
+
+void address_cache_property(unsigned long va)
+{
+ unsigned long pa;
+ unsigned long inner;
+ unsigned long outer;
+
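+	/* ATS1CPR: ask the MMU to translate va for a privileged read,
+	   then read the result back from the PAR. */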
+ asm volatile ("mcr p15, 0, %0, c7, c8, 0" : : "r" (va));
+ asm volatile ("mrc p15, 0, %0, c7, c4, 0" : "=r" (pa));
+
+ dprintk(KERN_INFO "VA:%x, PA:%x\n",
+ (unsigned int) va,
+ (unsigned int) pa);
+
+	if (pa & 1) {
+		/* Bit 0 of the PAR set: the translation aborted */
+		dprintk(KERN_INFO "Translation aborted\n");
+		return;
+	}
+
+ outer = (pa >> 2) & 3;
+ dprintk(KERN_INFO "\touter : %x", (unsigned int) outer);
+
+ switch (outer) {
+ case 3:
+ dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
+ break;
+ case 2:
+ dprintk(KERN_INFO "Write-Through, no Write-Allocate.\n");
+ break;
+ case 1:
+ dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
+ break;
+ case 0:
+ dprintk(KERN_INFO "Non-cacheable.\n");
+ break;
+ }
+
+ inner = (pa >> 4) & 7;
+ dprintk(KERN_INFO "\tinner : %x", (unsigned int)inner);
+
+ switch (inner) {
+ case 7:
+ dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
+ break;
+ case 6:
+ dprintk(KERN_INFO "Write-Through.\n");
+ break;
+ case 5:
+ dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
+ break;
+ case 3:
+ dprintk(KERN_INFO "Device.\n");
+ break;
+ case 1:
+ dprintk(KERN_INFO "Strongly-ordered.\n");
+ break;
+ case 0:
+ dprintk(KERN_INFO "Non-cacheable.\n");
+ break;
+ }
+
+ if (pa & 0x00000002)
+ dprintk(KERN_INFO "SuperSection.\n");
+ if (pa & 0x00000080)
+ dprintk(KERN_INFO "Memory is shareable.\n");
+ else
+ dprintk(KERN_INFO "Memory is non-shareable.\n");
+
+ if (pa & 0x00000200)
+ dprintk(KERN_INFO "Non-secure.\n");
+}
+
+/*
+ * Dump the L1 shared buffer.
+ */
+void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer)
+{
+ dprintk(KERN_INFO
+ "buffer@%p:\n"
+ #ifndef CONFIG_TF_ZEBRA
+ " config_flag_s=%08X\n"
+ #endif
+ " version_description=%64s\n"
+ " status_s=%08X\n"
+ " sync_serial_n=%08X\n"
+ " sync_serial_s=%08X\n"
+ " time_n[0]=%016llX\n"
+ " time_n[1]=%016llX\n"
+ " timeout_s[0]=%016llX\n"
+ " timeout_s[1]=%016llX\n"
+ " first_command=%08X\n"
+ " first_free_command=%08X\n"
+ " first_answer=%08X\n"
+ " first_free_answer=%08X\n\n",
+ buffer,
+ #ifndef CONFIG_TF_ZEBRA
+ buffer->config_flag_s,
+ #endif
+ buffer->version_description,
+ buffer->status_s,
+ buffer->sync_serial_n,
+ buffer->sync_serial_s,
+ buffer->time_n[0],
+ buffer->time_n[1],
+ buffer->timeout_s[0],
+ buffer->timeout_s[1],
+ buffer->first_command,
+ buffer->first_free_command,
+ buffer->first_answer,
+ buffer->first_free_answer);
+}
+
+
+/*
+ * Dump the specified SChannel message using dprintk.
+ */
+void tf_dump_command(union tf_command *command)
+{
+ u32 i;
+
+ dprintk(KERN_INFO "message@%p:\n", command);
+
+ switch (command->header.message_type) {
+ case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT\n"
+ " operation_id = 0x%08X\n"
+ " device_context_id = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->create_device_context.device_context_id
+ );
+ break;
+
+ case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->destroy_device_context.device_context);
+ break;
+
+ case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION\n"
+ " param_types = 0x%04X\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " cancellation_id = 0x%08X\n"
+ " timeout = 0x%016llX\n"
+ " destination_uuid = "
+ "%08X-%04X-%04X-%02X%02X-"
+ "%02X%02X%02X%02X%02X%02X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->open_client_session.param_types,
+ command->header.operation_id,
+ command->open_client_session.device_context,
+ command->open_client_session.cancellation_id,
+ command->open_client_session.timeout,
+ command->open_client_session.destination_uuid.
+ time_low,
+ command->open_client_session.destination_uuid.
+ time_mid,
+ command->open_client_session.destination_uuid.
+ time_hi_and_version,
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[0],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[1],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[2],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[3],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[4],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[5],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[6],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[7]
+ );
+
+ for (i = 0; i < 4; i++) {
+ uint32_t *param = (uint32_t *) &command->
+ open_client_session.params[i];
+ dprintk(KERN_INFO " params[%d] = "
+ "0x%08X:0x%08X:0x%08X\n",
+ i, param[0], param[1], param[2]);
+ }
+
+ switch (TF_LOGIN_GET_MAIN_TYPE(
+ command->open_client_session.login_type)) {
+ case TF_LOGIN_PUBLIC:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_PUBLIC\n");
+ break;
+ case TF_LOGIN_USER:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_USER\n");
+ break;
+ case TF_LOGIN_GROUP:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_GROUP\n");
+ break;
+ case TF_LOGIN_APPLICATION:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_APPLICATION\n");
+ break;
+ case TF_LOGIN_APPLICATION_USER:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_APPLICATION_USER\n");
+ break;
+ case TF_LOGIN_APPLICATION_GROUP:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_APPLICATION_GROUP\n");
+ break;
+ case TF_LOGIN_AUTHENTICATION:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_AUTHENTICATION\n");
+ break;
+ case TF_LOGIN_PRIVILEGED:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_PRIVILEGED\n");
+ break;
+ case TF_LOGIN_PRIVILEGED_KERNEL:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_PRIVILEGED_KERNEL\n");
+ break;
+ default:
+ dprintk(
+ KERN_ERR " login_type = "
+ "0x%08X (Unknown login type)\n",
+ command->open_client_session.login_type);
+ break;
+ }
+
+		dprintk(
+			KERN_INFO "   login_data = ");
+		for (i = 0; i < 20; i++)
+			dprintk("%02X",
+				command->open_client_session.
+				login_data[i]);
+		dprintk("\n");
+ break;
+
+ case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " client_session = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->close_client_session.device_context,
+ command->close_client_session.client_session
+ );
+ break;
+
+ case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY\n"
+ " memory_flags = 0x%04X\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " block_id = 0x%08X\n"
+ " shared_mem_size = 0x%08X\n"
+ " shared_mem_start_offset = 0x%08X\n"
+ " shared_mem_descriptors[0] = 0x%08X\n"
+ " shared_mem_descriptors[1] = 0x%08X\n"
+ " shared_mem_descriptors[2] = 0x%08X\n"
+ " shared_mem_descriptors[3] = 0x%08X\n"
+ " shared_mem_descriptors[4] = 0x%08X\n"
+ " shared_mem_descriptors[5] = 0x%08X\n"
+ " shared_mem_descriptors[6] = 0x%08X\n"
+ " shared_mem_descriptors[7] = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->register_shared_memory.memory_flags,
+ command->header.operation_id,
+ command->register_shared_memory.device_context,
+ command->register_shared_memory.block_id,
+ command->register_shared_memory.shared_mem_size,
+ command->register_shared_memory.
+ shared_mem_start_offset,
+ command->register_shared_memory.
+ shared_mem_descriptors[0],
+ command->register_shared_memory.
+ shared_mem_descriptors[1],
+ command->register_shared_memory.
+ shared_mem_descriptors[2],
+ command->register_shared_memory.
+ shared_mem_descriptors[3],
+ command->register_shared_memory.
+ shared_mem_descriptors[4],
+ command->register_shared_memory.
+ shared_mem_descriptors[5],
+ command->register_shared_memory.
+ shared_mem_descriptors[6],
+ command->register_shared_memory.
+ shared_mem_descriptors[7]);
+ break;
+
+ case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " block = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->release_shared_memory.device_context,
+ command->release_shared_memory.block);
+ break;
+
+ case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND\n"
+ " param_types = 0x%04X\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " client_session = 0x%08X\n"
+ " timeout = 0x%016llX\n"
+ " cancellation_id = 0x%08X\n"
+ " client_command_identifier = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->invoke_client_command.param_types,
+ command->header.operation_id,
+ command->invoke_client_command.device_context,
+ command->invoke_client_command.client_session,
+ command->invoke_client_command.timeout,
+ command->invoke_client_command.cancellation_id,
+ command->invoke_client_command.
+ client_command_identifier
+ );
+
+ for (i = 0; i < 4; i++) {
+			uint32_t *param = (uint32_t *) &command->
+				invoke_client_command.params[i];
+ dprintk(KERN_INFO " params[%d] = "
+ "0x%08X:0x%08X:0x%08X\n", i,
+ param[0], param[1], param[2]);
+ }
+ break;
+
+ case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " client_session = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->cancel_client_operation.device_context,
+ command->cancel_client_operation.client_session);
+ break;
+
+ case TF_MESSAGE_TYPE_MANAGEMENT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_MANAGEMENT\n"
+ " operation_id = 0x%08X\n"
+ " command = 0x%08X\n"
+ " w3b_size = 0x%08X\n"
+ " w3b_start_offset = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->management.command,
+ command->management.w3b_size,
+ command->management.w3b_start_offset);
+ break;
+
+ default:
+ dprintk(
+ KERN_ERR " message_type = 0x%08X "
+ "(Unknown message type)\n",
+ command->header.message_type);
+ break;
+ }
+}
+
+
+/*
+ * Dump the specified SChannel answer using dprintk.
+ */
+void tf_dump_answer(union tf_answer *answer)
+{
+ u32 i;
+ dprintk(
+ KERN_INFO "answer@%p:\n",
+ answer);
+
+ switch (answer->header.message_type) {
+ case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_create_device_context\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " device_context = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->create_device_context.error_code,
+ answer->create_device_context.device_context);
+ break;
+
+ case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "ANSWER_DESTROY_DEVICE_CONTEXT\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " device_context_id = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->destroy_device_context.error_code,
+ answer->destroy_device_context.device_context_id);
+ break;
+
+
+ case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_open_client_session\n"
+ " error_origin = 0x%02X\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " client_session = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->open_client_session.error_origin,
+ answer->header.operation_id,
+ answer->open_client_session.error_code,
+ answer->open_client_session.client_session);
+ for (i = 0; i < 4; i++) {
+ dprintk(KERN_INFO " answers[%d]=0x%08X:0x%08X\n",
+ i,
+ answer->open_client_session.answers[i].
+ value.a,
+ answer->open_client_session.answers[i].
+ value.b);
+ }
+ break;
+
+ case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "ANSWER_CLOSE_CLIENT_SESSION\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->close_client_session.error_code);
+ break;
+
+ case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_register_shared_memory\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " block = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->register_shared_memory.error_code,
+ answer->register_shared_memory.block);
+ break;
+
+ case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "ANSWER_RELEASE_SHARED_MEMORY\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " block_id = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->release_shared_memory.error_code,
+ answer->release_shared_memory.block_id);
+ break;
+
+ case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_invoke_client_command\n"
+ " error_origin = 0x%02X\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->invoke_client_command.error_origin,
+ answer->header.operation_id,
+ answer->invoke_client_command.error_code
+ );
+ for (i = 0; i < 4; i++) {
+ dprintk(KERN_INFO " answers[%d]=0x%08X:0x%08X\n",
+ i,
+ answer->invoke_client_command.answers[i].
+ value.a,
+ answer->invoke_client_command.answers[i].
+ value.b);
+ }
+ break;
+
+ case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_ANSWER_CANCEL_CLIENT_COMMAND\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->cancel_client_operation.error_code);
+ break;
+
+ case TF_MESSAGE_TYPE_MANAGEMENT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_MANAGEMENT\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->header.error_code);
+ break;
+
+ default:
+ dprintk(
+ KERN_ERR " message_type = 0x%02X "
+ "(Unknown message type)\n",
+ answer->header.message_type);
+ break;
+
+ }
+}
+
+#endif /* defined(TF_DRIVER_DEBUG_SUPPORT) */
+
+/*----------------------------------------------------------------------------
+ * SHA-1 implementation
+ * This is taken from the Linux kernel source crypto/sha1.c
+ *----------------------------------------------------------------------------*/
+
+struct sha1_ctx {
+ u64 count;
+ u32 state[5];
+ u8 buffer[64];
+};
+
+static inline u32 rol(u32 value, u32 bits)
+{
+ return ((value) << (bits)) | ((value) >> (32 - (bits)));
+}
+
+/* blk0() and blk() perform the initial expand. */
+/* I got the idea of expanding during the round function from SSLeay */
+#define blk0(i) block32[i]
+
+#define blk(i) (block32[i & 15] = rol( \
+ block32[(i + 13) & 15] ^ block32[(i + 8) & 15] ^ \
+ block32[(i + 2) & 15] ^ block32[i & 15], 1))
+
+/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
+#define R0(v, w, x, y, z, i) do { \
+ z += ((w & (x ^ y)) ^ y) + blk0(i) + 0x5A827999 + rol(v, 5); \
+ w = rol(w, 30); } \
+ while (0)
+
+#define R1(v, w, x, y, z, i) do { \
+ z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
+ w = rol(w, 30); } \
+ while (0)
+
+#define R2(v, w, x, y, z, i) do { \
+ z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \
+ w = rol(w, 30); } \
+ while (0)
+
+#define R3(v, w, x, y, z, i) do { \
+ z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
+ w = rol(w, 30); } \
+ while (0)
+
+#define R4(v, w, x, y, z, i) do { \
+ z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
+ w = rol(w, 30); } \
+ while (0)
+
+
+/* Hash a single 512-bit block. This is the core of the algorithm. */
+static void sha1_transform(u32 *state, const u8 *in)
+{
+ u32 a, b, c, d, e;
+ u32 block32[16];
+
+ /* convert/copy data to workspace */
+ for (a = 0; a < sizeof(block32)/sizeof(u32); a++)
+ block32[a] = ((u32) in[4 * a]) << 24 |
+ ((u32) in[4 * a + 1]) << 16 |
+ ((u32) in[4 * a + 2]) << 8 |
+ ((u32) in[4 * a + 3]);
+
+ /* Copy context->state[] to working vars */
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+
+ /* 4 rounds of 20 operations each. Loop unrolled. */
+ R0(a, b, c, d, e, 0); R0(e, a, b, c, d, 1);
+ R0(d, e, a, b, c, 2); R0(c, d, e, a, b, 3);
+ R0(b, c, d, e, a, 4); R0(a, b, c, d, e, 5);
+ R0(e, a, b, c, d, 6); R0(d, e, a, b, c, 7);
+ R0(c, d, e, a, b, 8); R0(b, c, d, e, a, 9);
+ R0(a, b, c, d, e, 10); R0(e, a, b, c, d, 11);
+ R0(d, e, a, b, c, 12); R0(c, d, e, a, b, 13);
+ R0(b, c, d, e, a, 14); R0(a, b, c, d, e, 15);
+
+ R1(e, a, b, c, d, 16); R1(d, e, a, b, c, 17);
+ R1(c, d, e, a, b, 18); R1(b, c, d, e, a, 19);
+
+ R2(a, b, c, d, e, 20); R2(e, a, b, c, d, 21);
+ R2(d, e, a, b, c, 22); R2(c, d, e, a, b, 23);
+ R2(b, c, d, e, a, 24); R2(a, b, c, d, e, 25);
+ R2(e, a, b, c, d, 26); R2(d, e, a, b, c, 27);
+ R2(c, d, e, a, b, 28); R2(b, c, d, e, a, 29);
+ R2(a, b, c, d, e, 30); R2(e, a, b, c, d, 31);
+ R2(d, e, a, b, c, 32); R2(c, d, e, a, b, 33);
+ R2(b, c, d, e, a, 34); R2(a, b, c, d, e, 35);
+ R2(e, a, b, c, d, 36); R2(d, e, a, b, c, 37);
+ R2(c, d, e, a, b, 38); R2(b, c, d, e, a, 39);
+
+ R3(a, b, c, d, e, 40); R3(e, a, b, c, d, 41);
+ R3(d, e, a, b, c, 42); R3(c, d, e, a, b, 43);
+ R3(b, c, d, e, a, 44); R3(a, b, c, d, e, 45);
+ R3(e, a, b, c, d, 46); R3(d, e, a, b, c, 47);
+ R3(c, d, e, a, b, 48); R3(b, c, d, e, a, 49);
+ R3(a, b, c, d, e, 50); R3(e, a, b, c, d, 51);
+ R3(d, e, a, b, c, 52); R3(c, d, e, a, b, 53);
+ R3(b, c, d, e, a, 54); R3(a, b, c, d, e, 55);
+ R3(e, a, b, c, d, 56); R3(d, e, a, b, c, 57);
+ R3(c, d, e, a, b, 58); R3(b, c, d, e, a, 59);
+
+ R4(a, b, c, d, e, 60); R4(e, a, b, c, d, 61);
+ R4(d, e, a, b, c, 62); R4(c, d, e, a, b, 63);
+ R4(b, c, d, e, a, 64); R4(a, b, c, d, e, 65);
+ R4(e, a, b, c, d, 66); R4(d, e, a, b, c, 67);
+ R4(c, d, e, a, b, 68); R4(b, c, d, e, a, 69);
+ R4(a, b, c, d, e, 70); R4(e, a, b, c, d, 71);
+ R4(d, e, a, b, c, 72); R4(c, d, e, a, b, 73);
+ R4(b, c, d, e, a, 74); R4(a, b, c, d, e, 75);
+ R4(e, a, b, c, d, 76); R4(d, e, a, b, c, 77);
+ R4(c, d, e, a, b, 78); R4(b, c, d, e, a, 79);
+
+ /* Add the working vars back into context.state[] */
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ /* Wipe variables */
+ a = b = c = d = e = 0;
+ memset(block32, 0x00, sizeof(block32));
+}
+
+
+static void sha1_init(void *ctx)
+{
+ struct sha1_ctx *sctx = ctx;
+ static const struct sha1_ctx initstate = {
+ 0,
+ { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 },
+ { 0, }
+ };
+
+ *sctx = initstate;
+}
+
+
+static void sha1_update(void *ctx, const u8 *data, unsigned int len)
+{
+ struct sha1_ctx *sctx = ctx;
+ unsigned int i, j;
+
+ j = (sctx->count >> 3) & 0x3f;
+ sctx->count += len << 3;
+
+ if ((j + len) > 63) {
+ memcpy(&sctx->buffer[j], data, (i = 64 - j));
+ sha1_transform(sctx->state, sctx->buffer);
+ for ( ; i + 63 < len; i += 64)
+ sha1_transform(sctx->state, &data[i]);
+ j = 0;
+ } else
+ i = 0;
+ memcpy(&sctx->buffer[j], &data[i], len - i);
+}
+
+
+/* Add padding and return the message digest. */
+static void sha1_final(void *ctx, u8 *out)
+{
+ struct sha1_ctx *sctx = ctx;
+ u32 i, j, index, padlen;
+ u64 t;
+ u8 bits[8] = { 0, };
+ static const u8 padding[64] = { 0x80, };
+
+ t = sctx->count;
+ bits[7] = 0xff & t; t >>= 8;
+ bits[6] = 0xff & t; t >>= 8;
+ bits[5] = 0xff & t; t >>= 8;
+ bits[4] = 0xff & t; t >>= 8;
+ bits[3] = 0xff & t; t >>= 8;
+ bits[2] = 0xff & t; t >>= 8;
+ bits[1] = 0xff & t; t >>= 8;
+ bits[0] = 0xff & t;
+
+ /* Pad out to 56 mod 64 */
+ index = (sctx->count >> 3) & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+ sha1_update(sctx, padding, padlen);
+
+ /* Append length */
+ sha1_update(sctx, bits, sizeof(bits));
+
+ /* Store state in digest */
+ for (i = j = 0; i < 5; i++, j += 4) {
+ u32 t2 = sctx->state[i];
+ out[j+3] = t2 & 0xff; t2 >>= 8;
+ out[j+2] = t2 & 0xff; t2 >>= 8;
+ out[j+1] = t2 & 0xff; t2 >>= 8;
+ out[j] = t2 & 0xff;
+ }
+
+ /* Wipe context */
+ memset(sctx, 0, sizeof(*sctx));
+}
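
For reference, callers drive this SHA-1 code with the usual init/update/final
sequence. A minimal sketch (buf and len are illustrative names, not part of
the patch):

	struct sha1_ctx ctx;
	u8 digest[SHA1_DIGEST_SIZE];

	sha1_init(&ctx);
	sha1_update(&ctx, buf, len);	/* may be called repeatedly */
	sha1_final(&ctx, digest);	/* writes 20 bytes, then wipes ctx */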
+
+
+
+
+/*----------------------------------------------------------------------------
+ * Process identification
+ *----------------------------------------------------------------------------*/
+
+/* This function computes a SHA-1 hash of the current process' executable
+ * for authentication */
+int tf_get_current_process_hash(void *hash)
+{
+	int result = -ENOENT;
+ void *buffer;
+ struct mm_struct *mm;
+ unsigned long populate;
+
+ buffer = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (buffer == NULL) {
+ dprintk(
+ KERN_ERR "tf_get_current_process_hash:"
+ " Out of memory for buffer!\n");
+ return -ENOMEM;
+ }
+
+ mm = current->mm;
+
+ down_read(&(mm->mmap_sem));
+ if (mm->exe_file) {
+ struct dentry *dentry;
+ unsigned long start;
+ unsigned long cur;
+ unsigned long end;
+ struct sha1_ctx sha1;
+
+ dentry = dget(mm->exe_file->f_dentry);
+
+ dprintk(
+ KERN_DEBUG "tf_get_current_process_hash: "
+ "Found executable VMA for inode %lu "
+ "(%lu bytes).\n",
+ dentry->d_inode->i_ino,
+ (unsigned long) (dentry->d_inode->
+ i_size));
+
+ start = do_mmap_pgoff(mm->exe_file, 0,
+ dentry->d_inode->i_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_PRIVATE, 0, &populate);
+		if (IS_ERR_VALUE(start)) {
+			dprintk(
+				KERN_ERR "tf_get_current_process_hash: "
+				"do_mmap failed (error %d)!\n",
+				(int) start);
+ dput(dentry);
+ result = -EFAULT;
+ goto out;
+ }
+
+ end = start + dentry->d_inode->i_size;
+
+ sha1_init(&sha1);
+ cur = start;
+ while (cur < end) {
+ unsigned long chunk;
+
+ chunk = end - cur;
+ if (chunk > PAGE_SIZE)
+ chunk = PAGE_SIZE;
+ if (copy_from_user(buffer, (const void *) cur,
+ chunk) != 0) {
+ dprintk(
+ KERN_ERR "tf_get_current_"
+ "process_hash: copy_from_user "
+ "failed!\n");
+ result = -EINVAL;
+ (void) do_munmap(mm, start,
+ dentry->d_inode->i_size);
+ dput(dentry);
+ goto out;
+ }
+ sha1_update(&sha1, buffer, chunk);
+ cur += chunk;
+ }
+ sha1_final(&sha1, hash);
+ result = 0;
+
+ (void) do_munmap(mm, start, dentry->d_inode->i_size);
+ dput(dentry);
+ }
+out:
+ up_read(&(mm->mmap_sem));
+
+ internal_kfree(buffer);
+
+ if (result == -ENOENT)
+ dprintk(
+ KERN_ERR "tf_get_current_process_hash: "
+ "No executable VMA found for process!\n");
+ return result;
+}
+
+#ifndef CONFIG_ANDROID
+/* This function hashes the path of the current application.
+ * If data is not NULL, it is added to the hash as well;
+ * otherwise only the path is hashed.
+ */
+int tf_hash_application_path_and_data(char *buffer, void *data,
+ u32 data_len)
+{
+ int result = -ENOENT;
+ char *tmp = NULL;
+ struct mm_struct *mm;
+
+ tmp = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (tmp == NULL) {
+ result = -ENOMEM;
+ goto end;
+ }
+
+ mm = current->mm;
+
+ down_read(&(mm->mmap_sem));
+ if (mm->exe_file) {
+ struct path *path;
+ char *endpath;
+ size_t pathlen;
+ struct sha1_ctx sha1;
+ u8 hash[SHA1_DIGEST_SIZE];
+
+ path = &mm->exe_file->f_path;
+
+ endpath = d_path(path, tmp, PAGE_SIZE);
+		if (IS_ERR(endpath)) {
+ result = PTR_ERR(endpath);
+ up_read(&(mm->mmap_sem));
+ goto end;
+ }
+ pathlen = (tmp + PAGE_SIZE) - endpath;
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+ {
+ char *c;
+ dprintk(KERN_DEBUG "current process path = ");
+ for (c = endpath;
+ c < tmp + PAGE_SIZE;
+ c++)
+ dprintk("%c", *c);
+
+ dprintk(", uid=%d, euid=%d\n", current_uid(),
+ current_euid());
+ }
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+ sha1_init(&sha1);
+ sha1_update(&sha1, endpath, pathlen);
+ if (data != NULL) {
+ dprintk(KERN_INFO "current process path: "
+ "Hashing additional data\n");
+ sha1_update(&sha1, data, data_len);
+ }
+ sha1_final(&sha1, hash);
+ memcpy(buffer, hash, sizeof(hash));
+
+ result = 0;
+ }
+ up_read(&(mm->mmap_sem));
+
+end:
+ if (tmp != NULL)
+ internal_kfree(tmp);
+
+ return result;
+}
+#endif /* !CONFIG_ANDROID */
+
+void *internal_kmalloc(size_t size, int priority)
+{
+ void *ptr;
+ struct tf_device *dev = tf_get_device();
+
+ ptr = kmalloc(size, priority);
+
+ if (ptr != NULL)
+ atomic_inc(
+ &dev->stats.stat_memories_allocated);
+
+ return ptr;
+}
+
+void internal_kfree(void *ptr)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (ptr != NULL)
+ atomic_dec(
+ &dev->stats.stat_memories_allocated);
+	kfree(ptr);
+}
+
+void internal_vunmap(void *ptr)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (ptr != NULL)
+ atomic_dec(
+ &dev->stats.stat_memories_allocated);
+
+	vunmap((void *) ((unsigned long) ptr & PAGE_MASK));
+}
+
+void *internal_vmalloc(size_t size)
+{
+ void *ptr;
+ struct tf_device *dev = tf_get_device();
+
+ ptr = vmalloc(size);
+
+ if (ptr != NULL)
+ atomic_inc(
+ &dev->stats.stat_memories_allocated);
+
+ return ptr;
+}
+
+void internal_vfree(void *ptr)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (ptr != NULL)
+ atomic_dec(
+ &dev->stats.stat_memories_allocated);
+	vfree(ptr);
+}
+
+unsigned long internal_get_zeroed_page(int priority)
+{
+ unsigned long result;
+ struct tf_device *dev = tf_get_device();
+
+ result = get_zeroed_page(priority);
+
+ if (result != 0)
+ atomic_inc(&dev->stats.
+ stat_pages_allocated);
+
+ return result;
+}
+
+void internal_free_page(unsigned long addr)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (addr != 0)
+ atomic_dec(
+ &dev->stats.stat_pages_allocated);
+	free_page(addr);
+}
+
+int internal_get_user_pages(
+ struct task_struct *tsk,
+ struct mm_struct *mm,
+ unsigned long start,
+ int len,
+ int write,
+ int force,
+ struct page **pages,
+ struct vm_area_struct **vmas)
+{
+ int result;
+ struct tf_device *dev = tf_get_device();
+
+ result = get_user_pages(
+ tsk,
+ mm,
+ start,
+ len,
+ write,
+ force,
+ pages,
+ vmas);
+
+ if (result > 0)
+ atomic_add(result,
+ &dev->stats.stat_pages_locked);
+
+ return result;
+}
+
+void internal_get_page(struct page *page)
+{
+ struct tf_device *dev = tf_get_device();
+
+ atomic_inc(&dev->stats.stat_pages_locked);
+
+ get_page(page);
+}
+
+void internal_page_cache_release(struct page *page)
+{
+ struct tf_device *dev = tf_get_device();
+
+ atomic_dec(&dev->stats.stat_pages_locked);
+
+ page_cache_release(page);
+}
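
These internal_* wrappers exist purely for resource accounting: each
allocation path increments a counter in dev->stats and the matching release
decrements it, so a non-zero counter after teardown points at a leak. A
minimal sketch of such a check, using the counter names from this file
(illustrative only, not part of the patch):

	struct tf_device *dev = tf_get_device();

	if (atomic_read(&dev->stats.stat_memories_allocated) != 0 ||
	    atomic_read(&dev->stats.stat_pages_allocated) != 0 ||
	    atomic_read(&dev->stats.stat_pages_locked) != 0)
		pr_warn("tf_driver: resources still accounted at teardown\n");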
diff --git a/security/tf_driver/tf_util.h b/security/tf_driver/tf_util.h
new file mode 100644
index 000000000000..3b124ed9ce95
--- /dev/null
+++ b/security/tf_driver/tf_util.h
@@ -0,0 +1,123 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * Copyright (C) 2011-2012 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_UTIL_H__
+#define __TF_UTIL_H__
+
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/crypto.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <asm/byteorder.h>
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+
+/*----------------------------------------------------------------------------
+ * Debug printing routines
+ *----------------------------------------------------------------------------*/
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+extern unsigned tf_debug_level;
+
+void address_cache_property(unsigned long va);
+
+#define dprintk(args...) ((void)(tf_debug_level >= 6 ? printk(args) : 0))
+#define dpr_info(args...) ((void)(tf_debug_level >= 3 ? pr_info(args) : 0))
+#define dpr_err(args...) ((void)(tf_debug_level >= 1 ? pr_err(args) : 0))
+#define INFO(fmt, args...) \
+ ((void)dprintk(KERN_INFO "%s: " fmt "\n", __func__, ## args))
+#define WARNING(fmt, args...) \
+ (tf_debug_level >= 3 ? \
+ printk(KERN_WARNING "%s: " fmt "\n", __func__, ## args) : \
+ (void)0)
+#define ERROR(fmt, args...) \
+ (tf_debug_level >= 1 ? \
+ printk(KERN_ERR "%s: " fmt "\n", __func__, ## args) : \
+ (void)0)
+void tf_trace_array(const char *fun, const char *msg,
+ const void *ptr, size_t len);
+#define TF_TRACE_ARRAY(ptr, len) \
+ (tf_debug_level >= 7 ? \
+ tf_trace_array(__func__, #ptr "/" #len, ptr, len) : \
+ 0)
+
+void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer);
+
+void tf_dump_command(union tf_command *command);
+
+void tf_dump_answer(union tf_answer *answer);
+
+#else /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+#define dprintk(args...) do { ; } while (0)
+#define dpr_info(args...) do { ; } while (0)
+#define dpr_err(args...) do { ; } while (0)
+#define INFO(fmt, args...) ((void)0)
+#define WARNING(fmt, args...) ((void)0)
+#define ERROR(fmt, args...) ((void)0)
+#define TF_TRACE_ARRAY(ptr, len) ((void)(ptr), (void)(len))
+#define tf_dump_l1_shared_buffer(buffer) ((void) 0)
+#define tf_dump_command(command) ((void) 0)
+#define tf_dump_answer(answer) ((void) 0)
+
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
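The numeric thresholds above form a simple severity ladder: dpr_err fires
from level 1, dpr_info from level 3, dprintk from level 6, and
TF_TRACE_ARRAY only at level 7. A minimal sketch of the effect, assuming
tf_debug_level == 3 (illustrative, not part of the patch):

	dpr_err("fatal: %d\n", rc);		/* printed: level >= 1 */
	dpr_info("opening session\n");		/* printed: level >= 3 */
	dprintk(KERN_INFO "verbose detail\n");	/* suppressed: needs >= 6 */
	TF_TRACE_ARRAY(buf, len);		/* suppressed: needs >= 7 */
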
+
+#define SHA1_DIGEST_SIZE 20
+
+/*----------------------------------------------------------------------------
+ * Process identification
+ *----------------------------------------------------------------------------*/
+
+int tf_get_current_process_hash(void *hash);
+
+#ifndef CONFIG_ANDROID
+int tf_hash_application_path_and_data(char *buffer, void *data, u32 data_len);
+#endif /* !CONFIG_ANDROID */
+
+/*----------------------------------------------------------------------------
+ * Statistic computation
+ *----------------------------------------------------------------------------*/
+
+void *internal_kmalloc(size_t size, int priority);
+void internal_kfree(void *ptr);
+void internal_vunmap(void *ptr);
+void *internal_vmalloc(size_t size);
+void internal_vfree(void *ptr);
+unsigned long internal_get_zeroed_page(int priority);
+void internal_free_page(unsigned long addr);
+int internal_get_user_pages(
+ struct task_struct *tsk,
+ struct mm_struct *mm,
+ unsigned long start,
+ int len,
+ int write,
+ int force,
+ struct page **pages,
+ struct vm_area_struct **vmas);
+void internal_get_page(struct page *page);
+void internal_page_cache_release(struct page *page);
+#endif /* __TF_UTIL_H__ */
diff --git a/security/tlk_driver/Kconfig b/security/tlk_driver/Kconfig
new file mode 100644
index 000000000000..5199be43dd20
--- /dev/null
+++ b/security/tlk_driver/Kconfig
@@ -0,0 +1,14 @@
+config TRUSTED_LITTLE_KERNEL
+ bool "Enable Open Trusted Execution driver"
+ select TEGRA_USE_SECURE_KERNEL
+ help
+	  This option adds kernel support for communication with the
+	  Trusted LK (TLK) secure OS monitor/runtime.
+ If you are unsure how to answer this question, answer N.
+
+config OTE_ENABLE_LOGGER
+ bool "Enable TLK logs in linux kmsg"
+ depends on TRUSTED_LITTLE_KERNEL
+ help
+ This option adds support in the kernel driver to read the logs
+ from the secure world and make them available as a part of kmsg.
diff --git a/security/tlk_driver/Makefile b/security/tlk_driver/Makefile
new file mode 100644
index 000000000000..86a293d2f009
--- /dev/null
+++ b/security/tlk_driver/Makefile
@@ -0,0 +1,30 @@
+#
+# Copyright (c) 2013 NVIDIA Corporation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+plus_sec := $(call as-instr,.arch_extension sec,+sec)
+AFLAGS_ote_irq.o :=-Wa,-march=armv7-a$(plus_sec)
+CFLAGS_ote_comms.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
+CFLAGS_ote_fs.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
+
+tlk_driver-objs += ote_device.o
+tlk_driver-objs += ote_comms.o
+tlk_driver-objs += ote_fs.o
+tlk_driver-objs += ote_irq.o
+tlk_driver-objs += ote_log.o
+
+obj-$(CONFIG_TRUSTED_LITTLE_KERNEL) += tlk_driver.o
diff --git a/security/tlk_driver/ote_comms.c b/security/tlk_driver/ote_comms.c
new file mode 100644
index 000000000000..46d7fc679b78
--- /dev/null
+++ b/security/tlk_driver/ote_comms.c
@@ -0,0 +1,542 @@
+/*
+ * Copyright (c) 2012-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/printk.h>
+#include <linux/ioctl.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+
+#include "ote_protocol.h"
+
+bool verbose_smc;
+core_param(verbose_smc, verbose_smc, bool, 0644);
+
+#define SET_RESULT(req, r, ro) \
+	do { (req)->result = (r); (req)->result_origin = (ro); } while (0)
+
+static struct te_shmem_desc *te_add_shmem_desc(void *buffer, size_t size,
+ struct tlk_context *context)
+{
+ struct te_shmem_desc *shmem_desc = NULL;
+ shmem_desc = kzalloc(sizeof(struct te_shmem_desc), GFP_KERNEL);
+ if (shmem_desc) {
+ INIT_LIST_HEAD(&(shmem_desc->list));
+ shmem_desc->buffer = buffer;
+ shmem_desc->size = size;
+ list_add_tail(&shmem_desc->list, &(context->shmem_alloc_list));
+ }
+
+ return shmem_desc;
+}
+
+static int te_pin_mem_buffers(void *buffer, size_t size,
+ struct tlk_context *context)
+{
+ struct te_shmem_desc *shmem_desc = NULL;
+ int ret = 0;
+
+ shmem_desc = te_add_shmem_desc(buffer, size, context);
+ if (!shmem_desc) {
+ pr_err("%s: te_add_shmem_desc Failed\n", __func__);
+ ret = OTE_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ return OTE_SUCCESS;
+error:
+ return ret;
+}
+
+static int te_setup_temp_buffers(struct te_request *request,
+ struct tlk_context *context)
+{
+ uint32_t i;
+ int ret = OTE_SUCCESS;
+ struct te_oper_param *params = request->params;
+
+ for (i = 0; i < request->params_size; i++) {
+ switch (params[i].type) {
+ case TE_PARAM_TYPE_NONE:
+ case TE_PARAM_TYPE_INT_RO:
+ case TE_PARAM_TYPE_INT_RW:
+ break;
+ case TE_PARAM_TYPE_MEM_RO:
+ case TE_PARAM_TYPE_MEM_RW:
+ ret = te_pin_mem_buffers(
+ params[i].u.Mem.base,
+ params[i].u.Mem.len,
+ context);
+ if (ret < 0) {
+ pr_err("%s failed with err (%d)\n",
+ __func__, ret);
+ ret = OTE_ERROR_BAD_PARAMETERS;
+ break;
+ }
+ break;
+ default:
+ pr_err("%s: OTE_ERROR_BAD_PARAMETERS\n", __func__);
+ ret = OTE_ERROR_BAD_PARAMETERS;
+ break;
+ }
+ }
+ return ret;
+}
+
+static int te_setup_temp_buffers_compat(struct te_request_compat *request,
+ struct tlk_context *context)
+{
+ uint32_t i;
+ int ret = OTE_SUCCESS;
+ struct te_oper_param_compat *params;
+
+ params = (struct te_oper_param_compat *)(uintptr_t)request->params;
+ for (i = 0; i < request->params_size; i++) {
+ switch (params[i].type) {
+ case TE_PARAM_TYPE_NONE:
+ case TE_PARAM_TYPE_INT_RO:
+ case TE_PARAM_TYPE_INT_RW:
+ break;
+ case TE_PARAM_TYPE_MEM_RO:
+ case TE_PARAM_TYPE_MEM_RW:
+ ret = te_pin_mem_buffers(
+ (void *)(uintptr_t)params[i].u.Mem.base,
+ params[i].u.Mem.len,
+ context);
+ if (ret < 0) {
+ pr_err("%s failed with err (%d)\n",
+ __func__, ret);
+ ret = OTE_ERROR_BAD_PARAMETERS;
+ break;
+ }
+ break;
+ default:
+ pr_err("%s: OTE_ERROR_BAD_PARAMETERS\n", __func__);
+ ret = OTE_ERROR_BAD_PARAMETERS;
+ break;
+ }
+ }
+ return ret;
+}
+
+static void te_del_shmem_desc(void *buffer, struct tlk_context *context)
+{
+ struct te_shmem_desc *shmem_desc, *tmp_shmem_desc;
+
+ list_for_each_entry_safe(shmem_desc, tmp_shmem_desc,
+ &(context->shmem_alloc_list), list) {
+ if (shmem_desc->buffer == buffer) {
+ list_del(&shmem_desc->list);
+ kfree(shmem_desc);
+ }
+ }
+}
+
+/*
+ * Deregister previously initialized shared memory
+ */
+void te_unregister_memory(void *buffer,
+ struct tlk_context *context)
+{
+ if (!(list_empty(&(context->shmem_alloc_list))))
+ te_del_shmem_desc(buffer, context);
+ else
+ pr_err("No buffers to unpin\n");
+}
+
+static void te_unpin_temp_buffers(struct te_request *request,
+ struct tlk_context *context)
+{
+ uint32_t i;
+ struct te_oper_param *params = request->params;
+
+ for (i = 0; i < request->params_size; i++) {
+ switch (params[i].type) {
+ case TE_PARAM_TYPE_NONE:
+ case TE_PARAM_TYPE_INT_RO:
+ case TE_PARAM_TYPE_INT_RW:
+ break;
+ case TE_PARAM_TYPE_MEM_RO:
+ case TE_PARAM_TYPE_MEM_RW:
+ te_unregister_memory(params[i].u.Mem.base, context);
+ break;
+ default:
+ pr_err("%s: OTE_ERROR_BAD_PARAMETERS\n", __func__);
+ break;
+ }
+ }
+}
+
+static void te_unpin_temp_buffers_compat(struct te_request_compat *request,
+ struct tlk_context *context)
+{
+ uint32_t i;
+ struct te_oper_param_compat *params;
+
+ params = (struct te_oper_param_compat *)(uintptr_t)request->params;
+ for (i = 0; i < request->params_size; i++) {
+ switch (params[i].type) {
+ case TE_PARAM_TYPE_NONE:
+ case TE_PARAM_TYPE_INT_RO:
+ case TE_PARAM_TYPE_INT_RW:
+ break;
+ case TE_PARAM_TYPE_MEM_RO:
+ case TE_PARAM_TYPE_MEM_RW:
+ te_unregister_memory(
+ (void *)(uintptr_t)params[i].u.Mem.base,
+ context);
+ break;
+ default:
+ pr_err("%s: OTE_ERROR_BAD_PARAMETERS\n", __func__);
+ break;
+ }
+ }
+}
+
+#ifdef CONFIG_SMP
+cpumask_t saved_cpu_mask;
+static void switch_cpumask_to_cpu0(void)
+{
+ long ret;
+ cpumask_t local_cpu_mask = CPU_MASK_NONE;
+
+ cpu_set(0, local_cpu_mask);
+ cpumask_copy(&saved_cpu_mask, tsk_cpus_allowed(current));
+ ret = sched_setaffinity(0, &local_cpu_mask);
+ if (ret)
+ pr_err("sched_setaffinity #1 -> 0x%lX", ret);
+}
+
+static void restore_cpumask(void)
+{
+ long ret = sched_setaffinity(0, &saved_cpu_mask);
+ if (ret)
+ pr_err("sched_setaffinity #2 -> 0x%lX", ret);
+}
+#else
+static inline void switch_cpumask_to_cpu0(void) {}
+static inline void restore_cpumask(void) {}
+#endif
+
+static uint32_t _tlk_generic_smc(uint32_t arg0, uint32_t arg1, uint32_t arg2)
+{
+ register uint32_t r0 asm("r0") = arg0;
+ register uint32_t r1 asm("r1") = arg1;
+ register uint32_t r2 asm("r2") = arg2;
+
+ asm volatile(
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r0")
+ __asmeq("%2", "r1")
+ __asmeq("%3", "r2")
+#ifdef REQUIRES_SEC
+ ".arch_extension sec \n"
+#endif
+ "smc #0 @ switch to secure world\n"
+ : "=r" (r0)
+ : "r" (r0), "r" (r1), "r" (r2)
+ );
+
+ return r0;
+}
+
+uint32_t tlk_generic_smc(uint32_t arg0, uint32_t arg1, uint32_t arg2)
+{
+ uint32_t retval;
+
+ switch_cpumask_to_cpu0();
+
+ retval = _tlk_generic_smc(arg0, arg1, arg2);
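+	/*
+	 * 0xFFFFFFFD signals that the secure world was preempted by a
+	 * non-secure interrupt; (60 << 24) is the restart SMC that
+	 * re-enters TLK so the interrupted operation can complete.
+	 */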
+ while (retval == 0xFFFFFFFD)
+ retval = _tlk_generic_smc((60 << 24), 0, 0);
+
+ restore_cpumask();
+
+ return retval;
+}
+
+static uint32_t _tlk_extended_smc(uint32_t *regs)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)regs;
+
+ /* allows MAX_EXT_SMC_ARGS (r0-r11) to be passed in registers */
+ asm volatile(
+ __asmeq("%0", "r0")
+ "stmfd sp!, {r4-r12} @ save reg state\n"
+ "mov r12, r0 @ reg ptr to r12\n"
+ "ldmia r12, {r0-r11} @ load arg regs\n"
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0 @ switch to secure world\n"
+ "ldmfd sp!, {r4-r12} @ restore saved regs\n"
+ : "=r" (r0)
+ : "r" (r0)
+ );
+
+ return r0;
+}
+
+uint32_t tlk_extended_smc(uint32_t *regs)
+{
+ uint32_t retval;
+
+ switch_cpumask_to_cpu0();
+
+ retval = _tlk_extended_smc(regs);
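+	/* as in tlk_generic_smc(): reissue the restart SMC while preempted */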
+ while (retval == 0xFFFFFFFD)
+ retval = _tlk_generic_smc((60 << 24), 0, 0);
+
+ restore_cpumask();
+
+ return retval;
+}
+
+/*
+ * Do an SMC call
+ */
+static void do_smc(struct te_request *request, struct tlk_device *dev)
+{
+ uint32_t smc_args;
+ uint32_t smc_params = 0;
+
+ if (dev->req_param_buf) {
+ smc_args = (char *)request - dev->req_param_buf;
+ if (request->params)
+ smc_params = (char *)request->params -
+ dev->req_param_buf;
+ } else {
+ smc_args = (uint32_t)virt_to_phys(request);
+ if (request->params)
+ smc_params = (uint32_t)virt_to_phys(request->params);
+ }
+
+ tlk_generic_smc(request->type, smc_args, smc_params);
+
+ /*
+	 * Check to see if there are any logs written by TLK.
+ * If there are, print them out.
+ */
+ ote_print_logs();
+}
+
+/*
+ * Do an SMC call
+ */
+static void do_smc_compat(struct te_request_compat *request,
+ struct tlk_device *dev)
+{
+ uint32_t smc_args;
+ uint32_t smc_params = 0;
+
+ smc_args = (char *)request - dev->req_param_buf;
+ if (request->params) {
+ smc_params =
+ (char *)(uintptr_t)request->params - dev->req_param_buf;
+ }
+
+ tlk_generic_smc(request->type, smc_args, smc_params);
+
+ /*
+	 * Check to see if there are any logs written by TLK.
+ * If there are, print them out.
+ */
+ ote_print_logs();
+}
+
+/*
+ * VPR programming SMC
+ */
+int te_set_vpr_params(void *vpr_base, size_t vpr_size)
+{
+ uint32_t retval;
+
+	/* Share the same lock used when a request is sent from the user side */
+ mutex_lock(&smc_lock);
+
+ retval = tlk_generic_smc(TE_SMC_PROGRAM_VPR, (uint32_t)vpr_base,
+ vpr_size);
+
+ mutex_unlock(&smc_lock);
+
+ if (retval != OTE_SUCCESS) {
+ pr_err("te_set_vpr_params failed err (0x%x)\n", retval);
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(te_set_vpr_params);
+
+/*
+ * Open session SMC (supporting client-based te_open_session() calls)
+ */
+void te_open_session(struct te_opensession *cmd,
+ struct te_request *request,
+ struct tlk_context *context)
+{
+ int ret;
+
+ ret = te_setup_temp_buffers(request, context);
+ if (ret != OTE_SUCCESS) {
+ pr_err("te_setup_temp_buffers failed err (0x%x)\n", ret);
+ SET_RESULT(request, ret, OTE_RESULT_ORIGIN_API);
+ return;
+ }
+
+ memcpy(&request->dest_uuid,
+ &cmd->dest_uuid,
+ sizeof(struct te_service_id));
+
+ pr_info("OPEN_CLIENT_SESSION: 0x%x 0x%x 0x%x 0x%x\n",
+ request->dest_uuid[0],
+ request->dest_uuid[1],
+ request->dest_uuid[2],
+ request->dest_uuid[3]);
+
+ request->type = TE_SMC_OPEN_SESSION;
+
+ do_smc(request, context->dev);
+
+ te_unpin_temp_buffers(request, context);
+}
+
+/*
+ * Close session SMC (supporting client-based te_close_session() calls)
+ */
+void te_close_session(struct te_closesession *cmd,
+ struct te_request *request,
+ struct tlk_context *context)
+{
+ request->session_id = cmd->session_id;
+ request->type = TE_SMC_CLOSE_SESSION;
+
+ do_smc(request, context->dev);
+ if (request->result)
+ pr_info("Error closing session: %08x\n", request->result);
+}
+
+/*
+ * Launch operation SMC (supporting client-based te_launch_operation() calls)
+ */
+void te_launch_operation(struct te_launchop *cmd,
+ struct te_request *request,
+ struct tlk_context *context)
+{
+ int ret;
+
+ ret = te_setup_temp_buffers(request, context);
+ if (ret != OTE_SUCCESS) {
+ pr_err("te_setup_temp_buffers failed err (0x%x)\n", ret);
+ SET_RESULT(request, ret, OTE_RESULT_ORIGIN_API);
+ return;
+ }
+
+ request->session_id = cmd->session_id;
+ request->command_id = cmd->operation.command;
+ request->type = TE_SMC_LAUNCH_OPERATION;
+
+ do_smc(request, context->dev);
+
+ te_unpin_temp_buffers(request, context);
+}
+
+/*
+ * Open session SMC (supporting client-based te_open_session() calls)
+ */
+void te_open_session_compat(struct te_opensession_compat *cmd,
+ struct te_request_compat *request,
+ struct tlk_context *context)
+{
+ int ret;
+
+ ret = te_setup_temp_buffers_compat(request, context);
+ if (ret != OTE_SUCCESS) {
+ pr_err("te_setup_temp_buffers failed err (0x%x)\n", ret);
+ SET_RESULT(request, ret, OTE_RESULT_ORIGIN_API);
+ return;
+ }
+
+ memcpy(&request->dest_uuid,
+ &cmd->dest_uuid,
+ sizeof(struct te_service_id));
+
+ pr_info("OPEN_CLIENT_SESSION_COMPAT: 0x%x 0x%x 0x%x 0x%x\n",
+ request->dest_uuid[0],
+ request->dest_uuid[1],
+ request->dest_uuid[2],
+ request->dest_uuid[3]);
+
+ request->type = TE_SMC_OPEN_SESSION;
+
+ do_smc_compat(request, context->dev);
+
+ te_unpin_temp_buffers_compat(request, context);
+}
+
+/*
+ * Close session SMC (supporting client-based te_close_session() calls)
+ */
+void te_close_session_compat(struct te_closesession_compat *cmd,
+ struct te_request_compat *request,
+ struct tlk_context *context)
+{
+ request->session_id = cmd->session_id;
+ request->type = TE_SMC_CLOSE_SESSION;
+
+ do_smc_compat(request, context->dev);
+ if (request->result)
+ pr_info("Error closing session: %08x\n", request->result);
+}
+
+/*
+ * Launch operation SMC (supporting client-based te_launch_operation() calls)
+ */
+void te_launch_operation_compat(struct te_launchop_compat *cmd,
+ struct te_request_compat *request,
+ struct tlk_context *context)
+{
+ int ret;
+
+ ret = te_setup_temp_buffers_compat(request, context);
+ if (ret != OTE_SUCCESS) {
+ pr_err("te_setup_temp_buffers failed err (0x%x)\n", ret);
+ SET_RESULT(request, ret, OTE_RESULT_ORIGIN_API);
+ return;
+ }
+
+ request->session_id = cmd->session_id;
+ request->command_id = cmd->operation.command;
+ request->type = TE_SMC_LAUNCH_OPERATION;
+
+ do_smc_compat(request, context->dev);
+
+ te_unpin_temp_buffers_compat(request, context);
+}
+
+static int __init tlk_register_irq_handler(void)
+{
+ tlk_generic_smc(TE_SMC_REGISTER_IRQ_HANDLER,
+ (unsigned int)tlk_irq_handler, 0);
+ return 0;
+}
+
+arch_initcall(tlk_register_irq_handler);
diff --git a/security/tlk_driver/ote_device.c b/security/tlk_driver/ote_device.c
new file mode 100644
index 000000000000..170f46c06e14
--- /dev/null
+++ b/security/tlk_driver/ote_device.c
@@ -0,0 +1,769 @@
+/*
+ * Copyright (c) 2013-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/printk.h>
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+
+#include "ote_protocol.h"
+
+#define SET_ANSWER(a, r, ro) \
+	do { (a).result = (r); (a).result_origin = (ro); } while (0)
+
+struct tlk_device tlk_dev;
+DEFINE_MUTEX(smc_lock);
+
+static int te_create_free_cmd_list(struct tlk_device *dev)
+{
+ int cmd_desc_count, ret = 0;
+ struct te_cmd_req_desc *req_desc;
+ struct te_cmd_req_desc_compat *req_desc_compat;
+ int bitmap_size;
+ bool use_reqbuf;
+
+ /*
+ * Check if new shared req/param register SMC is supported.
+ *
+ * If it is, TLK can map in the shared req/param buffers and do_smc
+ * only needs to send the offsets within each (with cache coherency
+ * being maintained by HW through an NS mapping).
+ *
+ * If the SMC support is not yet present, then fallback to the old
+ * mode of writing to an uncached buffer to maintain coherency (and
+ * phys addresses are passed in do_smc).
+ */
+ dev->req_param_buf = NULL;
+ use_reqbuf = !tlk_generic_smc(TE_SMC_REGISTER_REQ_BUF, 0, 0);
+
+ if (use_reqbuf) {
+		dev->req_param_buf = kmalloc((2 * PAGE_SIZE), GFP_KERNEL);
+		if (!dev->req_param_buf) {
+			ret = -ENOMEM;
+			goto error;
+		}
+
+ /* requests in the first page, params in the second */
+ dev->req_addr = (struct te_request *) dev->req_param_buf;
+ dev->param_addr = (struct te_oper_param *)
+ (dev->req_param_buf + PAGE_SIZE);
+
+ tlk_generic_smc(TE_SMC_REGISTER_REQ_BUF,
+ (uint32_t)dev->req_addr, (2 * PAGE_SIZE));
+ } else {
+ dev->req_addr = dma_alloc_coherent(NULL, PAGE_SIZE,
+ &dev->req_addr_phys, GFP_KERNEL);
+ dev->param_addr = dma_alloc_coherent(NULL, PAGE_SIZE,
+ &dev->param_addr_phys, GFP_KERNEL);
+ }
+
+ if ((dev->req_addr == NULL) || (dev->param_addr == NULL)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* requests in the first page, params in the second */
+ dev->req_addr_compat = (struct te_request_compat *)
+ dev->req_param_buf;
+ dev->param_addr_compat = (struct te_oper_param_compat *)
+ (dev->req_param_buf + PAGE_SIZE);
+
+	/* allocate the param slot bitmap */
+	bitmap_size = BITS_TO_LONGS(TE_PARAM_MAX) * sizeof(long);
+	dev->param_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!dev->param_bitmap) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+ for (cmd_desc_count = 0;
+ cmd_desc_count < TE_CMD_DESC_MAX; cmd_desc_count++) {
+
+ req_desc = kzalloc(sizeof(struct te_cmd_req_desc), GFP_KERNEL);
+ if (req_desc == NULL) {
+ pr_err("Failed to allocate cmd req descriptor\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+ req_desc->req_addr = dev->req_addr + cmd_desc_count;
+ INIT_LIST_HEAD(&(req_desc->list));
+
+ /* Add the cmd param descriptor to free list */
+ list_add_tail(&req_desc->list, &(dev->free_cmd_list));
+ }
+
+ for (cmd_desc_count = 0;
+ cmd_desc_count < TE_CMD_DESC_MAX_COMPAT; cmd_desc_count++) {
+
+ req_desc_compat = kzalloc(sizeof(struct te_cmd_req_desc_compat),
+ GFP_KERNEL);
+ if (req_desc_compat == NULL) {
+ pr_err("Failed to allocate cmd req descriptor\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+ req_desc_compat->req_addr =
+ dev->req_addr_compat + cmd_desc_count;
+ INIT_LIST_HEAD(&(req_desc_compat->list));
+
+ /* Add the cmd param descriptor to free list */
+ list_add_tail(&req_desc_compat->list, &(dev->free_cmd_list));
+ }
+
+error:
+ return ret;
+}
+
+static struct te_oper_param *te_get_free_params(struct tlk_device *dev,
+ unsigned int nparams)
+{
+ struct te_oper_param *params = NULL;
+ int idx, nbits;
+
+ if (nparams) {
+ nbits = get_count_order(nparams);
+ idx = bitmap_find_free_region(dev->param_bitmap,
+ TE_PARAM_MAX, nbits);
+ if (idx >= 0)
+ params = dev->param_addr + idx;
+ }
+ return params;
+}
+
+static void te_put_free_params(struct tlk_device *dev,
+ struct te_oper_param *params, uint32_t nparams)
+{
+ int idx, nbits;
+
+ idx = (params - dev->param_addr);
+ nbits = get_count_order(nparams);
+ bitmap_release_region(dev->param_bitmap, idx, nbits);
+}
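
Note that parameter slots are reserved in power-of-two regions:
get_count_order() rounds the request up, so asking for three parameters
reserves four slots. A minimal sketch of the rounding (illustrative, not
part of the patch):

	/* nparams -> slots reserved: 1 -> 1, 2 -> 2, 3 -> 4, 5 -> 8 */
	nbits = get_count_order(nparams);
	slots = 1 << nbits;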
+
+static struct te_oper_param_compat *
+ te_get_free_params_compat(struct tlk_device *dev, unsigned int nparams)
+{
+ struct te_oper_param_compat *params = NULL;
+ int idx, nbits;
+
+ if (nparams) {
+ nbits = get_count_order(nparams);
+ idx = bitmap_find_free_region(dev->param_bitmap,
+ TE_PARAM_MAX, nbits);
+ if (idx >= 0)
+ params = dev->param_addr_compat + idx;
+ }
+ return params;
+}
+
+static void te_put_free_params_compat(struct tlk_device *dev,
+ struct te_oper_param_compat *params, uint32_t nparams)
+{
+ int idx, nbits;
+
+ idx = (params - dev->param_addr_compat);
+ nbits = get_count_order(nparams);
+ bitmap_release_region(dev->param_bitmap, idx, nbits);
+}
+
+static struct te_cmd_req_desc *te_get_free_cmd_desc(struct tlk_device *dev)
+{
+ struct te_cmd_req_desc *cmd_desc = NULL;
+
+ if (!(list_empty(&(dev->free_cmd_list)))) {
+ cmd_desc = list_first_entry(&(dev->free_cmd_list),
+ struct te_cmd_req_desc, list);
+ list_del(&(cmd_desc->list));
+ list_add_tail(&cmd_desc->list, &(dev->used_cmd_list));
+ }
+ return cmd_desc;
+}
+
+static void te_put_used_cmd_desc(struct tlk_device *dev,
+ struct te_cmd_req_desc *cmd_desc)
+{
+ struct te_cmd_req_desc *param_desc, *tmp_param_desc;
+
+ if (cmd_desc) {
+ list_for_each_entry_safe(param_desc, tmp_param_desc,
+ &(dev->used_cmd_list), list) {
+ if (cmd_desc->req_addr == param_desc->req_addr) {
+ list_del(&param_desc->list);
+ list_add_tail(&param_desc->list,
+ &(dev->free_cmd_list));
+ }
+ }
+ }
+}
+
+static struct te_cmd_req_desc_compat *
+te_get_free_cmd_desc_compat(struct tlk_device *dev)
+{
+ struct te_cmd_req_desc_compat *cmd_desc = NULL;
+
+ if (!(list_empty(&(dev->free_cmd_list)))) {
+ cmd_desc = list_first_entry(&(dev->free_cmd_list),
+ struct te_cmd_req_desc_compat, list);
+ list_del(&(cmd_desc->list));
+ list_add_tail(&cmd_desc->list, &(dev->used_cmd_list));
+ }
+ return cmd_desc;
+}
+
+static void te_put_used_cmd_desc_compat(struct tlk_device *dev,
+ struct te_cmd_req_desc_compat *cmd_desc)
+{
+ struct te_cmd_req_desc_compat *param_desc, *tmp_param_desc;
+
+ if (cmd_desc) {
+ list_for_each_entry_safe(param_desc, tmp_param_desc,
+ &(dev->used_cmd_list), list) {
+ if (cmd_desc->req_addr == param_desc->req_addr) {
+ list_del(&param_desc->list);
+ list_add_tail(&param_desc->list,
+ &(dev->free_cmd_list));
+ }
+ }
+ }
+}
+
+static void __attribute__((unused)) te_print_cmd_list(
+ struct tlk_device *dev, int used_list)
+{
+ struct te_cmd_req_desc *param_desc;
+
+ if (!used_list) {
+ pr_info("Printing free cmd list\n");
+ if (!(list_empty(&(dev->free_cmd_list)))) {
+ list_for_each_entry(param_desc, &(dev->free_cmd_list),
+ list)
+ pr_info("Phys addr for cmd req desc (%p)\n",
+ param_desc->req_addr);
+ }
+ } else {
+ pr_info("Printing used cmd list\n");
+ if (!(list_empty(&(dev->used_cmd_list)))) {
+ list_for_each_entry(param_desc, &(dev->used_cmd_list),
+ list)
+ pr_info("Phys addr for cmd req desc (%p)\n",
+ param_desc->req_addr);
+ }
+ }
+}
+
+static int tlk_device_open(struct inode *inode, struct file *file)
+{
+ struct tlk_context *context;
+ int ret = 0;
+
+ context = kzalloc(sizeof(struct tlk_context), GFP_KERNEL);
+ if (!context) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ context->dev = &tlk_dev;
+ INIT_LIST_HEAD(&(context->shmem_alloc_list));
+
+ file->private_data = context;
+ return 0;
+error:
+ return ret;
+}
+
+static int tlk_device_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ file->private_data = NULL;
+ return 0;
+}
+
+static int copy_params_from_user(struct te_request *req,
+ struct te_operation *operation)
+{
+ struct te_oper_param *param_array;
+ struct te_oper_param *user_param;
+ uint32_t i;
+
+ if (operation->list_count == 0)
+ return 0;
+
+ param_array = req->params;
+ if (param_array == NULL) {
+ pr_err("param_array empty\n");
+ return 1;
+ }
+
+ user_param = operation->list_head;
+ for (i = 0; i < operation->list_count && user_param != NULL; i++) {
+ if (copy_from_user(param_array + i, user_param,
+ sizeof(struct te_oper_param))) {
+ pr_err("Failed to copy operation parameter:%d, %p, " \
+ "list_count: %d\n",
+ i, user_param, operation->list_count);
+ return 1;
+ }
+ user_param = param_array[i].next_ptr_user;
+ }
+ return 0;
+}
+
+static int copy_params_to_user(struct te_request *req,
+ struct te_operation *operation)
+{
+ struct te_oper_param *param_array;
+ struct te_oper_param *user_param;
+ uint32_t i;
+
+ if (operation->list_count == 0)
+ return 0;
+
+ param_array = req->params;
+ if (param_array == NULL) {
+ pr_err("param_array empty\n");
+ return 1;
+ }
+
+ user_param = operation->list_head;
+ for (i = 0; i < req->params_size; i++) {
+ if (copy_to_user(user_param, param_array + i,
+ sizeof(struct te_oper_param))) {
+ pr_err("Failed to copy back parameter:%d %p\n", i,
+ user_param);
+ return 1;
+ }
+ user_param = param_array[i].next_ptr_user;
+ }
+ return 0;
+}
+
+static long te_handle_trustedapp_ioctl(struct file *file,
+ unsigned int ioctl_num, unsigned long ioctl_param)
+{
+ long err = 0;
+ union te_cmd cmd;
+ void *ptr_user_answer = NULL;
+ struct te_operation *operation = NULL;
+ struct te_oper_param *params = NULL;
+ struct te_answer answer;
+ struct te_request *request;
+
+ struct te_cmd_req_desc *cmd_desc = NULL;
+ struct tlk_context *context = file->private_data;
+ struct tlk_device *dev = context->dev;
+
+ if (copy_from_user(&cmd, (void __user *)ioctl_param,
+ sizeof(union te_cmd))) {
+ pr_err("Failed to copy command request\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ memset(&answer, 0, sizeof(struct te_answer));
+
+ switch (ioctl_num) {
+ case TE_IOCTL_OPEN_CLIENT_SESSION:
+ operation = &cmd.opensession.operation;
+ ptr_user_answer = (void *)cmd.opensession.answer;
+
+ cmd_desc = te_get_free_cmd_desc(dev);
+ params = te_get_free_params(dev, operation->list_count);
+
+ if (!cmd_desc || (operation->list_count && !params)) {
+ SET_ANSWER(answer,
+ OTE_ERROR_OUT_OF_MEMORY,
+ OTE_RESULT_ORIGIN_COMMS);
+ pr_err("failed to get cmd_desc/params\n");
+ goto error;
+ }
+
+ request = cmd_desc->req_addr;
+ memset(request, 0, sizeof(struct te_request));
+
+ request->params = params;
+ request->params_size = operation->list_count;
+
+ if (copy_params_from_user(request, operation)) {
+ err = -EFAULT;
+ pr_info("failed to copy params from user\n");
+ goto error;
+ }
+
+ te_open_session(&cmd.opensession, request, context);
+
+ SET_ANSWER(answer, request->result, request->result_origin);
+ answer.session_id = request->session_id;
+ break;
+
+ case TE_IOCTL_CLOSE_CLIENT_SESSION:
+ ptr_user_answer = (void *)cmd.closesession.answer;
+ cmd_desc = te_get_free_cmd_desc(dev);
+ if (!cmd_desc) {
+ SET_ANSWER(answer,
+ OTE_ERROR_OUT_OF_MEMORY,
+ OTE_RESULT_ORIGIN_COMMS);
+ pr_err("failed to get cmd_desc\n");
+ goto error;
+ }
+
+ request = cmd_desc->req_addr;
+ memset(request, 0, sizeof(struct te_request));
+
+ /* close session cannot fail */
+ te_close_session(&cmd.closesession, request, context);
+ break;
+
+ case TE_IOCTL_LAUNCH_OPERATION:
+ operation = &cmd.launchop.operation;
+ ptr_user_answer = (void *)cmd.launchop.answer;
+
+ cmd_desc = te_get_free_cmd_desc(dev);
+ params = te_get_free_params(dev, operation->list_count);
+
+ if (!cmd_desc || (operation->list_count && !params)) {
+ SET_ANSWER(answer,
+ OTE_ERROR_OUT_OF_MEMORY,
+ OTE_RESULT_ORIGIN_COMMS);
+ pr_err("failed to get cmd_desc/params\n");
+ goto error;
+ }
+
+ request = cmd_desc->req_addr;
+ memset(request, 0, sizeof(struct te_request));
+
+ request->params = params;
+ request->params_size = operation->list_count;
+
+ if (copy_params_from_user(request, operation)) {
+ err = -EFAULT;
+ pr_info("failed to copy params from user\n");
+ goto error;
+ }
+
+ te_launch_operation(&cmd.launchop, request, context);
+
+ SET_ANSWER(answer, request->result, request->result_origin);
+ break;
+
+ default:
+ pr_err("Invalid IOCTL Cmd\n");
+ err = -EINVAL;
+ goto error;
+ }
+ if (ptr_user_answer && !err) {
+ if (copy_to_user(ptr_user_answer, &answer,
+ sizeof(struct te_answer))) {
+ pr_err("Failed to copy answer\n");
+ err = -EFAULT;
+ }
+ }
+ if (request->params && !err) {
+ if (copy_params_to_user(request, operation)) {
+ pr_err("Failed to copy return params\n");
+ err = -EFAULT;
+ }
+ }
+
+error:
+ if (cmd_desc)
+ te_put_used_cmd_desc(dev, cmd_desc);
+ if (params)
+ te_put_free_params(dev, params, operation->list_count);
+ return err;
+}
+
+static int copy_params_from_user_compat(struct te_request_compat *req,
+ struct te_operation_compat *operation)
+{
+ struct te_oper_param_compat *param_array;
+ struct te_oper_param_compat *user_param;
+ uint32_t i;
+
+ if (operation->list_count == 0)
+ return 0;
+
+ param_array = (struct te_oper_param_compat *)(uintptr_t)req->params;
+ if (param_array == NULL) {
+ pr_err("param_array empty\n");
+ return 1;
+ }
+
+ user_param = (struct te_oper_param_compat *)(uintptr_t)
+ operation->list_head;
+ for (i = 0; i < operation->list_count && user_param != NULL; i++) {
+ if (copy_from_user(param_array + i, user_param,
+ sizeof(struct te_oper_param_compat))) {
+ pr_err("Failed to copy operation parameter:%d, %p, " \
+ "list_count: %d\n",
+ i, user_param, operation->list_count);
+ return 1;
+ }
+ user_param = (struct te_oper_param_compat *)(uintptr_t)
+ param_array[i].next_ptr_user;
+ }
+ return 0;
+}
+
+static int copy_params_to_user_compat(struct te_request_compat *req,
+ struct te_operation_compat *operation)
+{
+ struct te_oper_param_compat *param_array;
+ struct te_oper_param_compat *user_param;
+ uint32_t i;
+
+ if (operation->list_count == 0)
+ return 0;
+
+ param_array =
+ (struct te_oper_param_compat *)(uintptr_t)req->params;
+ if (param_array == NULL) {
+ pr_err("param_array empty\n");
+ return 1;
+ }
+
+ user_param =
+ (struct te_oper_param_compat *)(uintptr_t)operation->list_head;
+ for (i = 0; i < req->params_size; i++) {
+ if (copy_to_user(user_param, param_array + i,
+ sizeof(struct te_oper_param_compat))) {
+ pr_err("Failed to copy back parameter:%d %p\n", i,
+ user_param);
+ return 1;
+ }
+ user_param = (struct te_oper_param_compat *)(uintptr_t)
+ param_array[i].next_ptr_user;
+ }
+ return 0;
+}
+
+static long te_handle_trustedapp_ioctl_compat(struct file *file,
+ unsigned int ioctl_num, unsigned long ioctl_param)
+{
+ long err = 0;
+ union te_cmd_compat cmd_compat;
+ struct te_operation_compat *operation = NULL;
+ struct te_oper_param_compat *params = NULL;
+ struct te_request_compat *request;
+ void __user *ptr_user_answer = NULL;
+ struct te_answer answer;
+ struct te_cmd_req_desc_compat *cmd_desc = NULL;
+ struct tlk_context *context = file->private_data;
+ struct tlk_device *dev = context->dev;
+
+ if (copy_from_user(&cmd_compat, (void __user *)ioctl_param,
+ sizeof(union te_cmd_compat))) {
+ pr_err("Failed to copy command request\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ memset(&answer, 0, sizeof(struct te_answer));
+
+ switch (ioctl_num) {
+ case TE_IOCTL_OPEN_CLIENT_SESSION_COMPAT:
+ operation = &cmd_compat.opensession.operation;
+ ptr_user_answer = (void *)(uintptr_t)
+ cmd_compat.opensession.answer;
+
+ cmd_desc = te_get_free_cmd_desc_compat(dev);
+ params = te_get_free_params_compat(dev, operation->list_count);
+
+ if (!cmd_desc || (operation->list_count && !params)) {
+ SET_ANSWER(answer,
+ OTE_ERROR_OUT_OF_MEMORY,
+ OTE_RESULT_ORIGIN_COMMS);
+ pr_err("failed to get cmd_desc/params\n");
+ goto error;
+ }
+
+ request = cmd_desc->req_addr;
+ memset(request, 0, sizeof(struct te_request_compat));
+
+ request->params = (uintptr_t)params;
+ request->params_size = operation->list_count;
+
+ if (copy_params_from_user_compat(request, operation)) {
+ err = -EFAULT;
+ pr_info("failed to copy params from user\n");
+ goto error;
+ }
+
+ te_open_session_compat(&cmd_compat.opensession,
+ request, context);
+
+ SET_ANSWER(answer, request->result, request->result_origin);
+ answer.session_id = request->session_id;
+ break;
+
+ case TE_IOCTL_CLOSE_CLIENT_SESSION_COMPAT:
+ ptr_user_answer = (void *)(uintptr_t)
+ cmd_compat.closesession.answer;
+ cmd_desc = te_get_free_cmd_desc_compat(dev);
+ if (!cmd_desc) {
+ SET_ANSWER(answer,
+ OTE_ERROR_OUT_OF_MEMORY,
+ OTE_RESULT_ORIGIN_COMMS);
+ pr_err("failed to get cmd_desc\n");
+ goto error;
+ }
+
+ request = cmd_desc->req_addr;
+ memset(request, 0, sizeof(struct te_request_compat));
+
+ /* close session cannot fail */
+ te_close_session_compat(&cmd_compat.closesession,
+ request, context);
+ break;
+
+ case TE_IOCTL_LAUNCH_OPERATION_COMPAT:
+ operation = &cmd_compat.launchop.operation;
+ ptr_user_answer = (void *)(uintptr_t)cmd_compat.launchop.answer;
+
+ cmd_desc = te_get_free_cmd_desc_compat(dev);
+ params = te_get_free_params_compat(dev, operation->list_count);
+
+ if (!cmd_desc || (operation->list_count && !params)) {
+ SET_ANSWER(answer,
+ OTE_ERROR_OUT_OF_MEMORY,
+ OTE_RESULT_ORIGIN_COMMS);
+ pr_err("failed to get cmd_desc/params\n");
+ goto error;
+ }
+
+ request = cmd_desc->req_addr;
+ memset(request, 0, sizeof(struct te_request_compat));
+
+ request->params = (uintptr_t)params;
+ request->params_size = operation->list_count;
+
+ if (copy_params_from_user_compat(request, operation)) {
+ err = -EFAULT;
+ pr_info("failed to copy params from user\n");
+ goto error;
+ }
+
+ te_launch_operation_compat(&cmd_compat.launchop,
+ request, context);
+
+ SET_ANSWER(answer, request->result, request->result_origin);
+ break;
+
+ default:
+ pr_err("Invalid IOCTL Cmd\n");
+ err = -EINVAL;
+ goto error;
+ }
+ if (ptr_user_answer && !err) {
+ if (copy_to_user(ptr_user_answer, &answer,
+ sizeof(struct te_answer))) {
+ pr_err("Failed to copy answer\n");
+ err = -EFAULT;
+ }
+ }
+ if (request->params && !err) {
+ if (copy_params_to_user_compat(request, operation)) {
+ pr_err("Failed to copy return params\n");
+ err = -EFAULT;
+ }
+ }
+
+error:
+ if (cmd_desc)
+ te_put_used_cmd_desc_compat(dev, cmd_desc);
+ if (params)
+ te_put_free_params_compat(dev, params, operation->list_count);
+ return err;
+}
+
+static long tlk_device_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ int err;
+
+ switch (ioctl_num) {
+ case TE_IOCTL_OPEN_CLIENT_SESSION:
+ case TE_IOCTL_CLOSE_CLIENT_SESSION:
+ case TE_IOCTL_LAUNCH_OPERATION:
+ mutex_lock(&smc_lock);
+ err = te_handle_trustedapp_ioctl(file, ioctl_num, ioctl_param);
+ mutex_unlock(&smc_lock);
+ break;
+
+ case TE_IOCTL_OPEN_CLIENT_SESSION_COMPAT:
+ case TE_IOCTL_CLOSE_CLIENT_SESSION_COMPAT:
+ case TE_IOCTL_LAUNCH_OPERATION_COMPAT:
+ mutex_lock(&smc_lock);
+ err = te_handle_trustedapp_ioctl_compat(file, ioctl_num,
+ ioctl_param);
+ mutex_unlock(&smc_lock);
+ break;
+
+ case TE_IOCTL_FILE_NEW_REQ:
+ case TE_IOCTL_FILE_FILL_BUF:
+ case TE_IOCTL_FILE_REQ_COMPLETE:
+ err = te_handle_fs_ioctl(file, ioctl_num, ioctl_param);
+ break;
+
+ case TE_IOCTL_SS_NEW_REQ:
+ case TE_IOCTL_SS_REQ_COMPLETE:
+ err = te_handle_ss_ioctl(file, ioctl_num, ioctl_param);
+ break;
+
+ default:
+ pr_err("%s: Invalid IOCTL (0x%x) id 0x%x max 0x%x\n", __func__,
+ ioctl_num, _IOC_NR(ioctl_num), TE_IOCTL_MAX_NR);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * tlk_driver function definitions.
+ */
+static const struct file_operations tlk_device_fops = {
+ .owner = THIS_MODULE,
+ .open = tlk_device_open,
+ .release = tlk_device_release,
+ .unlocked_ioctl = tlk_device_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = tlk_device_ioctl,
+#endif
+};
+
+struct miscdevice tlk_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "tlk_device",
+ .fops = &tlk_device_fops,
+};
+
+static int __init tlk_init(void)
+{
+ int ret;
+
+ INIT_LIST_HEAD(&(tlk_dev.used_cmd_list));
+ INIT_LIST_HEAD(&(tlk_dev.free_cmd_list));
+
+ ret = te_create_free_cmd_list(&tlk_dev);
+ if (ret != 0)
+ return ret;
+
+ return misc_register(&tlk_misc_device);
+}
+
+module_init(tlk_init);
diff --git a/security/tlk_driver/ote_fs.c b/security/tlk_driver/ote_fs.c
new file mode 100644
index 000000000000..d398bebed9d2
--- /dev/null
+++ b/security/tlk_driver/ote_fs.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) 2013-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/uaccess.h>
+#include <linux/dma-mapping.h>
+
+#include "ote_protocol.h"
+
+#define TE_SHMEM_FNAME_SZ SZ_64
+#define TE_SHMEM_DATA_SZ SZ_128K
+
+struct te_file_req_shmem {
+ char file_name[TE_SHMEM_FNAME_SZ];
+ char file_data[TE_SHMEM_DATA_SZ];
+};
+
+struct te_file_req_node {
+ struct list_head node;
+ struct te_file_req *req;
+};
+
+static struct list_head req_list;
+static DECLARE_COMPLETION(req_ready);
+static DECLARE_COMPLETION(req_complete);
+static unsigned long secure_error;
+
+static struct te_ss_op *ss_op_shmem;
+static uint32_t ss_op_size;
+
+static void indicate_complete(unsigned long ret)
+{
+ tlk_generic_smc(TE_SMC_FS_OP_DONE, ret, 0);
+}
+
+static void indicate_ss_op_complete(void)
+{
+ tlk_generic_smc(TE_SMC_SS_REQ_COMPLETE, 0, 0);
+}
+
+int te_handle_fs_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ struct te_file_req new_req, *ptr_user_req = NULL;
+ struct te_file_req_node *req_node;
+
+ switch (ioctl_num) {
+ case TE_IOCTL_FILE_NEW_REQ: /* new request */
+
+ ptr_user_req = (struct te_file_req *)ioctl_param;
+
+ /* wait for a new request */
+		if (wait_for_completion_interruptible(&req_ready))
+			return -ENODATA;
+
+		/* dequeue new request from the secure world */
+		if (list_empty(&req_list)) {
+			pr_err("no request available\n");
+			return -ENOMEM;
+		}
+		req_node = list_first_entry(&req_list, struct te_file_req_node,
+			node);
+
+		/* populate request for the non-secure client */
+		if (copy_to_user(ptr_user_req, req_node->req,
+			sizeof(struct te_file_req))) {
+			pr_err("copy_to_user failed for new request\n");
+			return -EFAULT;
+		}
+
+		list_del(&req_node->node);
+		kfree(req_node);
+
+ break;
+
+ case TE_IOCTL_FILE_FILL_BUF: /* pass data to be written to the file */
+
+ if (copy_from_user(&new_req, (void __user *)ioctl_param,
+ sizeof(struct te_file_req))) {
+ pr_err("copy_from_user failed for request\n");
+ return -EFAULT;
+ }
+
+ if (new_req.type != OTE_FILE_REQ_WRITE)
+ return -EINVAL;
+
+ if (!new_req.kern_data_buf || !new_req.user_data_buf)
+ return -EINVAL;
+
+ if (copy_to_user(new_req.user_data_buf, new_req.kern_data_buf,
+ new_req.data_len)) {
+ pr_err("copy_to_user failed for fill buffer\n");
+ return -EFAULT;
+ }
+ break;
+
+ case TE_IOCTL_FILE_REQ_COMPLETE: /* request complete */
+
+ if (copy_from_user(&new_req, (void __user *)ioctl_param,
+ sizeof(struct te_file_req))) {
+ pr_err("copy_from_user failed for request\n");
+ return -EFAULT;
+ }
+
+ if (new_req.type == OTE_FILE_REQ_READ && !new_req.error) {
+ if (copy_from_user(new_req.kern_data_buf,
+ (void __user *)new_req.user_data_buf,
+ new_req.data_len)) {
+ pr_err("copy_from_user failed for request\n");
+ return -EFAULT;
+ }
+ }
+
+ /* get error code */
+ secure_error = (new_req.error) ? OTE_ERROR_NO_DATA
+ : new_req.result;
+
+ /* signal the producer */
+ complete(&req_complete);
+ break;
+
+	default:
+		pr_err("%s: unknown fs ioctl (0x%x)\n", __func__, ioctl_num);
+		return -EINVAL;
+ }
+
+ return 0;
+}
+
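+/*
+ * Producer side: build a request on behalf of the secure world, queue it
+ * for the daemon, wait until it has been serviced, then report the result
+ * back to the secure world via TE_SMC_FS_OP_DONE.
+ */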
+static void _te_fs_file_operation(const char *name, void *buf, int len,
+ enum te_file_req_type type)
+{
+ struct te_file_req *new_req;
+ struct te_file_req_node *req_node;
+
+ BUG_ON(!name);
+
+ if (type == OTE_FILE_REQ_READ || type == OTE_FILE_REQ_WRITE)
+ BUG_ON(!buf);
+
+ /* allocate te_file_req structure */
+ new_req = kzalloc(sizeof(struct te_file_req), GFP_KERNEL);
+ BUG_ON(!new_req);
+
+ /* prepare a new request */
+	strlcpy(new_req->name, name, sizeof(new_req->name));
+ new_req->type = type;
+ new_req->data_len = len;
+ new_req->result = 0;
+ new_req->kern_data_buf = buf;
+ new_req->error = 0;
+
+ req_node = kzalloc(sizeof(struct te_file_req_node), GFP_KERNEL);
+ BUG_ON(!req_node);
+
+ req_node->req = new_req;
+ INIT_LIST_HEAD(&req_node->node);
+
+ /* add it to the pending queue and signal the consumer */
+	list_add_tail(&req_node->node, &req_list);
+ complete(&req_ready);
+
+ /* wait for the consumer's signal */
+ wait_for_completion(&req_complete);
+
+ kfree(new_req);
+
+ /* signal completion to the secure world */
+ indicate_complete(secure_error);
+}
+
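+/* a NULL buf turns the read into a size query (OTE_FILE_REQ_SIZE) */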
+void tlk_fread(const char *name, void *buf, int len)
+{
+ if (!buf)
+ _te_fs_file_operation(name, buf, len, OTE_FILE_REQ_SIZE);
+ else
+ _te_fs_file_operation(name, buf, len, OTE_FILE_REQ_READ);
+}
+
+void tlk_fwrite(const char *name, void *buf, int len)
+{
+ _te_fs_file_operation(name, buf, len, OTE_FILE_REQ_WRITE);
+}
+
+void tlk_fdelete(const char *name)
+{
+ _te_fs_file_operation(name, NULL, 0, OTE_FILE_REQ_DELETE);
+}
+
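+/*
+ * Storage-service ioctls: the daemon pulls the pending request out of
+ * the shared buffer (ss_op_shmem) and writes the serviced result back
+ * into the same buffer.
+ */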
+int te_handle_ss_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ switch (ioctl_num) {
+ case TE_IOCTL_SS_NEW_REQ:
+ /* wait for a new request */
+ if (wait_for_completion_interruptible(&req_ready))
+ return -ENODATA;
+
+ /* transfer pending request to daemon's buffer */
+ if (copy_to_user((void __user *)ioctl_param, ss_op_shmem,
+ ss_op_size)) {
+ pr_err("copy_to_user failed for new request\n");
+ return -EFAULT;
+ }
+ break;
+
+ case TE_IOCTL_SS_REQ_COMPLETE: /* request complete */
+ if (copy_from_user(ss_op_shmem, (void __user *)ioctl_param,
+ ss_op_size)) {
+ pr_err("copy_from_user failed for request\n");
+ return -EFAULT;
+ }
+
+ /* signal the producer */
+ complete(&req_complete);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+ return 0;
+}
+
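+/*
+ * Entry point registered with the secure OS (TE_SMC_SS_REGISTER_HANDLER):
+ * called with the size of the request already placed in ss_op_shmem.
+ */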
+void tlk_ss_op(uint32_t size)
+{
+ /* store size of request */
+ ss_op_size = size;
+
+ /* signal consumer */
+ complete(&req_ready);
+
+ /* wait for the consumer's signal */
+ wait_for_completion(&req_complete);
+
+ /* signal completion to the secure world */
+ indicate_ss_op_complete();
+}
+
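+/*
+ * Register the legacy file-operation callbacks and the shared name/data
+ * buffers with the secure OS. Kernel virtual addresses are passed as
+ * 32-bit values, so this interface is only valid for a 32-bit kernel.
+ */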
+static int tlk_fs_register_handlers(void)
+{
+ struct te_file_req_shmem *shmem_ptr;
+ uint32_t smc_args[MAX_EXT_SMC_ARGS];
+ dma_addr_t shmem_dma;
+
+ shmem_ptr = dma_alloc_coherent(NULL, sizeof(struct te_file_req_shmem),
+ &shmem_dma, GFP_KERNEL);
+ if (!shmem_ptr) {
+ pr_err("%s: no memory available for fs operations\n", __func__);
+ return -ENOMEM;
+ }
+
+ memset(shmem_ptr, 0, sizeof(struct te_file_req_shmem));
+
+ INIT_LIST_HEAD(&req_list);
+ init_completion(&req_ready);
+ init_completion(&req_complete);
+
+ smc_args[0] = TE_SMC_REGISTER_FS_HANDLERS;
+ smc_args[1] = (uint32_t)tlk_fread;
+ smc_args[2] = (uint32_t)tlk_fwrite;
+ smc_args[3] = (uint32_t)tlk_fdelete;
+ smc_args[4] = (uint32_t)shmem_ptr->file_name;
+ smc_args[5] = (uint32_t)shmem_ptr->file_data;
+
+ tlk_extended_smc(smc_args);
+
+ return 0;
+}
+
+static int __init tlk_ss_init(void)
+{
+	dma_addr_t ss_op_shmem_dma;
+	int ret;
+
+	/* register legacy support; fail if the handlers can't be set up */
+	ret = tlk_fs_register_handlers();
+	if (ret != 0)
+		return ret;
+
+ /* allocate shared memory buffer */
+ ss_op_shmem = dma_alloc_coherent(NULL, sizeof(struct te_ss_op),
+ &ss_op_shmem_dma, GFP_KERNEL);
+ if (!ss_op_shmem) {
+		pr_err("%s: no memory available for ss operations\n",
+			__func__);
+ return -ENOMEM;
+ }
+
+ tlk_generic_smc(TE_SMC_SS_REGISTER_HANDLER,
+ (uint32_t)tlk_ss_op, (uint32_t)ss_op_shmem);
+
+ return 0;
+}
+
+arch_initcall(tlk_ss_init);
diff --git a/security/tlk_driver/ote_irq.S b/security/tlk_driver/ote_irq.S
new file mode 100644
index 000000000000..3a4ca6875801
--- /dev/null
+++ b/security/tlk_driver/ote_irq.S
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
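+/*
+ * Notify the secure OS that the normal world has finished handling an
+ * IRQ: r0 = TE_SMC_NS_IRQ_DONE (0x32000005, assembled via movw/movt),
+ * r1 = r2 = 0, then trap into the monitor with smc.
+ */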
+ENTRY(tlk_irq_handler)
+ movw r0, #0x5
+ movt r0, #0x3200 @ TE_SMC_NS_IRQ_DONE
+ mov r1, #0
+ mov r2, #0
+ smc #0
+ENDPROC(tlk_irq_handler)
diff --git a/security/tlk_driver/ote_log.c b/security/tlk_driver/ote_log.c
new file mode 100644
index 000000000000..0cd4412d1470
--- /dev/null
+++ b/security/tlk_driver/ote_log.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2013-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/uaccess.h>
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+#include <linux/string.h>
+
+#include "ote_protocol.h"
+
+#define LOGBUF_SIZE 8192
+
+struct circular_buffer {
+ uint32_t size; /* Indicates the total size of the buffer */
+ uint32_t start; /* Starting point of valid data in buffer */
+ uint32_t end; /* First character which is empty (can be written to) */
+ uint32_t overflow; /* Indicator whether buffer has overwritten itself */
+ char *buf;
+};
+
+#if defined(CONFIG_OTE_ENABLE_LOGGER)
+
+static int ote_logging_enabled;
+struct circular_buffer *cb;
+
+/*
+ * Initialize the shared buffer for TLK logging.
+ * The buffer is allocated from DMA-coherent (uncached) memory, since the
+ * TLK writes to its physical address directly. The descriptor structure
+ * is allocated the same way, as its members are also updated directly by
+ * the TLK through their physical addresses.
+ */
+static int circ_buf_init(struct circular_buffer **cbptr)
+{
+	dma_addr_t cb_dma, buf_dma;
+
+	*cbptr = (struct circular_buffer *) dma_alloc_coherent(NULL,
+			sizeof(struct circular_buffer), &cb_dma, GFP_KERNEL);
+	if (!*cbptr) {
+		pr_err("%s: no memory available for circular buffer struct\n",
+			__func__);
+		return -ENOMEM;
+	}
+	memset(*cbptr, 0, sizeof(struct circular_buffer));
+
+	(*cbptr)->start = 0;
+	(*cbptr)->end = 0;
+	(*cbptr)->size = LOGBUF_SIZE;
+
+	(*cbptr)->buf = (char *) dma_alloc_coherent(NULL, LOGBUF_SIZE,
+			&buf_dma, GFP_KERNEL);
+	if (!(*cbptr)->buf) {
+		pr_err("%s: no memory available for shared buffer\n",
+			__func__);
+		/* free the descriptor allocated above (*cbptr, not cbptr,
+		 * with the DMA handle of the first allocation) */
+		dma_free_coherent(NULL, sizeof(struct circular_buffer),
+			*cbptr, cb_dma);
+		*cbptr = NULL;
+		return -ENOMEM;
+	}
+	memset((*cbptr)->buf, 0, LOGBUF_SIZE);
+
+	(*cbptr)->overflow = 0;
+
+	return 0;
+}
+
+/*
+ * Copy the contents of the circular buffer into a flat char buffer, in
+ * order. This lets the caller treat the log as a string and tokenize it
+ * into lines for tagging and display. Returns the number of bytes copied,
+ * or -EINVAL if the buffer pointers are corrupted.
+ */
+static int circ_buf_copy(struct circular_buffer *cb, char *text)
+{
+	/*
+	 * Both pointers are updated directly by the secure world, so
+	 * validate them before using them in any size calculation.
+	 */
+	if (cb->size != LOGBUF_SIZE ||
+	    cb->start >= cb->size || cb->end >= cb->size) {
+		pr_err("%s: cbuf pointers corrupted\n", __func__);
+		return -EINVAL;
+	}
+
+	if (cb->end == cb->start)
+		return 0;
+
+	/* contiguous region: a single copy suffices */
+	if (cb->end > cb->start) {
+		memcpy(text, cb->buf + cb->start, cb->end - cb->start);
+		return cb->end - cb->start;
+	}
+
+	/* wrapped region: copy the tail of the buffer, then the head */
+	memcpy(text, cb->buf + cb->start, cb->size - cb->start);
+	memcpy(text + cb->size - cb->start, cb->buf, cb->end);
+
+	return cb->size - cb->start + cb->end;
+}
+
+/*
+ * Print the TLK logs: tokenize the shared buffer into lines, tag each
+ * line with a [TLK] prefix and emit it to the kernel log.
+ */
+void ote_print_logs(void)
+{
+	char *text, *buf, *temp;
+	int len;
+
+	if (!ote_logging_enabled)
+		return;
+
+	/* one extra byte keeps room for the NUL when the buffer is full */
+	text = kzalloc(LOGBUF_SIZE + 1, GFP_KERNEL);
+	BUG_ON(!text);
+
+	/*
+	 * Detect whether the buffer proved too small to hold the data.
+	 * When it is not large enough, the TLK overwrites its oldest
+	 * data, so this warning alerts the user to use a bigger buffer.
+	 */
+	if (cb->overflow == 1) {
+		pr_info("\n[TLK] **WARNING** TLK buffer overwritten.\n\n");
+		cb->overflow = 0;
+	}
+
+	len = circ_buf_copy(cb, text);
+	if (len < 0) {
+		kfree(text);
+		return;
+	}
+	text[len] = '\0';
+
+	/* strsep() advances 'text', so keep the original pointer to free */
+	buf = text;
+	temp = strsep(&text, "\n");
+	while (temp != NULL) {
+		if (strnlen(temp, LOGBUF_SIZE))
+			pr_info("[TLK] %s\n", temp);
+		temp = strsep(&text, "\n");
+	}
+
+	/* indicate that the buffer is empty */
+	cb->start = cb->end;
+	kfree(buf);
+}
+#else
+void ote_print_logs(void) {}
+#endif
+
+/*
+ * Initialize the circular buffer and pass the virtual address of its
+ * descriptor to the secure OS via an SMC (or 0 when the logger is
+ * compiled out).
+ */
+static int __init ote_logger_init(void)
+{
+#if defined(CONFIG_OTE_ENABLE_LOGGER)
+	int ret;
+
+	ret = circ_buf_init(&cb);
+	if (ret != 0)
+		return ret;
+
+	tlk_generic_smc(TE_SMC_INIT_LOGGER, (uint32_t)cb, 0);
+
+	ote_logging_enabled = 1;
+	ote_print_logs();
+#else
+	/* tell the secure OS that no log buffer is available */
+	tlk_generic_smc(TE_SMC_INIT_LOGGER, 0, 0);
+#endif
+
+	return 0;
+}
+
+arch_initcall(ote_logger_init);
diff --git a/security/tlk_driver/ote_protocol.h b/security/tlk_driver/ote_protocol.h
new file mode 100644
index 000000000000..66228e4a9eaa
--- /dev/null
+++ b/security/tlk_driver/ote_protocol.h
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2013-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __OTE_PROTOCOL_H__
+#define __OTE_PROTOCOL_H__
+
+#include "ote_types.h"
+
+#define TE_IOCTL_MAGIC_NUMBER ('t')
+#define TE_IOCTL_OPEN_CLIENT_SESSION \
+ _IOWR(TE_IOCTL_MAGIC_NUMBER, 0x10, union te_cmd)
+#define TE_IOCTL_CLOSE_CLIENT_SESSION \
+ _IOWR(TE_IOCTL_MAGIC_NUMBER, 0x11, union te_cmd)
+#define TE_IOCTL_LAUNCH_OPERATION \
+ _IOWR(TE_IOCTL_MAGIC_NUMBER, 0x14, union te_cmd)
+
+/* ioctls using new structs (eventually to replace current ioctls) */
+#define TE_IOCTL_OPEN_CLIENT_SESSION_COMPAT \
+ _IOWR(TE_IOCTL_MAGIC_NUMBER, 0x10, union te_cmd_compat)
+#define TE_IOCTL_CLOSE_CLIENT_SESSION_COMPAT \
+ _IOWR(TE_IOCTL_MAGIC_NUMBER, 0x11, union te_cmd_compat)
+#define TE_IOCTL_LAUNCH_OPERATION_COMPAT \
+ _IOWR(TE_IOCTL_MAGIC_NUMBER, 0x14, union te_cmd_compat)
+
+#define TE_IOCTL_FILE_NEW_REQ \
+ _IOR(TE_IOCTL_MAGIC_NUMBER, 0x16, struct te_file_req)
+#define TE_IOCTL_FILE_FILL_BUF \
+ _IOR(TE_IOCTL_MAGIC_NUMBER, 0x17, struct te_file_req)
+#define TE_IOCTL_FILE_REQ_COMPLETE \
+ _IOWR(TE_IOCTL_MAGIC_NUMBER, 0x18, struct te_file_req)
+#define TE_IOCTL_SS_NEW_REQ \
+ _IOR(TE_IOCTL_MAGIC_NUMBER, 0x20, struct te_ss_op)
+#define TE_IOCTL_SS_REQ_COMPLETE \
+ _IOWR(TE_IOCTL_MAGIC_NUMBER, 0x21, struct te_ss_op)
+
+#define TE_IOCTL_MIN_NR _IOC_NR(TE_IOCTL_OPEN_CLIENT_SESSION)
+#define TE_IOCTL_MAX_NR _IOC_NR(TE_IOCTL_SS_REQ_COMPLETE)
+
+/* shared buffer is 2 pages: the 1st holds requests, the 2nd holds params */
+#define TE_CMD_DESC_MAX (PAGE_SIZE / sizeof(struct te_request))
+#define TE_PARAM_MAX (PAGE_SIZE / sizeof(struct te_oper_param))
+
+#define TE_CMD_DESC_MAX_COMPAT \
+ (PAGE_SIZE / sizeof(struct te_request_compat))
+#define TE_PARAM_MAX_COMPAT \
+ (PAGE_SIZE / sizeof(struct te_oper_param_compat))
+
+#define MAX_EXT_SMC_ARGS 12
+
+extern struct mutex smc_lock;
+
+uint32_t tlk_generic_smc(uint32_t arg0, uint32_t arg1, uint32_t arg2);
+uint32_t tlk_extended_smc(uint32_t *args);
+void tlk_irq_handler(void);
+
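+/* per-device state: shared request/param pages, the param allocation
+ * bitmap, and the used/free command descriptor lists */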
+struct tlk_device {
+ struct te_request *req_addr;
+ dma_addr_t req_addr_phys;
+ struct te_oper_param *param_addr;
+ dma_addr_t param_addr_phys;
+
+ struct te_request_compat *req_addr_compat;
+ struct te_oper_param_compat *param_addr_compat;
+
+ char *req_param_buf;
+
+ unsigned long *param_bitmap;
+
+ struct list_head used_cmd_list;
+ struct list_head free_cmd_list;
+};
+
+struct te_cmd_req_desc {
+ struct te_request *req_addr;
+ struct list_head list;
+};
+
+struct te_cmd_req_desc_compat {
+ struct te_request_compat *req_addr;
+ struct list_head list;
+};
+
+struct te_shmem_desc {
+ struct list_head list;
+ void *buffer;
+ size_t size;
+ unsigned int mem_type;
+};
+
+struct tlk_context {
+ struct tlk_device *dev;
+ struct list_head shmem_alloc_list;
+};
+
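+/* SMC call identifiers exchanged with the secure OS */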
+enum {
+ /* Trusted Application Calls */
+ TE_SMC_OPEN_SESSION = 0x30000001,
+ TE_SMC_CLOSE_SESSION = 0x30000002,
+ TE_SMC_LAUNCH_OPERATION = 0x30000003,
+
+ /* Trusted OS calls */
+ TE_SMC_REGISTER_FS_HANDLERS = 0x32000001,
+ TE_SMC_REGISTER_REQ_BUF = 0x32000002,
+ TE_SMC_PROGRAM_VPR = 0x32000003,
+ TE_SMC_REGISTER_IRQ_HANDLER = 0x32000004,
+ TE_SMC_NS_IRQ_DONE = 0x32000005,
+ TE_SMC_FS_OP_DONE = 0x32000006,
+ TE_SMC_INIT_LOGGER = 0x32000007,
+ TE_SMC_SS_REGISTER_HANDLER = 0x32000008,
+ TE_SMC_SS_REQ_COMPLETE = 0x32000009,
+};
+
+enum {
+ TE_PARAM_TYPE_NONE = 0,
+ TE_PARAM_TYPE_INT_RO = 1,
+ TE_PARAM_TYPE_INT_RW = 2,
+ TE_PARAM_TYPE_MEM_RO = 3,
+ TE_PARAM_TYPE_MEM_RW = 4,
+};
+
+struct te_oper_param {
+ uint32_t index;
+ uint32_t type;
+ union {
+ struct {
+ uint32_t val;
+ } Int;
+ struct {
+ void *base;
+ uint32_t len;
+ } Mem;
+ } u;
+ void *next_ptr_user;
+};
+
+struct te_oper_param_compat {
+ uint32_t index;
+ uint32_t type;
+ union {
+ struct {
+ uint32_t val;
+ } Int;
+ struct {
+ uint64_t base;
+ uint32_t len;
+ } Mem;
+ } u;
+ uint64_t next_ptr_user;
+};
+
+struct te_operation {
+ uint32_t command;
+ struct te_oper_param *list_head;
+ /* Maintain a pointer to tail of list to easily add new param node */
+ struct te_oper_param *list_tail;
+ uint32_t list_count;
+ uint32_t status;
+	uint32_t interface_side;
+};
+
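+/* UUID (RFC 4122 field layout) identifying the target trusted application */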
+struct te_service_id {
+ uint32_t time_low;
+ uint16_t time_mid;
+ uint16_t time_hi_and_version;
+ uint8_t clock_seq_and_node[8];
+};
+
+/*
+ * OpenSession
+ */
+struct te_opensession {
+ struct te_service_id dest_uuid;
+ struct te_operation operation;
+ uint32_t answer;
+};
+
+/*
+ * CloseSession
+ */
+struct te_closesession {
+ uint32_t session_id;
+ uint32_t answer;
+};
+
+/*
+ * LaunchOperation
+ */
+struct te_launchop {
+ uint32_t session_id;
+ struct te_operation operation;
+ uint32_t answer;
+};
+
+union te_cmd {
+ struct te_opensession opensession;
+ struct te_closesession closesession;
+ struct te_launchop launchop;
+};
+
+/*
+ * Compat versions of the original structs (eventually to replace
+ * the old structs, once the lib/TLK kernel changes are in).
+ */
+struct te_operation_compat {
+ uint32_t command;
+ uint32_t status;
+ uint64_t list_head;
+ uint64_t list_tail;
+ uint32_t list_count;
+ uint32_t interface_side;
+};
+
+/*
+ * OpenSession
+ */
+struct te_opensession_compat {
+ struct te_service_id dest_uuid;
+ struct te_operation_compat operation;
+ uint64_t answer;
+};
+
+/*
+ * CloseSession
+ */
+struct te_closesession_compat {
+ uint32_t session_id;
+ uint64_t answer;
+};
+
+/*
+ * LaunchOperation
+ */
+struct te_launchop_compat {
+ uint32_t session_id;
+ struct te_operation_compat operation;
+ uint64_t answer;
+};
+
+union te_cmd_compat {
+ struct te_opensession_compat opensession;
+ struct te_closesession_compat closesession;
+ struct te_launchop_compat launchop;
+};
+
+struct te_request {
+ uint32_t type;
+ uint32_t session_id;
+ uint32_t command_id;
+ struct te_oper_param *params;
+ uint32_t params_size;
+ uint32_t dest_uuid[4];
+ uint32_t result;
+ uint32_t result_origin;
+};
+
+struct te_request_compat {
+ uint32_t type;
+ uint32_t session_id;
+ uint32_t command_id;
+ uint64_t params;
+ uint32_t params_size;
+ uint32_t dest_uuid[4];
+ uint32_t result;
+ uint32_t result_origin;
+};
+
+struct te_answer {
+ uint32_t result;
+ uint32_t session_id;
+ uint32_t result_origin;
+};
+
+void te_open_session(struct te_opensession *cmd,
+ struct te_request *request,
+ struct tlk_context *context);
+
+void te_close_session(struct te_closesession *cmd,
+ struct te_request *request,
+ struct tlk_context *context);
+
+void te_launch_operation(struct te_launchop *cmd,
+ struct te_request *request,
+ struct tlk_context *context);
+
+void te_open_session_compat(struct te_opensession_compat *cmd,
+ struct te_request_compat *request,
+ struct tlk_context *context);
+
+void te_close_session_compat(struct te_closesession_compat *cmd,
+ struct te_request_compat *request,
+ struct tlk_context *context);
+
+void te_launch_operation_compat(struct te_launchop_compat *cmd,
+ struct te_request_compat *request,
+ struct tlk_context *context);
+
+#define TE_MAX_FILE_NAME_LEN 64
+
+enum te_file_req_type {
+ OTE_FILE_REQ_READ = 0,
+ OTE_FILE_REQ_WRITE = 1,
+ OTE_FILE_REQ_DELETE = 2,
+ OTE_FILE_REQ_SIZE = 3,
+};
+
+struct te_file_req {
+ char name[TE_MAX_FILE_NAME_LEN];
+ enum te_file_req_type type;
+	void __user *user_data_buf;
+ void *kern_data_buf;
+ unsigned long data_len;
+ unsigned long result;
+ int error;
+};
+
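+/* one-page (0x1000-byte) storage-service buffer shared with the daemon */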
+#define SS_OP_MAX_DATA_SIZE 0x1000
+struct te_ss_op {
+ uint8_t data[SS_OP_MAX_DATA_SIZE];
+};
+
+int te_handle_ss_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param);
+int te_handle_fs_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param);
+void ote_print_logs(void);
+
+#endif
diff --git a/security/tlk_driver/ote_types.h b/security/tlk_driver/ote_types.h
new file mode 100644
index 000000000000..593400f7a03a
--- /dev/null
+++ b/security/tlk_driver/ote_types.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __OTE_TYPES_H__
+#define __OTE_TYPES_H__
+
+/*
+ * Return Codes
+ */
+enum {
+ /* Success */
+ OTE_SUCCESS = 0x00000000,
+ OTE_ERROR_NO_ERROR = OTE_SUCCESS,
+ /* Non-specific cause */
+ OTE_ERROR_GENERIC = 0xFFFF0000,
+	/* Access privilege not sufficient */
+ OTE_ERROR_ACCESS_DENIED = 0xFFFF0001,
+ /* The operation was cancelled */
+ OTE_ERROR_CANCEL = 0xFFFF0002,
+ /* Concurrent accesses conflict */
+ OTE_ERROR_ACCESS_CONFLICT = 0xFFFF0003,
+ /* Too much data for req was passed */
+ OTE_ERROR_EXCESS_DATA = 0xFFFF0004,
+ /* Input data was of invalid format */
+ OTE_ERROR_BAD_FORMAT = 0xFFFF0005,
+ /* Input parameters were invalid */
+ OTE_ERROR_BAD_PARAMETERS = 0xFFFF0006,
+ /* Oper invalid in current state */
+ OTE_ERROR_BAD_STATE = 0xFFFF0007,
+ /* The req data item not found */
+ OTE_ERROR_ITEM_NOT_FOUND = 0xFFFF0008,
+ /* The req oper not implemented */
+ OTE_ERROR_NOT_IMPLEMENTED = 0xFFFF0009,
+ /* The req oper not supported */
+ OTE_ERROR_NOT_SUPPORTED = 0xFFFF000A,
+ /* Expected data was missing */
+ OTE_ERROR_NO_DATA = 0xFFFF000B,
+ /* System ran out of resources */
+ OTE_ERROR_OUT_OF_MEMORY = 0xFFFF000C,
+ /* The system is busy */
+ OTE_ERROR_BUSY = 0xFFFF000D,
+ /* Communication failed */
+ OTE_ERROR_COMMUNICATION = 0xFFFF000E,
+ /* A security fault was detected */
+ OTE_ERROR_SECURITY = 0xFFFF000F,
+ /* The supplied buffer is too short */
+ OTE_ERROR_SHORT_BUFFER = 0xFFFF0010,
+};
+
+/*
+ * Return Code origins
+ */
+enum {
+ /* Originated from OTE Client API */
+ OTE_RESULT_ORIGIN_API = 1,
+ /* Originated from Underlying Communication Stack */
+ OTE_RESULT_ORIGIN_COMMS = 2,
+ /* Originated from Common OTE Code */
+ OTE_RESULT_ORIGIN_KERNEL = 3,
+ /* Originated from Trusted APP Code */
+ OTE_RESULT_ORIGIN_TRUSTED_APP = 4,
+};
+
+#endif