summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDan Willemsen <dwillemsen@nvidia.com>2011-03-09 00:36:06 -0800
committerNiket Sirsi <nsirsi@nvidia.com>2011-04-14 21:54:18 -0700
commitb186b82e4a6f87885e4df2742ba628b2e71e4d51 (patch)
tree2c043af4cf7b0dd7503add1f609ffb13bc07ef38
parent196834caab117848884980aab67576e85340f00a (diff)
Trusted Foundations kernel changes and driver
Change-Id: I318afbe66efa346b71e82413ac6442672cef4d36 Reviewed-on: http://git-master/r/21196 Reviewed-by: Jonathan B White (Engrg-Mobile) <jwhite@nvidia.com> Tested-by: Jonathan B White (Engrg-Mobile) <jwhite@nvidia.com> Reviewed-by: Maria Gutowski <mgutowski@nvidia.com>
-rw-r--r--arch/arm/mach-tegra/common.c12
-rw-r--r--arch/arm/mach-tegra/cortex-a9.S22
-rw-r--r--arch/arm/mach-tegra/cpuidle.c21
-rw-r--r--arch/arm/mach-tegra/platsmp.c18
-rw-r--r--arch/arm/mach-tegra/suspend.c31
-rw-r--r--arch/arm/mm/cache-l2x0.c73
-rw-r--r--arch/arm/mm/proc-v7.S6
-rw-r--r--security/Kconfig1
-rw-r--r--security/Makefile2
-rw-r--r--security/tf_driver/Kconfig7
-rw-r--r--security/tf_driver/Makefile36
-rw-r--r--security/tf_driver/s_version.h101
-rw-r--r--security/tf_driver/scx_protocol.h676
-rw-r--r--security/tf_driver/scxlnx_comm.c1756
-rw-r--r--security/tf_driver/scxlnx_comm.h204
-rw-r--r--security/tf_driver/scxlnx_comm_tz.c891
-rw-r--r--security/tf_driver/scxlnx_conn.c1530
-rw-r--r--security/tf_driver/scxlnx_conn.h91
-rw-r--r--security/tf_driver/scxlnx_defs.h532
-rw-r--r--security/tf_driver/scxlnx_device.c697
-rw-r--r--security/tf_driver/scxlnx_util.c1141
-rw-r--r--security/tf_driver/scxlnx_util.h102
22 files changed, 7940 insertions, 10 deletions
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 71b86b5238e2..4504624cc1fd 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -98,9 +98,21 @@ void __init tegra_init_cache(void)
#ifdef CONFIG_CACHE_L2X0
void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+ /*
+ ISSUE : Some registers of PL310 controller must be called from Secure context!
+ When called from Normal we obtain an abort.
+ Instructions that must be called in Secure :
+ - Tag and Data RAM Latency Control Registers (0x108 & 0x10C) must be written in Secure.
+
+ The following section of code has been regrouped in the implementation of "l2x0_init".
+ The "l2x0_init" will in fact call an SMC instruction to switch from Normal context to Secure context.
+ The configuration and activation will be done in Secure.
+ */
writel(0x331, p + L2X0_TAG_LATENCY_CTRL);
writel(0x441, p + L2X0_DATA_LATENCY_CTRL);
writel(2, p + L2X0_PWR_CTRL);
+#endif
l2x0_init(p, 0x6C480001, 0x8200c3fe);
#endif
diff --git a/arch/arm/mach-tegra/cortex-a9.S b/arch/arm/mach-tegra/cortex-a9.S
index 1ca815d0fab8..1b2287033a4b 100644
--- a/arch/arm/mach-tegra/cortex-a9.S
+++ b/arch/arm/mach-tegra/cortex-a9.S
@@ -406,7 +406,10 @@ ENTRY(__cortex_a9_restore)
mcr p15, 2, r0, c0, c0, 0 @ csselr
mcr p15, 0, r1, c1, c0, 0 @ sctlr
mcr p15, 0, r2, c1, c0, 1 @ actlr
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+ //TL : moved to secure
mcr p15, 0, r3, c15, c0, 0 @ pctlr
+#endif
add r9, r8, #CTX_TTBR0
ldmia r9!, {r0-r7}
@@ -510,8 +513,11 @@ ENTRY(__cortex_a9_restore)
#endif
mcr p15, 0, lr, c1, c0, 2 @ cpacr (loaded before VFP)
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+ //TL : moved to secure
ldr r9, [r8, #CTX_DIAGNOSTIC]
mcr p15, 0, r9, c15, c0, 1 @ diag
+#endif
/* finally, restore the stack and return */
ldmfd sp!, {r3-r12, lr}
@@ -532,6 +538,7 @@ ENTRY(__cortex_a9_l2x0_restart)
mov32 r9, (TEGRA_ARM_PL310_BASE-IO_CPU_PHYS+IO_CPU_VIRT)
add r10, r8, #CTX_L2_CTRL
ldmia r10, {r3-r7}
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
str r5, [r9, #L2X0_TAG_LATENCY_CTRL]
str r6, [r9, #L2X0_DATA_LATENCY_CTRL]
str r7, [r9, #L2X0_PREFETCH_OFFSET]
@@ -557,6 +564,21 @@ __reenable_l2x0:
dsb
isb
str r3, [r9, #L2X0_CTRL]
+#else
+ cmp r3, #0 @ only call SMC if L2 was enabled
+ beq l2_done
+
+ cmp r0, #0 @ if invalidate, call SMC with R1=1, else R1=4
+ moveq r1, #4
+ movne r1, #1
+// SMC(Enable Cache)
+ ldr r0, =0xFFFFF100
+ ldr r2, =0x00000000
+ ldr r3, =0x00000000
+ ldr r4, =0x00000000
+ smc 0
+l2_done:
+#endif
#endif
b __cortex_a9_restore
diff --git a/arch/arm/mach-tegra/cpuidle.c b/arch/arm/mach-tegra/cpuidle.c
index 23cb9acc588c..e9a2f25da9e8 100644
--- a/arch/arm/mach-tegra/cpuidle.c
+++ b/arch/arm/mach-tegra/cpuidle.c
@@ -205,17 +205,26 @@ static int tegra_tear_down_cpu1(void)
return 0;
}
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+void callGenericSMC(u32 param0, u32 param1, u32 param2);
+#endif
static void tegra_wake_cpu1(void)
{
unsigned long boot_vector;
unsigned long old_boot_vector;
unsigned long timeout;
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
u32 reg;
+ static void __iomem *vector_base = (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x100);
+#endif
boot_vector = virt_to_phys(tegra_hotplug_startup);
- old_boot_vector = readl(EVP_CPU_RESET_VECTOR);
- writel(boot_vector, EVP_CPU_RESET_VECTOR);
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+ callGenericSMC(0xFFFFFFFC, 0xFFFFFFE5, boot_vector);
+#else
+ old_boot_vector = readl(vector_base);
+ writel(boot_vector, vector_base);
/* enable cpu clock on cpu */
reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
@@ -227,15 +236,17 @@ static void tegra_wake_cpu1(void)
/* unhalt the cpu */
writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14);
+
timeout = jiffies + msecs_to_jiffies(1000);
while (time_before(jiffies, timeout)) {
- if (readl(EVP_CPU_RESET_VECTOR) != boot_vector)
+ if (readl(vector_base) != boot_vector)
break;
udelay(10);
}
/* put the old boot vector back */
- writel(old_boot_vector, EVP_CPU_RESET_VECTOR);
+ writel(old_boot_vector, vector_base);
+#endif
/* CPU1 is now started */
}
@@ -557,7 +568,7 @@ static int __init tegra_cpuidle_init(void)
void __iomem *mask_arm;
unsigned int reg;
int ret;
-
+
irq_set_affinity(TEGRA_CPUIDLE_BOTH_IDLE, cpumask_of(0));
irq_set_affinity(TEGRA_CPUIDLE_TEAR_DOWN, cpumask_of(1));
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 659c66967fb5..6cacdb910af4 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -75,13 +75,19 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
#endif
spin_unlock(&boot_lock);
}
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+void callGenericSMC(u32 param0, u32 param1, u32 param2);
+#endif
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long old_boot_vector;
unsigned long boot_vector;
unsigned long timeout;
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
u32 reg;
+ static void __iomem *vector_base = (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x100);
+#endif
/*
* set synchronisation state between this boot processor
@@ -99,8 +105,11 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
smp_wmb();
- old_boot_vector = readl(EVP_CPU_RESET_VECTOR);
- writel(boot_vector, EVP_CPU_RESET_VECTOR);
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+ callGenericSMC(0xFFFFFFFC, 0xFFFFFFE5, boot_vector);
+#else
+ old_boot_vector = readl(vector_base);
+ writel(boot_vector, vector_base);
/* enable cpu clock on cpu */
reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
@@ -114,13 +123,14 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
timeout = jiffies + HZ;
while (time_before(jiffies, timeout)) {
- if (readl(EVP_CPU_RESET_VECTOR) != boot_vector)
+ if (readl(vector_base) != boot_vector)
break;
udelay(10);
}
/* put the old boot vector back */
- writel(old_boot_vector, EVP_CPU_RESET_VECTOR);
+ writel(old_boot_vector, vector_base);
+#endif
/*
* now the secondary core is starting up let it run its
diff --git a/arch/arm/mach-tegra/suspend.c b/arch/arm/mach-tegra/suspend.c
index 8a3af04a1689..7e581b198216 100644
--- a/arch/arm/mach-tegra/suspend.c
+++ b/arch/arm/mach-tegra/suspend.c
@@ -56,6 +56,28 @@
#include "board.h"
#include "power.h"
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+void callGenericSMC(u32 param0, u32 param1, u32 param2)
+{
+ __asm__ volatile(
+ "mov r0, %2\n"
+ "mov r1, %3\n"
+ "mov r2, %4\n"
+ "mov r3, #0\n"
+ "mov r4, #0\n"
+ ".word 0xe1600070 @ SMC 0\n"
+ "mov %0, r0\n"
+ "mov %1, r1\n"
+ : "=r" (param0), "=r" (param1)
+ : "r" (param0), "r" (param1),
+ "r" (param2)
+ : "r0", "r1", "r2", "r3", "r4");
+}
+u32 buffer_rdv[64];
+#endif
+
+/**************** END TL *********************/
+
struct suspend_context {
/*
* The next 7 values are referenced by offset in __restart_plls
@@ -379,6 +401,11 @@ unsigned int tegra_suspend_lp2(unsigned int us)
outer_flush_range(__pa(&tegra_sctx),__pa(&tegra_sctx+1));
barrier();
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+// TRUSTED LOGIC SMC_STOP/Save State
+ callGenericSMC(0xFFFFFFFC, 0xFFFFFFE4, virt_to_phys(buffer_rdv));
+#endif
+
__cortex_a9_save(mode);
/* return from __cortex_a9_restore */
barrier();
@@ -462,6 +489,10 @@ static void tegra_suspend_dram(bool do_lp0)
l2x0_shutdown();
#endif
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+// TRUSTED LOGIC SMC_STOP/Save State
+ callGenericSMC(0xFFFFFFFC, 0xFFFFFFE3, virt_to_phys(buffer_rdv));
+#endif
__cortex_a9_save(mode);
restore_cpu_complex();
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9abfa5d2b750..55aff8687bff 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -23,6 +23,11 @@
#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+#include <linux/sched.h>
+void callGenericSMC(u32 param0, u32 param1, u32 param2);
+#endif
+
#define CACHE_LINE_SIZE 32
static void __iomem *l2x0_base;
@@ -249,6 +254,11 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
void l2x0_shutdown(void)
{
unsigned long flags;
+#ifdef CONFIG_SMP
+ long ret;
+ cpumask_t saved_cpu_mask;
+ cpumask_t local_cpu_mask = CPU_MASK_NONE;
+#endif
if (l2x0_disabled)
return;
@@ -258,6 +268,7 @@ void l2x0_shutdown(void)
local_irq_save(flags);
if (readl(l2x0_base + L2X0_CTRL) & 1) {
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
int m;
/* lockdown all ways, all masters to prevent new line
* allocation during maintenance */
@@ -274,6 +285,27 @@ void l2x0_shutdown(void)
writel(0, l2x0_base + L2X0_LOCKDOWN_WAY_D + (m*8));
writel(0, l2x0_base + L2X0_LOCKDOWN_WAY_I + (m*8));
}
+#else
+#ifdef CONFIG_SMP
+ /* If SMP defined,
+ TF is running on Core #0. So, force execution on Core #0 */
+ cpu_set(0, local_cpu_mask);
+ sched_getaffinity(0, &saved_cpu_mask);
+ ret = sched_setaffinity(0, &local_cpu_mask);
+ if (ret != 0)
+ {
+ printk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret);
+ }
+#endif
+ callGenericSMC(0xFFFFF100, 0x00000002, 0);
+#ifdef CONFIG_SMP
+ ret = sched_setaffinity(0, &saved_cpu_mask);
+ if (ret != 0)
+ {
+ printk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret);
+ }
+#endif
+#endif
}
local_irq_restore(flags);
@@ -285,6 +317,11 @@ static void l2x0_enable(__u32 aux_val, __u32 aux_mask)
__u32 cache_id;
int ways;
const char *type;
+#ifdef CONFIG_SMP
+ long ret;
+ cpumask_t saved_cpu_mask;
+ cpumask_t local_cpu_mask = CPU_MASK_NONE;
+#endif
if (l2x0_disabled)
return;
@@ -324,6 +361,7 @@ static void l2x0_enable(__u32 aux_val, __u32 aux_mask)
*/
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
/* l2x0 controller is disabled */
writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
@@ -331,6 +369,41 @@ static void l2x0_enable(__u32 aux_val, __u32 aux_mask)
/* enable L2X0 */
writel_relaxed(1, l2x0_base + L2X0_CTRL);
+
+#else /* CONFIG_TRUSTED_FOUNDATIONS is defined */
+/*
+ ISSUE : Some registers of PL310 controller must be written from Secure context!
+ When called from Normal we obtain an abort or do nothing.
+ Instructions that must be called in Secure :
+ - Write to Control register (L2X0_CTRL==0x100)
+ - Write in Auxiliary controller (L2X0_AUX_CTRL==0x104)
+ - Invalidate all entries in cache (L2X0_INV_WAY==0x77C), mandatory at boot time.
+ - Tag and Data RAM Latency Control Registers (0x108 & 0x10C) must be written in Secure.
+
+ The following call are now called by a Secure driver.
+ We switch to Secure context and ask to Trusted Foundations to do the configuration and activation of L2.*/
+ /* l2x0 controller is disabled */
+
+#ifdef CONFIG_SMP
+ /* If SMP defined,
+ TF is running on Core #0. So, force execution on Core #0 */
+ cpu_set(0, local_cpu_mask);
+ sched_getaffinity(0, &saved_cpu_mask);
+ ret = sched_setaffinity(0, &local_cpu_mask);
+ if (ret != 0)
+ {
+ printk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret);
+ }
+#endif
+ callGenericSMC(0xFFFFF100, 0x00000001, 0);
+#ifdef CONFIG_SMP
+ ret = sched_setaffinity(0, &saved_cpu_mask);
+ if (ret != 0)
+ {
+ printk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret);
+ }
+#endif
+#endif
}
/*printk(KERN_INFO "%s cache controller enabled\n", type);
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index de77d5b4271a..590f57dc1f70 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -238,6 +238,8 @@ __v7_setup:
2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number
teq r0, r10
bne 3f
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+ /* c15,c0,0: read and write in Secure privileged modes, read only in Non-secure state. */
cmp r6, #0x10 @ power ctrl reg added r1p0
mrcge p15, 0, r10, c15, c0, 0 @ read power control register
orrge r10, r10, #1 @ enable dynamic clock gating
@@ -248,7 +250,9 @@ __v7_setup:
orreq r10, r10, #0x30 @ disable core clk gate on
mcreq p15, 0, r10, c15, c0, 2 @ instr-side waits
#endif
-#ifdef CONFIG_ARM_ERRATA_742230
+#endif
+
+#if defined(CONFIG_ARM_ERRATA_742230) && !defined(CONFIG_TRUSTED_FOUNDATIONS)
cmp r6, #0x22 @ only present up to r2p2
mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
orrle r10, r10, #1 << 4 @ set bit #4
diff --git a/security/Kconfig b/security/Kconfig
index bd72ae623494..ae0a83663bbe 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -141,6 +141,7 @@ source security/selinux/Kconfig
source security/smack/Kconfig
source security/tomoyo/Kconfig
source security/apparmor/Kconfig
+source security/tf_driver/Kconfig
source security/integrity/ima/Kconfig
diff --git a/security/Makefile b/security/Makefile
index 8bb0fe9e1ca9..d18c6bebc4ca 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -7,6 +7,7 @@ subdir-$(CONFIG_SECURITY_SELINUX) += selinux
subdir-$(CONFIG_SECURITY_SMACK) += smack
subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
+subdir-$(CONFIG_TRUSTED_FOUNDATIONS) += tf_driver
# always enable default capabilities
obj-y += commoncap.o
@@ -22,6 +23,7 @@ obj-$(CONFIG_AUDIT) += lsm_audit.o
obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o
obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/built-in.o
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += tf_driver/built-in.o
# Object integrity file lists
subdir-$(CONFIG_IMA) += integrity/ima
diff --git a/security/tf_driver/Kconfig b/security/tf_driver/Kconfig
new file mode 100644
index 000000000000..493b92c08f47
--- /dev/null
+++ b/security/tf_driver/Kconfig
@@ -0,0 +1,7 @@
+config TRUSTED_FOUNDATIONS
+ bool "Enable TF Driver"
+ default n
+ help
+ This option adds kernel support for communication with the Trusted Foundations.
+ If you are unsure how to answer this question, answer N.
+
diff --git a/security/tf_driver/Makefile b/security/tf_driver/Makefile
new file mode 100644
index 000000000000..888d1d329746
--- /dev/null
+++ b/security/tf_driver/Makefile
@@ -0,0 +1,36 @@
+#
+# Copyright (c) 2006-2010 Trusted Logic S.A.
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+# debug options
+#EXTRA_CFLAGS += -O0 -DDEBUG -D_DEBUG -DCONFIG_TF_DRIVER_DEBUG_SUPPORT
+EXTRA_CFLAGS += -DNDEBUG
+EXTRA_CFLAGS += -DLINUX -DCONFIG_TF_TRUSTZONE -DCONFIG_TFN
+
+ifdef S_VERSION_BUILD
+EXTRA_CFLAGS += -DS_VERSION_BUILD=$(S_VERSION_BUILD)
+endif
+
+tf_driver-objs += scxlnx_util.o
+tf_driver-objs += scxlnx_conn.o
+tf_driver-objs += scxlnx_device.o
+tf_driver-objs += scxlnx_comm.o
+tf_driver-objs += scxlnx_comm_tz.o
+
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += tf_driver.o
diff --git a/security/tf_driver/s_version.h b/security/tf_driver/s_version.h
new file mode 100644
index 000000000000..f7368d797b4f
--- /dev/null
+++ b/security/tf_driver/s_version.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __S_VERSION_H__
+#define __S_VERSION_H__
+
+/*
+ * Usage: define S_VERSION_BUILD on the compiler's command line.
+ *
+ * Then, you get:
+ * - S_VERSION_MAIN "X.Y"
+ * - S_VERSION_BUILD = 0 if S_VERSION_BUILD not defined or empty
+ * - S_VERSION_STRING = "TFO[O][P] X.Y.N " or "TFO[O][P] X.Y.N D "
+ * - S_VERSION_RESOURCE = X,Y,0,N
+ */
+
+#ifdef S_VERSION_BUILD
+/* TRICK: detect if S_VERSION is defined but empty */
+#if 0 == S_VERSION_BUILD-0
+#undef S_VERSION_BUILD
+#define S_VERSION_BUILD 0
+#endif
+#else
+/* S_VERSION_BUILD is not defined */
+#define S_VERSION_BUILD 0
+#endif
+
+#define __STRINGIFY(X) #X
+#define __STRINGIFY2(X) __STRINGIFY(X)
+
+#if !defined(NDEBUG) || defined(_DEBUG)
+#define S_VERSION_VARIANT_DEBUG "D"
+#else
+#define S_VERSION_VARIANT_DEBUG " "
+#endif
+
+#ifdef STANDARD
+#define S_VERSION_VARIANT_STANDARD "S"
+#else
+#define S_VERSION_VARIANT_STANDARD " "
+#endif
+
+#define S_VERSION_VARIANT S_VERSION_VARIANT_STANDARD S_VERSION_VARIANT_DEBUG " "
+
+/*
+ * This version number must be updated for each new release
+ */
+#define S_VERSION_MAIN "08.01"
+#define S_VERSION_RESOURCE 8,1,0,S_VERSION_BUILD
+
+/*
+ * Products Versioning
+ */
+#if defined(WIN32)
+
+/* Win32 Simulator and all Win32 Side Components */
+#define PRODUCT_NAME "TFOWX"
+
+#elif defined(__ANDROID32__)
+
+#define PRODUCT_NAME "UNKWN"
+
+#elif defined(LINUX)
+
+#if defined(__ARM_EABI__)
+/* arm architecture -> Cortex-A8 */
+#define PRODUCT_NAME "TFOLB"
+#else
+/* ix86 architecture -> Linux Simulator and all Linux Side Components */
+#define PRODUCT_NAME "TFOLX"
+#endif
+
+#else
+
+/* Not OS specific -> Cortex-A8 Secure Binary */
+#define PRODUCT_NAME "TFOXB"
+
+#endif
+
+#define S_VERSION_STRING \
+ PRODUCT_NAME S_VERSION_MAIN "." \
+ __STRINGIFY2(S_VERSION_BUILD) " " \
+ S_VERSION_VARIANT
+
+#endif /* __S_VERSION_H__ */
diff --git a/security/tf_driver/scx_protocol.h b/security/tf_driver/scx_protocol.h
new file mode 100644
index 000000000000..06a0bb792769
--- /dev/null
+++ b/security/tf_driver/scx_protocol.h
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __SCX_PROTOCOL_H__
+#define __SCX_PROTOCOL_H__
+
+/*----------------------------------------------------------------------------
+ *
+ * This header file defines the structure used in the SChannel Protocol.
+ * See your Product Reference Manual for a specification of the SChannel
+ * protocol.
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The driver interface version returned by the version ioctl
+ */
+#define SCX_DRIVER_INTERFACE_VERSION 0x04000000
+
+/*
+ * Protocol version handling
+ */
+#define SCX_S_PROTOCOL_MAJOR_VERSION (0x06)
+#define GET_PROTOCOL_MAJOR_VERSION(a) (a >> 24)
+#define GET_PROTOCOL_MINOR_VERSION(a) ((a >> 16) & 0xFF)
+
+/*
+ * The size, in bytes, of the L1 Shared Buffer.
+ */
+#define SCX_COMM_BUFFER_SIZE (0x1000) /* 4kB*/
+
+/*
+ * The S flag of the nConfigFlags_S register.
+ */
+#define SCX_CONFIG_FLAG_S (1 << 3)
+
+/*
+ * The TimeSlot field of the nSyncSerial_N register.
+ */
+#define SCX_SYNC_SERIAL_TIMESLOT_N (1)
+
+/*
+ * nStatus_S related defines.
+ */
+#define SCX_STATUS_P_MASK (0X00000001)
+#define SCX_STATUS_POWER_STATE_SHIFT (3)
+#define SCX_STATUS_POWER_STATE_MASK (0x1F << SCX_STATUS_POWER_STATE_SHIFT)
+
+/*
+ * Possible power states of the POWER_STATE field of the nStatus_S register
+ */
+#define SCX_POWER_MODE_COLD_BOOT (0)
+#define SCX_POWER_MODE_WARM_BOOT (1)
+#define SCX_POWER_MODE_ACTIVE (3)
+#define SCX_POWER_MODE_READY_TO_SHUTDOWN (5)
+#define SCX_POWER_MODE_READY_TO_HIBERNATE (7)
+#define SCX_POWER_MODE_WAKEUP (8)
+#define SCX_POWER_MODE_PANIC (15)
+
+/*
+ * Possible nCommand values for MANAGEMENT commands
+ */
+#define SCX_MANAGEMENT_HIBERNATE (1)
+#define SCX_MANAGEMENT_SHUTDOWN (2)
+#define SCX_MANAGEMENT_PREPARE_FOR_CORE_OFF (3)
+#define SCX_MANAGEMENT_RESUME_FROM_CORE_OFF (4)
+
+/*
+ * The capacity of the Normal Word message queue, in number of slots.
+ */
+#define SCX_N_MESSAGE_QUEUE_CAPACITY (512)
+
+/*
+ * The capacity of the Secure World message answer queue, in number of slots.
+ */
+#define SCX_S_ANSWER_QUEUE_CAPACITY (256)
+
+/*
+ * The value of the S-timeout register indicating an infinite timeout.
+ */
+#define SCX_S_TIMEOUT_0_INFINITE (0xFFFFFFFF)
+#define SCX_S_TIMEOUT_1_INFINITE (0xFFFFFFFF)
+
+/*
+ * The value of the S-timeout register indicating an immediate timeout.
+ */
+#define SCX_S_TIMEOUT_0_IMMEDIATE (0x0)
+#define SCX_S_TIMEOUT_1_IMMEDIATE (0x0)
+
+/*
+ * Identifies the get protocol version SMC.
+ */
+#define SCX_SMC_GET_PROTOCOL_VERSION (0XFFFFFFFB)
+
+/*
+ * Identifies the init SMC.
+ */
+#define SCX_SMC_INIT (0XFFFFFFFF)
+
+/*
+ * Identifies the reset irq SMC.
+ */
+#define SCX_SMC_RESET_IRQ (0xFFFFFFFE)
+
+/*
+ * Identifies the SET_W3B SMC.
+ */
+#define SCX_SMC_WAKE_UP (0xFFFFFFFD)
+
+/*
+ * Identifies the STOP SMC.
+ */
+#define SCX_SMC_STOP (0xFFFFFFFC)
+
+/*
+ * Identifies the n-yield SMC.
+ */
+#define SCX_SMC_N_YIELD (0X00000003)
+
+
+/* Possible stop commands for SMC_STOP */
+#define SCSTOP_HIBERNATE (0xFFFFFFE1)
+#define SCSTOP_SHUTDOWN (0xFFFFFFE2)
+
+/*
+ * representation of an UUID.
+ */
+struct SCX_UUID {
+ u32 time_low;
+ u16 time_mid;
+ u16 time_hi_and_version;
+ u8 clock_seq_and_node[8];
+};
+
+
+/**
+ * Command parameters.
+ */
+struct SCX_COMMAND_PARAM_VALUE {
+ u32 a;
+ u32 b;
+};
+
+struct SCX_COMMAND_PARAM_TEMP_MEMREF {
+ u32 nDescriptor; /* data pointer for exchange message.*/
+ u32 nSize;
+ u32 nOffset;
+};
+
+struct SCX_COMMAND_PARAM_MEMREF {
+ u32 hBlock;
+ u32 nSize;
+ u32 nOffset;
+};
+
+union SCX_COMMAND_PARAM {
+ struct SCX_COMMAND_PARAM_VALUE sValue;
+ struct SCX_COMMAND_PARAM_TEMP_MEMREF sTempMemref;
+ struct SCX_COMMAND_PARAM_MEMREF sMemref;
+};
+
+/**
+ * Answer parameters.
+ */
+struct SCX_ANSWER_PARAM_VALUE {
+ u32 a;
+ u32 b;
+};
+
+struct SCX_ANSWER_PARAM_SIZE {
+ u32 _ignored;
+ u32 nSize;
+};
+
+union SCX_ANSWER_PARAM {
+ struct SCX_ANSWER_PARAM_SIZE sSize;
+ struct SCX_ANSWER_PARAM_VALUE sValue;
+};
+
+/*
+ * Descriptor tables capacity
+ */
+#define SCX_MAX_W3B_COARSE_PAGES (2)
+#define SCX_MAX_COARSE_PAGES (8)
+#define SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT (8)
+#define SCX_DESCRIPTOR_TABLE_CAPACITY \
+ (1 << SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT)
+#define SCX_DESCRIPTOR_TABLE_CAPACITY_MASK \
+ (SCX_DESCRIPTOR_TABLE_CAPACITY - 1)
+/* Shared memories coarse pages can map up to 1MB */
+#define SCX_MAX_COARSE_PAGE_MAPPED_SIZE \
+ (PAGE_SIZE * SCX_DESCRIPTOR_TABLE_CAPACITY)
+/* Shared memories cannot exceed 8MB */
+#define SCX_MAX_SHMEM_SIZE \
+ (SCX_MAX_COARSE_PAGE_MAPPED_SIZE << 3)
+
+/*
+ * Buffer size for version description fields
+ */
+#define SCX_DESCRIPTION_BUFFER_LENGTH 64
+
+/*
+ * Shared memory type flags.
+ */
+#define SCX_SHMEM_TYPE_READ (0x00000001)
+#define SCX_SHMEM_TYPE_WRITE (0x00000002)
+
+/*
+ * Shared mem flags
+ */
+#define SCX_SHARED_MEM_FLAG_INPUT 1
+#define SCX_SHARED_MEM_FLAG_OUTPUT 2
+#define SCX_SHARED_MEM_FLAG_INOUT 3
+
+
+/*
+ * Parameter types
+ */
+#define SCX_PARAM_TYPE_NONE 0x0
+#define SCX_PARAM_TYPE_VALUE_INPUT 0x1
+#define SCX_PARAM_TYPE_VALUE_OUTPUT 0x2
+#define SCX_PARAM_TYPE_VALUE_INOUT 0x3
+#define SCX_PARAM_TYPE_MEMREF_TEMP_INPUT 0x5
+#define SCX_PARAM_TYPE_MEMREF_TEMP_OUTPUT 0x6
+#define SCX_PARAM_TYPE_MEMREF_TEMP_INOUT 0x7
+#define SCX_PARAM_TYPE_MEMREF_INPUT 0xD
+#define SCX_PARAM_TYPE_MEMREF_OUTPUT 0xE
+#define SCX_PARAM_TYPE_MEMREF_INOUT 0xF
+
+#define SCX_PARAM_TYPE_MEMREF_FLAG 0x4
+#define SCX_PARAM_TYPE_REGISTERED_MEMREF_FLAG 0x8
+
+
+#define SCX_MAKE_PARAM_TYPES(t0, t1, t2, t3) \
+ ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
+#define SCX_GET_PARAM_TYPE(t, i) (((t) >> (4 * i)) & 0xF)
+
+/*
+ * Login types.
+ */
+#define SCX_LOGIN_PUBLIC 0x00000000
+#define SCX_LOGIN_USER 0x00000001
+#define SCX_LOGIN_GROUP 0x00000002
+#define SCX_LOGIN_APPLICATION 0x00000004
+#define SCX_LOGIN_APPLICATION_USER 0x00000005
+#define SCX_LOGIN_APPLICATION_GROUP 0x00000006
+#define SCX_LOGIN_AUTHENTICATION 0x80000000
+#define SCX_LOGIN_PRIVILEGED 0x80000002
+
+/* Login variants */
+
+#define SCX_LOGIN_VARIANT(mainType, os, variant) \
+ ((mainType) | (1 << 27) | ((os) << 16) | ((variant) << 8))
+
+#define SCX_LOGIN_GET_MAIN_TYPE(type) \
+ ((type) & ~SCX_LOGIN_VARIANT(0, 0xFF, 0xFF))
+
+#define SCX_LOGIN_OS_ANY 0x00
+#define SCX_LOGIN_OS_LINUX 0x01
+#define SCX_LOGIN_OS_ANDROID 0x04
+
+/* OS-independent variants */
+#define SCX_LOGIN_USER_NONE \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_ANY, 0xFF)
+#define SCX_LOGIN_GROUP_NONE \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_ANY, 0xFF)
+#define SCX_LOGIN_APPLICATION_USER_NONE \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_ANY, 0xFF)
+#define SCX_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_AUTHENTICATION, SCX_LOGIN_OS_ANY, 0x01)
+#define SCX_LOGIN_PRIVILEGED_KERNEL \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_PRIVILEGED, SCX_LOGIN_OS_ANY, 0x01)
+
+/* Linux variants */
+#define SCX_LOGIN_USER_LINUX_EUID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_LINUX, 0x01)
+#define SCX_LOGIN_GROUP_LINUX_GID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_LINUX, 0x01)
+#define SCX_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION, SCX_LOGIN_OS_LINUX, 0x01)
+#define SCX_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_LINUX, 0x01)
+#define SCX_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_GROUP, SCX_LOGIN_OS_LINUX, 0x01)
+
+/* Android variants */
+#define SCX_LOGIN_USER_ANDROID_EUID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_ANDROID, 0x01)
+#define SCX_LOGIN_GROUP_ANDROID_GID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_ANDROID, 0x01)
+#define SCX_LOGIN_APPLICATION_ANDROID_UID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION, SCX_LOGIN_OS_ANDROID, 0x01)
+#define SCX_LOGIN_APPLICATION_USER_ANDROID_UID_EUID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_ANDROID, \
+ 0x01)
+#define SCX_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_GROUP, SCX_LOGIN_OS_ANDROID, \
+ 0x01)
+
+/*
+ * return origins
+ */
+#define SCX_ORIGIN_COMMS 2
+#define SCX_ORIGIN_TEE 3
+#define SCX_ORIGIN_TRUSTED_APP 4
+/*
+ * The SCX message types.
+ */
+#define SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT 0x02
+#define SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT 0xFD
+#define SCX_MESSAGE_TYPE_REGISTER_SHARED_MEMORY 0xF7
+#define SCX_MESSAGE_TYPE_RELEASE_SHARED_MEMORY 0xF9
+#define SCX_MESSAGE_TYPE_OPEN_CLIENT_SESSION 0xF0
+#define SCX_MESSAGE_TYPE_CLOSE_CLIENT_SESSION 0xF2
+#define SCX_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND 0xF5
+#define SCX_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND 0xF4
+#define SCX_MESSAGE_TYPE_MANAGEMENT 0xFE
+
+
+/*
+ * The error codes
+ */
+#define S_SUCCESS 0x00000000
+#define S_ERROR_NO_DATA 0xFFFF000B
+#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C
+
+
+struct SCX_COMMAND_HEADER {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo;
+ u32 nOperationID;
+};
+
+struct SCX_ANSWER_HEADER {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo;
+ u32 nOperationID;
+ u32 nErrorCode;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT command message.
+ */
+struct SCX_COMMAND_CREATE_DEVICE_CONTEXT {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ u32 nOperationID;
+ u32 nDeviceContextID;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT answer message.
+ */
+struct SCX_ANSWER_CREATE_DEVICE_CONTEXT {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 nErrorCode;
+ /* an opaque Normal World identifier for the device context */
+ u32 hDeviceContext;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT command message.
+ */
+struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u16 nMessageInfo_RFU;	/* RFU = Reserved for Future Use */
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 hDeviceContext;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT answer message.
+ */
+struct SCX_ANSWER_DESTROY_DEVICE_CONTEXT {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u16 nMessageInfo_RFU;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 nErrorCode;
+	u32 nDeviceContextID;
+};
+
+/*
+ * OPEN_CLIENT_SESSION command message.
+ */
+struct SCX_COMMAND_OPEN_CLIENT_SESSION {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u16 nParamTypes;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 hDeviceContext;
+	u32 nCancellationID;
+	u64 sTimeout;
+	struct SCX_UUID sDestinationUUID;
+	union SCX_COMMAND_PARAM sParams[4];
+	u32 nLoginType;
+	/*
+	 * Size = 0 for public, [16] for group identification, [20] for
+	 * authentication
+	 */
+	u8 sLoginData[20];
+};
+
+/*
+ * OPEN_CLIENT_SESSION answer message.
+ */
+struct SCX_ANSWER_OPEN_CLIENT_SESSION {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u8 nReturnOrigin;
+	u8 __nReserved;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 nErrorCode;
+	u32 hClientSession;
+	union SCX_ANSWER_PARAM sAnswers[4];
+};
+
+/*
+ * CLOSE_CLIENT_SESSION command message.
+ */
+struct SCX_COMMAND_CLOSE_CLIENT_SESSION {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u16 nMessageInfo_RFU;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 hDeviceContext;
+	u32 hClientSession;
+};
+
+/*
+ * CLOSE_CLIENT_SESSION answer message.
+ */
+struct SCX_ANSWER_CLOSE_CLIENT_SESSION {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u16 nMessageInfo_RFU;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 nErrorCode;
+};
+
+
+/*
+ * REGISTER_SHARED_MEMORY command message
+ */
+struct SCX_COMMAND_REGISTER_SHARED_MEMORY {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u16 nMemoryFlags;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 hDeviceContext;
+	u32 nBlockID;
+	u32 nSharedMemSize;
+	u32 nSharedMemStartOffset;
+	/* one L1 coarse page descriptor per coarse page of the region */
+	u32 nSharedMemDescriptors[SCX_MAX_COARSE_PAGES];
+};
+
+/*
+ * REGISTER_SHARED_MEMORY answer message.
+ */
+struct SCX_ANSWER_REGISTER_SHARED_MEMORY {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u16 nMessageInfo_RFU;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 nErrorCode;
+	u32 hBlock;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY command message.
+ */
+struct SCX_COMMAND_RELEASE_SHARED_MEMORY {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u16 nMessageInfo_RFU;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 hDeviceContext;
+	u32 hBlock;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY answer message.
+ */
+struct SCX_ANSWER_RELEASE_SHARED_MEMORY {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u16 nMessageInfo_RFU;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 nErrorCode;
+	u32 nBlockID;
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command message.
+ */
+struct SCX_COMMAND_INVOKE_CLIENT_COMMAND {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u16 nParamTypes;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 hDeviceContext;
+	u32 hClientSession;
+	u64 sTimeout;
+	u32 nCancellationID;
+	u32 nClientCommandIdentifier;
+	union SCX_COMMAND_PARAM sParams[4];
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command answer.
+ */
+struct SCX_ANSWER_INVOKE_CLIENT_COMMAND {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u8 nReturnOrigin;
+	u8 __nReserved;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 nErrorCode;
+	union SCX_ANSWER_PARAM sAnswers[4];
+};
+
+/*
+ * CANCEL_CLIENT_OPERATION command message.
+ */
+struct SCX_COMMAND_CANCEL_CLIENT_OPERATION {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u16 nMessageInfo_RFU;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 hDeviceContext;
+	u32 hClientSession;
+	u32 nCancellationID;
+};
+
+/*
+ * CANCEL_CLIENT_OPERATION answer message.
+ */
+struct SCX_ANSWER_CANCEL_CLIENT_OPERATION {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u16 nMessageInfo_RFU;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 nErrorCode;
+};
+
+/*
+ * MANAGEMENT command message.
+ */
+struct SCX_COMMAND_MANAGEMENT {
+	u8 nMessageSize;
+	u8 nMessageType;
+	u16 nCommand;
+	/* an opaque Normal World identifier for the operation */
+	u32 nOperationID;
+	u32 nW3BSize;
+	u32 nW3BStartOffset;
+	u32 nSharedMemDescriptors[1];
+};
+
+/*
+ * POWER_MANAGEMENT answer message.
+ * The message does not provide message specific parameters.
+ * Therefore no need to define a specific answer structure
+ */
+
+/*
+ * Structure for L2 messages
+ * Union of all Normal World -> Secure World command layouts; the actual
+ * layout is selected by sHeader.nMessageType.
+ */
+union SCX_COMMAND_MESSAGE {
+	struct SCX_COMMAND_HEADER sHeader;
+	struct SCX_COMMAND_CREATE_DEVICE_CONTEXT sCreateDeviceContextMessage;
+	struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT sDestroyDeviceContextMessage;
+	struct SCX_COMMAND_OPEN_CLIENT_SESSION sOpenClientSessionMessage;
+	struct SCX_COMMAND_CLOSE_CLIENT_SESSION sCloseClientSessionMessage;
+	struct SCX_COMMAND_REGISTER_SHARED_MEMORY sRegisterSharedMemoryMessage;
+	struct SCX_COMMAND_RELEASE_SHARED_MEMORY sReleaseSharedMemoryMessage;
+	struct SCX_COMMAND_INVOKE_CLIENT_COMMAND sInvokeClientCommandMessage;
+	struct SCX_COMMAND_CANCEL_CLIENT_OPERATION
+		sCancelClientOperationMessage;
+	struct SCX_COMMAND_MANAGEMENT sManagementMessage;
+};
+
+/*
+ * Structure for any L2 answer
+ * Union of all Secure World -> Normal World answer layouts; the actual
+ * layout is selected by sHeader.nMessageType.
+ */
+
+union SCX_ANSWER_MESSAGE {
+	struct SCX_ANSWER_HEADER sHeader;
+	struct SCX_ANSWER_CREATE_DEVICE_CONTEXT sCreateDeviceContextAnswer;
+	struct SCX_ANSWER_OPEN_CLIENT_SESSION sOpenClientSessionAnswer;
+	struct SCX_ANSWER_CLOSE_CLIENT_SESSION sCloseClientSessionAnswer;
+	struct SCX_ANSWER_REGISTER_SHARED_MEMORY sRegisterSharedMemoryAnswer;
+	struct SCX_ANSWER_RELEASE_SHARED_MEMORY sReleaseSharedMemoryAnswer;
+	struct SCX_ANSWER_INVOKE_CLIENT_COMMAND sInvokeClientCommandAnswer;
+	struct SCX_ANSWER_DESTROY_DEVICE_CONTEXT sDestroyDeviceContextAnswer;
+	struct SCX_ANSWER_CANCEL_CLIENT_OPERATION sCancelClientOperationAnswer;
+};
+
+/* Structure of the Communication Buffer
+ * L1 shared buffer between the Normal World driver and the Secure World.
+ * Field-name suffixes: _S = written by the Secure World, _N = written by
+ * the Normal World (see the nSyncSerial_N/_S handshake in scxlnx_comm.c).
+ */
+struct SCHANNEL_C1S_BUFFER {
+	u32 nConfigFlags_S;
+	u32 nW3BSizeMax_S;
+	u32 nReserved0;
+	u32 nW3BSizeCurrent_S;
+	u8 sReserved1[48];
+	u8 sVersionDescription[SCX_DESCRIPTION_BUFFER_LENGTH];
+	u32 nStatus_S;
+	u32 sReserved2;
+	u32 nSyncSerial_N;
+	u32 nSyncSerial_S;
+	/* double-buffered time/timeout slots, selected by the serial's low bit */
+	u64 sTime_N[2];
+	u64 sTimeout_S[2];
+	/* command and answer queue read/write cursors */
+	u32 nFirstCommand;
+	u32 nFirstFreeCommand;
+	u32 nFirstAnswer;
+	u32 nFirstFreeAnswer;
+	u32 nW3BDescriptors[128];
+	#ifdef CONFIG_TF_ZEBRA
+	u8 sRPCTraceBuffer[140];
+	u8 sRPCShortcutBuffer[180];
+	#else
+	u8 sReserved3[320];
+	#endif
+	u32 sCommandQueue[SCX_N_MESSAGE_QUEUE_CAPACITY];
+	u32 sAnswerQueue[SCX_S_ANSWER_QUEUE_CAPACITY];
+};
+
+
+/*
+ * SCX_VERSION_INFORMATION_BUFFER structure description
+ * Description of the sVersionBuffer handed over from user space to kernel space
+ * This field is filled by the driver during a CREATE_DEVICE_CONTEXT ioctl
+ * and handed back to user space
+ */
+struct SCX_VERSION_INFORMATION_BUFFER {
+	u8 sDriverDescription[65];
+	u8 sSecureWorldDescription[65];
+};
+
+
+/* The IOCTLs the driver supports */
+#include <linux/ioctl.h>
+
+/* returns the protocol version */
+#define IOCTL_SCX_GET_VERSION _IO('z', 0)
+/* exchanges one command/answer message pair with the Secure World */
+#define IOCTL_SCX_EXCHANGE _IOWR('z', 1, union SCX_COMMAND_MESSAGE)
+/* copies out the driver/Secure World description strings */
+#define IOCTL_SCX_GET_DESCRIPTION _IOR('z', 2, \
+	struct SCX_VERSION_INFORMATION_BUFFER)
+
+#endif /* !defined(__SCX_PROTOCOL_H__) */
diff --git a/security/tf_driver/scxlnx_comm.c b/security/tf_driver/scxlnx_comm.c
new file mode 100644
index 000000000000..f3b4cb8d487f
--- /dev/null
+++ b/security/tf_driver/scxlnx_comm.c
@@ -0,0 +1,1756 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <linux/version.h>
+#include <asm/cputype.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
+#include <linux/freezer.h>
+
+#include "scxlnx_defs.h"
+#include "scxlnx_comm.h"
+#include "scx_protocol.h"
+#include "scxlnx_util.h"
+#include "scxlnx_conn.h"
+
+#ifdef CONFIG_TF_ZEBRA
+#include "scxlnx_zebra.h"
+#endif
+
+/*---------------------------------------------------------------------------
+ * Internal Constants
+ *---------------------------------------------------------------------------*/
+
+/*
+ * shared memories descriptor constants
+ * B/C/S bits of an ARM page descriptor (Bufferable, Cacheable, Shared)
+ */
+#define DESCRIPTOR_B_MASK (1 << 2)
+#define DESCRIPTOR_C_MASK (1 << 3)
+#define DESCRIPTOR_S_MASK (1 << 10)
+
+#define L1_COARSE_DESCRIPTOR_BASE (0x00000001)
+#define L1_COARSE_DESCRIPTOR_ADDR_MASK (0xFFFFFC00)
+#define L1_COARSE_DESCRIPTOR_V13_12_SHIFT (5)
+
+#define L2_PAGE_DESCRIPTOR_BASE (0x00000003)
+#define L2_PAGE_DESCRIPTOR_AP_APX_READ (0x220)
+#define L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE (0x30)
+
+#define L2_INIT_DESCRIPTOR_BASE (0x00000003)
+#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4)
+
+/*
+ * Reject an attempt to share a strongly-Ordered or Device memory
+ * Strongly-Ordered: TEX=0b000, C=0, B=0
+ * Shared Device: TEX=0b000, C=0, B=1
+ * Non-Shared Device: TEX=0b010, C=0, B=0
+ */
+#define L2_TEX_C_B_MASK \
+	((1<<8) | (1<<7) | (1<<6) | (1<<3) | (1<<2))
+#define L2_TEX_C_B_STRONGLY_ORDERED \
+	((0<<8) | (0<<7) | (0<<6) | (0<<3) | (0<<2))
+#define L2_TEX_C_B_SHARED_DEVICE \
+	((0<<8) | (0<<7) | (0<<6) | (0<<3) | (1<<2))
+#define L2_TEX_C_B_NON_SHARED_DEVICE \
+	((0<<8) | (1<<7) | (0<<6) | (0<<3) | (0<<2))
+
+/* extract the S bit and data-cache size field from the cache type register */
+#define CACHE_S(x) ((x) & (1 << 24))
+#define CACHE_DSIZE(x) (((x) >> 12) & 4095)
+
+#define TIME_IMMEDIATE ((u64) 0x0000000000000000ULL)
+#define TIME_INFINITE ((u64) 0xFFFFFFFFFFFFFFFFULL)
+
+/*---------------------------------------------------------------------------
+ * atomic operation definitions
+ *---------------------------------------------------------------------------*/
+
+/*
+ * Atomically updates the nSyncSerial_N and sTime_N register
+ * nSyncSerial_N and sTime_N modifications are thread safe
+ *
+ * Publishes the current wall-clock time (in milliseconds) to the Secure
+ * World through the L1 shared buffer.  The time is written into the slot
+ * selected by the low bit of the incremented serial, then the serial is
+ * published, so the Secure World always reads a consistent (serial, time)
+ * pair.
+ */
+void SCXLNXCommSetCurrentTime(struct SCXLNX_COMM *pComm)
+{
+	u32 nNewSyncSerial;
+	struct timeval now;
+	u64 sTime64;
+
+	/*
+	 * lock the structure while updating the L1 shared memory fields
+	 */
+	spin_lock(&pComm->lock);
+
+	/* read nSyncSerial_N and change the TimeSlot bit field */
+	nNewSyncSerial =
+		SCXLNXCommReadReg32(&pComm->pBuffer->nSyncSerial_N) + 1;
+
+	do_gettimeofday(&now);
+	sTime64 = now.tv_sec;
+	/* convert to milliseconds since the Epoch */
+	sTime64 = (sTime64 * 1000) + (now.tv_usec / 1000);
+
+	/* Write the new sTime and nSyncSerial into shared memory */
+	SCXLNXCommWriteReg64(&pComm->pBuffer->sTime_N[nNewSyncSerial &
+		SCX_SYNC_SERIAL_TIMESLOT_N], sTime64);
+	SCXLNXCommWriteReg32(&pComm->pBuffer->nSyncSerial_N,
+		nNewSyncSerial);
+
+	spin_unlock(&pComm->lock);
+}
+
+/*
+ * Performs the specific read timeout operation
+ * The difficulty here is to read atomically 2 u32
+ * values from the L1 shared buffer.
+ * This is guaranteed by reading before and after the operation
+ * the timeslot given by the Secure World
+ *
+ * Retries until nSyncSerial_S is unchanged across the 64-bit read,
+ * i.e. the Secure World did not republish the timeout mid-read.
+ */
+static inline void SCXLNXCommReadTimeout(struct SCXLNX_COMM *pComm, u64 *pTime)
+{
+	/* initial != final so the loop body runs at least once */
+	u32 nSyncSerial_S_initial = 0;
+	u32 nSyncSerial_S_final = 1;
+	u64 sTime;
+
+	spin_lock(&pComm->lock);
+
+	while (nSyncSerial_S_initial != nSyncSerial_S_final) {
+		nSyncSerial_S_initial = SCXLNXCommReadReg32(
+			&pComm->pBuffer->nSyncSerial_S);
+		/* the low bit of the serial selects the timeout slot */
+		sTime = SCXLNXCommReadReg64(
+			&pComm->pBuffer->sTimeout_S[nSyncSerial_S_initial&1]);
+
+		nSyncSerial_S_final = SCXLNXCommReadReg32(
+			&pComm->pBuffer->nSyncSerial_S);
+	}
+
+	spin_unlock(&pComm->lock);
+
+	*pTime = sTime;
+}
+
+/*----------------------------------------------------------------------------
+ * SIGKILL signal handling
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Returns true if a SIGKILL is pending for the current task, either in
+ * its private pending set or in the thread group's shared pending set.
+ */
+static bool sigkill_pending(void)
+{
+	if (signal_pending(current)) {
+		dprintk(KERN_INFO "A signal is pending\n");
+		if (sigismember(&current->pending.signal, SIGKILL)) {
+			dprintk(KERN_INFO "A SIGKILL is pending\n");
+			return true;
+		} else if (sigismember(
+			&current->signal->shared_pending.signal, SIGKILL)) {
+			dprintk(KERN_INFO "A SIGKILL is pending (shared)\n");
+			return true;
+		}
+	}
+	return false;
+}
+
+/*----------------------------------------------------------------------------
+ * Shared memory related operations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Allocates one 1KB coarse page table of the given nType from the
+ * allocation context.  Takes a descriptor from the free list when one is
+ * available; otherwise allocates a fresh zeroed page, splits it into four
+ * 1KB coarse page tables (one is returned, three go to the free list) and
+ * registers the backing array in the context.
+ *
+ * Returns NULL on allocation failure.
+ */
+struct SCXLNX_COARSE_PAGE_TABLE *SCXLNXAllocateCoarsePageTable(
+	struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
+	u32 nType)
+{
+	struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable = NULL;
+
+	spin_lock(&(pAllocationContext->lock));
+
+	if (!(list_empty(&(pAllocationContext->sFreeCoarsePageTables)))) {
+		/*
+		 * The free list can provide us a coarse page table
+		 * descriptor
+		 */
+		pCoarsePageTable = list_entry(
+			pAllocationContext->sFreeCoarsePageTables.next,
+			struct SCXLNX_COARSE_PAGE_TABLE, list);
+		list_del(&(pCoarsePageTable->list));
+
+		pCoarsePageTable->pParent->nReferenceCount++;
+	} else {
+		/* no array of coarse page tables, create a new one */
+		struct SCXLNX_COARSE_PAGE_TABLE_ARRAY *pArray;
+		void *pPage;
+		int i;
+
+		/* drop the lock across the blocking allocations below */
+		spin_unlock(&(pAllocationContext->lock));
+
+		/* first allocate a new page descriptor */
+		pArray = internal_kmalloc(sizeof(*pArray), GFP_KERNEL);
+		if (pArray == NULL) {
+			dprintk(KERN_ERR "SCXLNXAllocateCoarsePageTable(%p):"
+					" failed to allocate a table array\n",
+					pAllocationContext);
+			return NULL;
+		}
+
+		pArray->nType = nType;
+		INIT_LIST_HEAD(&(pArray->list));
+
+		/* now allocate the actual page the page descriptor describes */
+		pPage = (void *) internal_get_zeroed_page(GFP_KERNEL);
+		if (pPage == NULL) {
+			dprintk(KERN_ERR "SCXLNXAllocateCoarsePageTable(%p):"
+					" failed allocate a page\n",
+					pAllocationContext);
+			internal_kfree(pArray);
+			return NULL;
+		}
+
+		spin_lock(&(pAllocationContext->lock));
+
+		/* initialize the coarse page table descriptors
+		 * (one page holds four 1KB coarse page tables) */
+		for (i = 0; i < 4; i++) {
+			INIT_LIST_HEAD(&(pArray->sCoarsePageTables[i].list));
+			pArray->sCoarsePageTables[i].pDescriptors =
+				pPage + (i * SIZE_1KB);
+			pArray->sCoarsePageTables[i].pParent = pArray;
+
+			if (i == 0) {
+				/*
+				 * the first element is kept for the current
+				 * coarse page table allocation
+				 */
+				pCoarsePageTable =
+					&(pArray->sCoarsePageTables[i]);
+				pArray->nReferenceCount++;
+			} else {
+				/*
+				 * The other elements are added to the free list
+				 */
+				list_add(&(pArray->sCoarsePageTables[i].list),
+					&(pAllocationContext->
+						sFreeCoarsePageTables));
+			}
+		}
+
+		list_add(&(pArray->list),
+			&(pAllocationContext->sCoarsePageTableArrays));
+	}
+	spin_unlock(&(pAllocationContext->lock));
+
+	return pCoarsePageTable;
+}
+
+
+/*
+ * Returns a coarse page table to the allocation context.  When the last
+ * descriptor of a backing page is released, the page and its array are
+ * freed entirely — unless the array is preallocated and nForce == 0, in
+ * which case the descriptor simply goes back to the free list.
+ */
+void SCXLNXFreeCoarsePageTable(
+	struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
+	struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable,
+	int nForce)
+{
+	struct SCXLNX_COARSE_PAGE_TABLE_ARRAY *pArray;
+
+	spin_lock(&(pAllocationContext->lock));
+
+	pArray = pCoarsePageTable->pParent;
+
+	(pArray->nReferenceCount)--;
+
+	if (pArray->nReferenceCount == 0) {
+		/*
+		 * no coarse page table descriptor is used
+		 * check if we should free the whole page
+		 */
+
+		if ((pArray->nType == SCXLNX_PAGE_DESCRIPTOR_TYPE_PREALLOCATED)
+			&& (nForce == 0))
+			/*
+			 * This is a preallocated page,
+			 * add the page back to the free list
+			 */
+			list_add(&(pCoarsePageTable->list),
+				&(pAllocationContext->sFreeCoarsePageTables));
+		else {
+			/*
+			 * None of the page's coarse page table descriptors
+			 * are in use, free the whole page
+			 */
+			int i;
+			u32 *pDescriptors;
+
+			/*
+			 * remove the page's associated coarse page table
+			 * descriptors from the free list
+			 */
+			for (i = 0; i < 4; i++)
+				if (&(pArray->sCoarsePageTables[i]) !=
+						pCoarsePageTable)
+					list_del(&(pArray->
+						sCoarsePageTables[i].list));
+
+			pDescriptors =
+				pArray->sCoarsePageTables[0].pDescriptors;
+			pArray->sCoarsePageTables[0].pDescriptors = NULL;
+
+			/* remove the coarse page table from the array  */
+			list_del(&(pArray->list));
+
+			/* drop the lock across the blocking free calls */
+			spin_unlock(&(pAllocationContext->lock));
+			/*
+			 * Free the page.
+			 * The address of the page is contained in the first
+			 * element
+			 */
+			internal_free_page((unsigned long) pDescriptors);
+			/* finally free the array */
+			internal_kfree(pArray);
+
+			spin_lock(&(pAllocationContext->lock));
+		}
+	} else {
+		/*
+		 * Some coarse page table descriptors are in use.
+		 * Add the descriptor to the free list
+		 */
+		list_add(&(pCoarsePageTable->list),
+			&(pAllocationContext->sFreeCoarsePageTables));
+	}
+
+	spin_unlock(&(pAllocationContext->lock));
+}
+
+
+/* Initializes the lock and the two lists of an allocation context. */
+void SCXLNXInitializeCoarsePageTableAllocator(
+	struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext)
+{
+	spin_lock_init(&(pAllocationContext->lock));
+	INIT_LIST_HEAD(&(pAllocationContext->sCoarsePageTableArrays));
+	INIT_LIST_HEAD(&(pAllocationContext->sFreeCoarsePageTables));
+}
+
+/*
+ * Tears down an allocation context: frees every registered coarse page
+ * table array and its backing page.  Assumes all coarse page tables are
+ * back on the free list (no descriptors still in use by shared memory).
+ */
+void SCXLNXReleaseCoarsePageTableAllocator(
+	struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext)
+{
+	spin_lock(&(pAllocationContext->lock));
+
+	/* now clean up the list of page descriptors */
+	while (!list_empty(&(pAllocationContext->sCoarsePageTableArrays))) {
+		struct SCXLNX_COARSE_PAGE_TABLE_ARRAY *pPageDesc;
+		u32 *pDescriptors;
+
+		pPageDesc = list_entry(
+			pAllocationContext->sCoarsePageTableArrays.next,
+			struct SCXLNX_COARSE_PAGE_TABLE_ARRAY, list);
+
+		pDescriptors = pPageDesc->sCoarsePageTables[0].pDescriptors;
+		list_del(&(pPageDesc->list));
+
+		/* drop the lock across the blocking free calls */
+		spin_unlock(&(pAllocationContext->lock));
+
+		if (pDescriptors != NULL)
+			internal_free_page((unsigned long)pDescriptors);
+
+		internal_kfree(pPageDesc);
+
+		spin_lock(&(pAllocationContext->lock));
+	}
+
+	spin_unlock(&(pAllocationContext->lock));
+}
+
+/*
+ * Returns the L1 coarse page descriptor for
+ * a coarse page table located at address pCoarsePageTableDescriptors
+ *
+ * When the data cache requires 16k aliasing alignment (S bit and P bit of
+ * the cache type register set), bits 13:12 of the virtual address are also
+ * encoded into the descriptor.
+ */
+u32 SCXLNXCommGetL1CoarseDescriptor(
+	u32 pCoarsePageTableDescriptors[256])
+{
+	u32 nDescriptor = L1_COARSE_DESCRIPTOR_BASE;
+	unsigned int info = read_cpuid(CPUID_CACHETYPE);
+
+	nDescriptor |= (virt_to_phys((void *) pCoarsePageTableDescriptors)
+		& L1_COARSE_DESCRIPTOR_ADDR_MASK);
+
+	if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11))) {
+		/* NOTE(review): the log message says "V31-12" but the macro
+		 * used is V13_12 — presumably a typo in the message */
+		dprintk(KERN_DEBUG "SCXLNXCommGetL1CoarseDescriptor "
+			"V31-12 added to descriptor\n");
+		/* the 16k alignment restriction applies */
+		nDescriptor |= (DESCRIPTOR_V13_12_GET(
+			(u32)pCoarsePageTableDescriptors) <<
+				L1_COARSE_DESCRIPTOR_V13_12_SHIFT);
+	}
+
+	return nDescriptor;
+}
+
+
+#define dprintk_desc(...)
+/*
+ * Returns the L2 descriptor for the specified user page.
+ *
+ * Walks the page tables of @mm for @nVirtAddr and builds the B/C/S and TEX
+ * attribute bits of a hardware L2 page descriptor.  For a page-table
+ * mapping, the TEX bits are read from the hardware PTE, which ARM Linux
+ * keeps 2KB (0x800 bytes) below the Linux PTE.
+ *
+ * Returns 0 on any walk failure (0 is never a valid descriptor here since
+ * the caller ORs in L2_PAGE_DESCRIPTOR_BASE).
+ */
+u32 SCXLNXCommGetL2DescriptorCommon(u32 nVirtAddr, struct mm_struct *mm)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep;
+	u32 *hwpte;
+	u32 tex = 0;
+	u32 nDescriptor = 0;
+
+	dprintk_desc(KERN_INFO "VirtAddr = %x\n", nVirtAddr);
+	pgd = pgd_offset(mm, nVirtAddr);
+	dprintk_desc(KERN_INFO "pgd = %x, value=%x\n", (unsigned int) pgd,
+		(unsigned int) *pgd);
+	if (pgd_none(*pgd))
+		goto error;
+	pud = pud_offset(pgd, nVirtAddr);
+	dprintk_desc(KERN_INFO "pud = %x, value=%x\n", (unsigned int) pud,
+		(unsigned int) *pud);
+	if (pud_none(*pud))
+		goto error;
+	pmd = pmd_offset(pud, nVirtAddr);
+	dprintk_desc(KERN_INFO "pmd = %x, value=%x\n", (unsigned int) pmd,
+		(unsigned int) *pmd);
+	if (pmd_none(*pmd))
+		goto error;
+
+	if (PMD_TYPE_SECT&(*pmd)) {
+		/* We have a section */
+		dprintk_desc(KERN_INFO "Section descr=%x\n",
+			(unsigned int)*pmd);
+		if ((*pmd) & PMD_SECT_BUFFERABLE)
+			nDescriptor |= DESCRIPTOR_B_MASK;
+		if ((*pmd) & PMD_SECT_CACHEABLE)
+			nDescriptor |= DESCRIPTOR_C_MASK;
+		if ((*pmd) & PMD_SECT_S)
+			nDescriptor |= DESCRIPTOR_S_MASK;
+		tex = ((*pmd) >> 12) & 7;
+	} else {
+		/* We have a table */
+		ptep = pte_offset_map(pmd, nVirtAddr);
+		if (pte_present(*ptep)) {
+			dprintk_desc(KERN_INFO "L2 descr=%x\n",
+				(unsigned int) *ptep);
+			if ((*ptep) & L_PTE_MT_BUFFERABLE)
+				nDescriptor |= DESCRIPTOR_B_MASK;
+			if ((*ptep) & L_PTE_MT_WRITETHROUGH)
+				nDescriptor |= DESCRIPTOR_C_MASK;
+			if ((*ptep) & L_PTE_MT_DEV_SHARED)
+				nDescriptor |= DESCRIPTOR_S_MASK;
+
+			/*
+			 * Linux's pte doesn't keep track of TEX value.
+			 * Have to jump to hwpte see include/asm/pgtable.h
+			 */
+			hwpte = (u32 *) (((u32) ptep) - 0x800);
+			if (((*hwpte) & L2_DESCRIPTOR_ADDR_MASK) !=
+					((*ptep) & L2_DESCRIPTOR_ADDR_MASK)) {
+				/*
+				 * Fix: release the PTE mapping on this error
+				 * path too; the original leaked the
+				 * pte_offset_map() mapping here.
+				 */
+				pte_unmap(ptep);
+				goto error;
+			}
+			dprintk_desc(KERN_INFO "hw descr=%x\n", *hwpte);
+			tex = ((*hwpte) >> 6) & 7;
+			pte_unmap(ptep);
+		} else {
+			pte_unmap(ptep);
+			goto error;
+		}
+	}
+
+	nDescriptor |= (tex << 6);
+
+	return nDescriptor;
+
+error:
+	dprintk(KERN_ERR "Error occured in %s\n", __func__);
+	return 0;
+}
+
+
+/*
+ * Changes an L2 page descriptor back to a pointer to a physical page
+ * (masks off the attribute bits and converts the PFN to a struct page *).
+ */
+inline struct page *SCXLNXCommL2PageDescriptorToPage(u32 nL2PageDescriptor)
+{
+	return pte_page(nL2PageDescriptor & L2_DESCRIPTOR_ADDR_MASK);
+}
+
+
+/*
+ * Converts *pL2PageDescriptor in place from a (struct page *) into the
+ * hardware L2 page descriptor for that page, combining the memory
+ * attributes read from the page tables, the page's physical address and
+ * the access-permission bits derived from nFlags.
+ *
+ * On failure, *pL2PageDescriptor is set to L2_DESCRIPTOR_FAULT.
+ * The page is temporarily kmap'ed when it has no permanent kernel
+ * mapping (highmem); the mapping is always released before returning.
+ */
+void SCXLNXCommGetL2PageDescriptor(
+	u32 *pL2PageDescriptor,
+	u32 nFlags, struct mm_struct *mm)
+{
+	unsigned long nPageVirtAddr;
+	u32 nDescriptor;
+	struct page *pPage;
+	bool bUnmapPage = false;
+
+	dprintk(KERN_INFO
+		"SCXLNXCommGetL2PageDescriptor():"
+		"*pL2PageDescriptor=%x\n",
+		*pL2PageDescriptor);
+
+	if (*pL2PageDescriptor == L2_DESCRIPTOR_FAULT)
+		return;
+
+	pPage = (struct page *) (*pL2PageDescriptor);
+
+	nPageVirtAddr = (unsigned long) page_address(pPage);
+	if (nPageVirtAddr == 0) {
+		dprintk(KERN_INFO "page_address returned 0\n");
+		/* Should we use kmap_atomic(pPage, KM_USER0) instead ? */
+		nPageVirtAddr = (unsigned long) kmap(pPage);
+		if (nPageVirtAddr == 0) {
+			*pL2PageDescriptor = L2_DESCRIPTOR_FAULT;
+			dprintk(KERN_ERR "kmap returned 0\n");
+			return;
+		}
+		bUnmapPage = true;
+	}
+
+	nDescriptor = SCXLNXCommGetL2DescriptorCommon(nPageVirtAddr, mm);
+	if (nDescriptor == 0) {
+		*pL2PageDescriptor = L2_DESCRIPTOR_FAULT;
+		/*
+		 * Fix: the original returned here without releasing the
+		 * kmap taken above, leaking the highmem mapping.
+		 */
+		if (bUnmapPage)
+			kunmap(pPage);
+		return;
+	}
+	nDescriptor |= L2_PAGE_DESCRIPTOR_BASE;
+
+	nDescriptor |= (page_to_phys(pPage) & L2_DESCRIPTOR_ADDR_MASK);
+
+	if (!(nFlags & SCX_SHMEM_TYPE_WRITE))
+		/* only read access */
+		nDescriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ;
+	else
+		/* read and write access */
+		nDescriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE;
+
+	if (bUnmapPage)
+		kunmap(pPage);
+
+	*pL2PageDescriptor = nDescriptor;
+}
+
+
+/*
+ * Unlocks the physical memory pages
+ * and frees the coarse pages that need to
+ *
+ * For every valid L2 descriptor, marks the page dirty (unless reserved)
+ * and drops the reference taken when it was pinned.  Coarse page tables
+ * are returned to the allocator only for registered shared memory, or
+ * unconditionally when nFullCleanup is non-zero.
+ */
+void SCXLNXCommReleaseSharedMemory(
+	struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
+	struct SCXLNX_SHMEM_DESC *pShmemDesc,
+	u32 nFullCleanup)
+{
+	u32 nCoarsePageIndex;
+
+	dprintk(KERN_INFO "SCXLNXCommReleaseSharedMemory(%p)\n",
+		pShmemDesc);
+
+#ifdef DEBUG_COARSE_TABLES
+	printk(KERN_DEBUG "SCXLNXCommReleaseSharedMemory "
+		"- numberOfCoarsePages=%d\n",
+		pShmemDesc->nNumberOfCoarsePageTables);
+
+	for (nCoarsePageIndex = 0;
+	     nCoarsePageIndex < pShmemDesc->nNumberOfCoarsePageTables;
+	     nCoarsePageIndex++) {
+		u32 nIndex;
+
+		printk(KERN_DEBUG "  Descriptor=%p address=%p index=%d\n",
+			pShmemDesc->pCoarsePageTable[nCoarsePageIndex],
+			pShmemDesc->pCoarsePageTable[nCoarsePageIndex]->
+				pDescriptors,
+			nCoarsePageIndex);
+		if (pShmemDesc->pCoarsePageTable[nCoarsePageIndex] != NULL) {
+			for (nIndex = 0;
+			     nIndex < SCX_DESCRIPTOR_TABLE_CAPACITY;
+			     nIndex += 8) {
+				int i;
+				printk(KERN_DEBUG "   ");
+				/* NOTE(review): this prints the pDescriptors
+				 * pointer 8 times, not pDescriptors[i] —
+				 * looks like a debug-only bug; verify */
+				for (i = nIndex; i < nIndex + 8; i++)
+					printk(KERN_DEBUG "%p ",
+						pShmemDesc->pCoarsePageTable[
+							nCoarsePageIndex]->
+								pDescriptors);
+				printk(KERN_DEBUG "\n");
+			}
+		}
+	}
+	printk(KERN_DEBUG "SCXLNXCommReleaseSharedMemory() - done\n\n");
+#endif
+
+	/* Parse the coarse page descriptors */
+	for (nCoarsePageIndex = 0;
+	     nCoarsePageIndex < pShmemDesc->nNumberOfCoarsePageTables;
+	     nCoarsePageIndex++) {
+		u32 nPageIndex;
+		u32 nFoundStart = 0;
+
+		/* parse the page descriptors of the coarse page */
+		for (nPageIndex = 0;
+		     nPageIndex < SCX_DESCRIPTOR_TABLE_CAPACITY;
+		     nPageIndex++) {
+			u32 nL2PageDescriptor = (u32) (pShmemDesc->
+				pCoarsePageTable[nCoarsePageIndex]->
+					pDescriptors[nPageIndex]);
+
+			if (nL2PageDescriptor != L2_DESCRIPTOR_FAULT) {
+				struct page *page =
+					SCXLNXCommL2PageDescriptorToPage(
+						nL2PageDescriptor);
+
+				if (!PageReserved(page))
+					SetPageDirty(page);
+				/* drop the pin taken at registration time */
+				internal_page_cache_release(page);
+
+				nFoundStart = 1;
+			} else if (nFoundStart == 1) {
+				/* descriptors are contiguous: first fault
+				 * after the mapped run ends the scan */
+				break;
+			}
+		}
+
+		/*
+		 * Only free the coarse pages of descriptors not preallocated
+		 */
+		if ((pShmemDesc->nType == SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM) ||
+			(nFullCleanup != 0))
+			SCXLNXFreeCoarsePageTable(pAllocationContext,
+				pShmemDesc->pCoarsePageTable[nCoarsePageIndex],
+				0);
+	}
+
+	pShmemDesc->nNumberOfCoarsePageTables = 0;
+	dprintk(KERN_INFO "SCXLNXCommReleaseSharedMemory(%p) done\n",
+			pShmemDesc);
+}
+
+/*
+ * Make sure the coarse pages are allocated. If not allocated, do it Locks down
+ * the physical memory pages
+ * Verifies the memory attributes depending on nFlags
+ *
+ * Builds the coarse page tables describing [nBufferVAddr,
+ * nBufferVAddr + *pBufferSize): pins each backing page (get_user_pages
+ * for user buffers, vmalloc_to_page + get_page for kernel buffers),
+ * converts each page into an L2 descriptor, rejects Strongly-Ordered or
+ * Device memory, flushes the tables to RAM and returns their L1 coarse
+ * descriptors in pDescriptors[].
+ *
+ * Returns 0 on success or a negative errno; on error, pages pinned so far
+ * are released via SCXLNXCommReleaseSharedMemory.
+ */
+int SCXLNXCommFillDescriptorTable(
+	struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
+	struct SCXLNX_SHMEM_DESC *pShmemDesc,
+	u32 nBufferVAddr,
+	struct vm_area_struct **ppVmas,
+	u32 pDescriptors[SCX_MAX_COARSE_PAGES],
+	u32 *pBufferSize,
+	u32 *pBufferStartOffset,
+	bool bInUserSpace,
+	u32 nFlags,
+	u32 *pnDescriptorCount)
+{
+	u32 nCoarsePageIndex;
+	u32 nNumberOfCoarsePages;
+	u32 nPageCount;
+	u32 nPageShift = 0;
+	u32 nIndex;
+	u32 nBufferSize = *pBufferSize;
+	int nError;
+	unsigned int info = read_cpuid(CPUID_CACHETYPE);
+
+	dprintk(KERN_INFO "SCXLNXCommFillDescriptorTable"
+		"(%p, nBufferVAddr=0x%08X, size=0x%08X, user=%01x "
+		"flags = 0x%08x)\n",
+		pShmemDesc,
+		nBufferVAddr,
+		nBufferSize,
+		bInUserSpace,
+		nFlags);
+
+	/*
+	 * Compute the number of pages
+	 * Compute the number of coarse pages
+	 * Compute the page offset
+	 */
+	nPageCount = ((nBufferVAddr & ~PAGE_MASK) +
+		nBufferSize + ~PAGE_MASK) >> PAGE_SHIFT;
+
+	/* check whether the 16k alignment restriction applies */
+	if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11)))
+		/*
+		 * The 16k alignment restriction applies.
+		 * Shift data to get them 16k aligned
+		 */
+		nPageShift = DESCRIPTOR_V13_12_GET(nBufferVAddr);
+	nPageCount += nPageShift;
+
+
+	/*
+	 * Check the number of pages fit in the coarse pages
+	 */
+	if (nPageCount > (SCX_DESCRIPTOR_TABLE_CAPACITY *
+			SCX_MAX_COARSE_PAGES)) {
+		dprintk(KERN_ERR "SCXLNXCommFillDescriptorTable(%p): "
+			"%u pages required to map shared memory!\n",
+			pShmemDesc, nPageCount);
+		nError = -ENOMEM;
+		goto error;
+	}
+
+	/* coarse page describe 256 pages */
+	nNumberOfCoarsePages = ((nPageCount +
+		SCX_DESCRIPTOR_TABLE_CAPACITY_MASK) >>
+			SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT);
+
+	/*
+	 * Compute the buffer offset
+	 */
+	*pBufferStartOffset = (nBufferVAddr & ~PAGE_MASK) |
+		(nPageShift << PAGE_SHIFT);
+
+	/* map each coarse page */
+	for (nCoarsePageIndex = 0;
+	     nCoarsePageIndex < nNumberOfCoarsePages;
+	     nCoarsePageIndex++) {
+		struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable;
+
+		/* compute a virtual address with appropriate offset */
+		u32 nBufferOffsetVAddr = nBufferVAddr +
+			(nCoarsePageIndex * SCX_MAX_COARSE_PAGE_MAPPED_SIZE);
+		u32 nPagesToGet;
+
+		/*
+		 * Compute the number of pages left for this coarse page.
+		 * Decrement nPageCount each time
+		 */
+		nPagesToGet = (nPageCount >>
+			SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT) ?
+				SCX_DESCRIPTOR_TABLE_CAPACITY : nPageCount;
+		nPageCount -= nPagesToGet;
+
+		/*
+		 * Check if the coarse page has already been allocated
+		 * If not, do it now
+		 */
+		if ((pShmemDesc->nType == SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM)
+			|| (pShmemDesc->nType ==
+				SCXLNX_SHMEM_TYPE_PM_HIBERNATE)) {
+			pCoarsePageTable = SCXLNXAllocateCoarsePageTable(
+				pAllocationContext,
+				SCXLNX_PAGE_DESCRIPTOR_TYPE_NORMAL);
+
+			if (pCoarsePageTable == NULL) {
+				dprintk(KERN_ERR
+					"SCXLNXCommFillDescriptorTable(%p):"
+					" SCXLNXConnAllocateCoarsePageTable "
+					"failed for coarse page %d\n",
+					pShmemDesc, nCoarsePageIndex);
+				nError = -ENOMEM;
+				goto error;
+			}
+
+			pShmemDesc->pCoarsePageTable[nCoarsePageIndex] =
+				pCoarsePageTable;
+		} else {
+			pCoarsePageTable =
+				pShmemDesc->pCoarsePageTable[nCoarsePageIndex];
+		}
+
+		/*
+		 * The page is not necessarily filled with zeroes.
+		 * Set the fault descriptors ( each descriptor is 4 bytes long)
+		 */
+		memset(pCoarsePageTable->pDescriptors, 0x00,
+			SCX_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
+
+		if (bInUserSpace) {
+			int nPages;
+
+			/*
+			 * TRICK: use pCoarsePageDescriptor->pDescriptors to
+			 * hold the (struct page*) items before getting their
+			 * physical address
+			 */
+			down_read(&(current->mm->mmap_sem));
+			nPages = internal_get_user_pages(
+				current,
+				current->mm,
+				nBufferOffsetVAddr,
+				/*
+				 * nPageShift is cleared after retrieving first
+				 * coarse page
+				 */
+				(nPagesToGet - nPageShift),
+				(nFlags & SCX_SHMEM_TYPE_WRITE) ? 1 : 0,
+				0,
+				(struct page **) (pCoarsePageTable->pDescriptors
+					+ nPageShift),
+				ppVmas);
+			up_read(&(current->mm->mmap_sem));
+
+			/* NOTE(review): on partial success the pages already
+			 * pinned by this call are released only via the error
+			 * path below; verify ReleaseSharedMemory sees them
+			 * (nNumberOfCoarsePageTables is still the old value
+			 * at this point) */
+			if ((nPages <= 0) ||
+				(nPages != (nPagesToGet - nPageShift))) {
+				dprintk(KERN_ERR"SCXLNXCommFillDescriptorTable:"
+					" get_user_pages got %d pages while "
+					"trying to get %d pages!\n",
+					nPages, nPagesToGet - nPageShift);
+				nError = -EFAULT;
+				goto error;
+			}
+
+			for (nIndex = nPageShift;
+			     nIndex < nPageShift + nPages;
+			     nIndex++) {
+				/* Get the actual L2 descriptors */
+				SCXLNXCommGetL2PageDescriptor(
+					&pCoarsePageTable->pDescriptors[nIndex],
+					nFlags,
+					current->mm);
+				/*
+				 * Reject Strongly-Ordered or Device Memory
+				 */
+#define IS_STRONGLY_ORDERED_OR_DEVICE_MEM(x) \
+	((((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_STRONGLY_ORDERED) || \
+	 (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_SHARED_DEVICE) || \
+	 (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_NON_SHARED_DEVICE))
+
+				if (IS_STRONGLY_ORDERED_OR_DEVICE_MEM(
+					pCoarsePageTable->
+						pDescriptors[nIndex])) {
+					dprintk(KERN_ERR
+						"SCXLNXCommFillDescriptorTable:"
+						" descriptor 0x%08X use "
+						"strongly-ordered or device "
+						"memory. Rejecting!\n",
+						pCoarsePageTable->
+							pDescriptors[nIndex]);
+					nError = -EFAULT;
+					goto error;
+				}
+			}
+		} else {
+			/* Kernel-space memory */
+			for (nIndex = nPageShift;
+			     nIndex < nPagesToGet;
+			     nIndex++) {
+				unsigned long addr =
+					(unsigned long) (nBufferOffsetVAddr +
+						((nIndex - nPageShift) *
+							PAGE_SIZE));
+				/* assumes the kernel buffer is vmalloc'ed —
+				 * vmalloc_to_page would return NULL for
+				 * lowmem/linear addresses; TODO confirm */
+				pCoarsePageTable->pDescriptors[nIndex] =
+					(u32) vmalloc_to_page((void *)addr);
+				get_page((struct page *) pCoarsePageTable->
+					pDescriptors[nIndex]);
+
+				/* change coarse page "page address" */
+				SCXLNXCommGetL2PageDescriptor(
+					&pCoarsePageTable->pDescriptors[nIndex],
+					nFlags,
+					&init_mm);
+			}
+		}
+
+		/* flush the table to RAM so the Secure World sees it */
+		dmac_flush_range((void *)pCoarsePageTable->pDescriptors,
+			(void *)(((u32)(pCoarsePageTable->pDescriptors)) +
+				SCX_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32)));
+
+		outer_clean_range(
+			__pa(pCoarsePageTable->pDescriptors),
+			__pa(pCoarsePageTable->pDescriptors) +
+				SCX_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
+		wmb();
+
+		/* Update the coarse page table address */
+		pDescriptors[nCoarsePageIndex] =
+			SCXLNXCommGetL1CoarseDescriptor(
+				pCoarsePageTable->pDescriptors);
+
+		/*
+		 * The next coarse page has no page shift, reset the
+		 * nPageShift
+		 */
+		nPageShift = 0;
+	}
+
+	*pnDescriptorCount = nNumberOfCoarsePages;
+	pShmemDesc->nNumberOfCoarsePageTables = nNumberOfCoarsePages;
+
+#ifdef DEBUG_COARSE_TABLES
+	printk(KERN_DEBUG "nSCXLNXCommFillDescriptorTable - size=0x%08X "
+		"numberOfCoarsePages=%d\n", *pBufferSize,
+		pShmemDesc->nNumberOfCoarsePageTables);
+	for (nCoarsePageIndex = 0;
+	     nCoarsePageIndex < pShmemDesc->nNumberOfCoarsePageTables;
+	     nCoarsePageIndex++) {
+		u32 nIndex;
+		struct SCXLNX_COARSE_PAGE_TABLE *pCorsePageTable =
+			pShmemDesc->pCoarsePageTable[nCoarsePageIndex];
+
+		printk(KERN_DEBUG "  Descriptor=%p address=%p index=%d\n",
+			pCorsePageTable,
+			pCorsePageTable->pDescriptors,
+			nCoarsePageIndex);
+		for (nIndex = 0;
+		     nIndex < SCX_DESCRIPTOR_TABLE_CAPACITY;
+		     nIndex += 8) {
+			int i;
+			printk(KERN_DEBUG "   ");
+			for (i = nIndex; i < nIndex + 8; i++)
+				printk(KERN_DEBUG "0x%08X ",
+					pCorsePageTable->pDescriptors[i]);
+			printk(KERN_DEBUG "\n");
+		}
+	}
+	printk(KERN_DEBUG "nSCXLNXCommFillDescriptorTable() - done\n\n");
+#endif
+
+	return 0;
+
+error:
+	SCXLNXCommReleaseSharedMemory(
+		pAllocationContext,
+		pShmemDesc,
+		0);
+
+	return nError;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Standard communication operations
+ *----------------------------------------------------------------------------*/
+
+u8 *SCXLNXCommGetDescription(struct SCXLNX_COMM *pComm)
+{
+ if (test_bit(SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED, &(pComm->nFlags)))
+ return pComm->pBuffer->sVersionDescription;
+
+ return NULL;
+}
+
+/*
+ * Returns a non-zero value if the specified S-timeout has expired, zero
+ * otherwise.
+ *
+ * The placeholder referenced to by pnRelativeTimeoutJiffies gives the relative
+ * timeout from now in jiffies. It is set to zero if the S-timeout has expired,
+ * or to MAX_SCHEDULE_TIMEOUT if the S-timeout is infinite.
+ *
+ * sTimeout is an absolute deadline in milliseconds since the Epoch
+ * (same scale as the time published by SCXLNXCommSetCurrentTime).
+ */
+static int SCXLNXCommTestSTimeout(
+		u64 sTimeout,
+		signed long *pnRelativeTimeoutJiffies)
+{
+	struct timeval now;
+	u64 sTime64;
+
+	*pnRelativeTimeoutJiffies = 0;
+
+	/* immediate timeout */
+	if (sTimeout == TIME_IMMEDIATE)
+		return 1;
+
+	/* infinite timeout */
+	if (sTimeout == TIME_INFINITE) {
+		dprintk(KERN_DEBUG "SCXLNXCommTestSTimeout: "
+			"timeout is infinite\n");
+		*pnRelativeTimeoutJiffies = MAX_SCHEDULE_TIMEOUT;
+		return 0;
+	}
+
+	do_gettimeofday(&now);
+	sTime64 = now.tv_sec;
+	/* will not overflow as operations are done on 64bit values */
+	sTime64 = (sTime64 * 1000) + (now.tv_usec / 1000);
+
+	/* timeout expired */
+	if (sTime64 >= sTimeout) {
+		dprintk(KERN_DEBUG "SCXLNXCommTestSTimeout: timeout expired\n");
+		return 1;
+	}
+
+	/*
+	 * finite timeout, compute pnRelativeTimeoutJiffies
+	 */
+	/* will not overflow as sTime64 < sTimeout */
+	sTimeout -= sTime64;
+
+	/* guarantee *pnRelativeTimeoutJiffies is a valid timeout */
+	if ((sTimeout >> 32) != 0)
+		*pnRelativeTimeoutJiffies = MAX_JIFFY_OFFSET;
+	else
+		*pnRelativeTimeoutJiffies =
+			msecs_to_jiffies((unsigned int) sTimeout);
+
+	dprintk(KERN_DEBUG "SCXLNXCommTestSTimeout: timeout is 0x%lx\n",
+		*pnRelativeTimeoutJiffies);
+	return 0;
+}
+
+/*
+ * Drains the answer queue of the L1 shared buffer under pComm->lock.
+ * Each answer is copied into the SCXLNX_ANSWER_STRUCT whose address the
+ * secure world echoed back in the answer's nOperationID field, and that
+ * structure's bAnswerCopied flag is set so the waiting sender can
+ * complete.
+ *
+ * NOTE(review): nOperationID carries a kernel pointer cast to u32 --
+ * this assumes 32-bit kernel virtual addresses; confirm on any port.
+ */
+static void tf_copy_answers(struct SCXLNX_COMM *pComm)
+{
+	u32 nFirstAnswer;
+	u32 nFirstFreeAnswer;
+	struct SCXLNX_ANSWER_STRUCT *pAnswerStructureTemp;
+
+	if (test_bit(SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED, &(pComm->nFlags))) {
+		spin_lock(&pComm->lock);
+		nFirstFreeAnswer = SCXLNXCommReadReg32(
+			&pComm->pBuffer->nFirstFreeAnswer);
+		nFirstAnswer = SCXLNXCommReadReg32(
+			&pComm->pBuffer->nFirstAnswer);
+
+		while (nFirstAnswer != nFirstFreeAnswer) {
+			/* answer queue not empty */
+			union SCX_ANSWER_MESSAGE sComAnswer;
+			struct SCX_ANSWER_HEADER sHeader;
+
+			/*
+			 * the size of the command in words of 32bit, not in
+			 * bytes
+			 */
+			u32 nCommandSize;
+			u32 i;
+			u32 *pTemp = (uint32_t *) &sHeader;
+
+			dprintk(KERN_INFO
+				"[pid=%d] tf_copy_answers(%p): "
+				"Read answers from L1\n",
+				current->pid, pComm);
+
+			/* Read the answer header */
+			for (i = 0;
+			     i < sizeof(struct SCX_ANSWER_HEADER)/sizeof(u32);
+			     i++)
+				pTemp[i] = pComm->pBuffer->sAnswerQueue[
+					(nFirstAnswer + i) %
+					SCX_S_ANSWER_QUEUE_CAPACITY];
+
+			/* Read the answer from the L1_Buffer*/
+			nCommandSize = sHeader.nMessageSize +
+				sizeof(struct SCX_ANSWER_HEADER)/sizeof(u32);
+			pTemp = (uint32_t *) &sComAnswer;
+			for (i = 0; i < nCommandSize; i++)
+				pTemp[i] = pComm->pBuffer->sAnswerQueue[
+					(nFirstAnswer + i) %
+					SCX_S_ANSWER_QUEUE_CAPACITY];
+
+			/* Recover the sender's answer slot from the echo */
+			pAnswerStructureTemp = (struct SCXLNX_ANSWER_STRUCT *)
+				sComAnswer.sHeader.nOperationID;
+
+			SCXLNXDumpAnswer(&sComAnswer);
+
+			memcpy(pAnswerStructureTemp->pAnswer, &sComAnswer,
+				nCommandSize * sizeof(u32));
+			pAnswerStructureTemp->bAnswerCopied = true;
+
+			/* Consume the answer and publish the new read index */
+			nFirstAnswer += nCommandSize;
+			SCXLNXCommWriteReg32(&pComm->pBuffer->nFirstAnswer,
+				nFirstAnswer);
+		}
+		spin_unlock(&(pComm->lock));
+	}
+}
+
+/*
+ * Copies the command message, if still pending, into the L1 shared
+ * buffer command queue under pComm->lock.
+ *
+ * When pConn is not NULL the connection state is validated first:
+ * a CREATE_DEVICE_CONTEXT moves the connection to
+ * CREATE_DEVICE_CONTEXT_SENT, a DESTROY_DEVICE_CONTEXT on a valid
+ * context moves it to DESTROY_DEVICE_CONTEXT_SENT, and any other
+ * command on an invalid context aborts (*command_status set to
+ * ABORTED, nothing queued).  If the queue is full the command stays
+ * PENDING and will be retried on a later call.
+ */
+static void tf_copy_command(
+	struct SCXLNX_COMM *pComm,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	struct SCXLNX_CONNECTION *pConn,
+	enum SCXLNX_COMMAND_STATE *command_status)
+{
+	if ((test_bit(SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED, &(pComm->nFlags)))
+		&& (pMessage != NULL)) {
+		/*
+		 * Write the message in the message queue.
+		 */
+
+		if (*command_status == SCXLNX_COMMAND_STATE_PENDING) {
+			u32 nCommandSize;
+			u32 nQueueWordsCount;
+			u32 i;
+			u32 nFirstFreeCommand;
+			u32 nFirstCommand;
+
+			spin_lock(&pComm->lock);
+
+			nFirstCommand = SCXLNXCommReadReg32(
+				&pComm->pBuffer->nFirstCommand);
+			nFirstFreeCommand = SCXLNXCommReadReg32(
+				&pComm->pBuffer->nFirstFreeCommand);
+
+			/* Sizes are in 32-bit words, not bytes */
+			nQueueWordsCount = nFirstFreeCommand - nFirstCommand;
+			nCommandSize = pMessage->sHeader.nMessageSize +
+				sizeof(struct SCX_COMMAND_HEADER)/sizeof(u32);
+			if ((nQueueWordsCount + nCommandSize) <
+				SCX_N_MESSAGE_QUEUE_CAPACITY) {
+				/*
+				 * Command queue is not full.
+				 * If the Command queue is full,
+				 * the command will be copied at
+				 * another iteration
+				 * of the current function.
+				 */
+
+				/*
+				 * Change the conn state
+				 */
+				if (pConn == NULL)
+					goto copy;
+
+				spin_lock(&(pConn->stateLock));
+
+				if ((pConn->nState ==
+				SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT)
+				&&
+				(pMessage->sHeader.nMessageType ==
+				SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
+
+					dprintk(KERN_INFO
+					"tf_copy_command(%p):"
+					"Conn state is DEVICE_CONTEXT_SENT\n",
+					 pConn);
+					pConn->nState =
+			SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT;
+				} else if ((pConn->nState !=
+				SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT)
+				&&
+				(pMessage->sHeader.nMessageType !=
+				SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
+					/* The connection
+					 * is no longer valid.
+					 * We may not send any command on it,
+					 * not even another
+					 * DESTROY_DEVICE_CONTEXT.
+					 */
+					dprintk(KERN_INFO
+						"[pid=%d] tf_copy_command(%p): "
+						"Connection no longer valid."
+						"ABORT\n",
+						current->pid, pConn);
+					*command_status =
+						SCXLNX_COMMAND_STATE_ABORTED;
+					spin_unlock(
+						&(pConn->stateLock));
+					spin_unlock(
+						&pComm->lock);
+					return;
+				} else if (
+					(pMessage->sHeader.nMessageType ==
+				SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) &&
+				(pConn->nState ==
+				SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT)
+					) {
+					dprintk(KERN_INFO
+					"[pid=%d] tf_copy_command(%p): "
+					"Conn state is "
+					"DESTROY_DEVICE_CONTEXT_SENT\n",
+					current->pid, pConn);
+					pConn->nState =
+			SCXLNX_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT;
+				}
+				spin_unlock(&(pConn->stateLock));
+copy:
+				/*
+				 * Copy the command to L1 Buffer
+				 */
+				dprintk(KERN_INFO
+				"[pid=%d] tf_copy_command(%p): "
+				"Write Message in the queue\n",
+				current->pid, pMessage);
+				SCXLNXDumpMessage(pMessage);
+
+				for (i = 0; i < nCommandSize; i++)
+					pComm->pBuffer->sCommandQueue[
+					(nFirstFreeCommand + i) %
+					SCX_N_MESSAGE_QUEUE_CAPACITY] =
+					((uint32_t *) pMessage)[i];
+
+				*command_status =
+					SCXLNX_COMMAND_STATE_SENT;
+				nFirstFreeCommand += nCommandSize;
+
+				/* Publish the new write index */
+				SCXLNXCommWriteReg32(
+					&pComm->
+					pBuffer->nFirstFreeCommand,
+					nFirstFreeCommand);
+			}
+			spin_unlock(&pComm->lock);
+		}
+	}
+}
+
+/*
+ * Sends the specified message through the specified communication channel.
+ *
+ * This function sends the command and waits for the answer
+ *
+ * The caller's answer slot is *pAnswerStruct; tf_copy_answers() sets
+ * pAnswerStruct->bAnswerCopied once the secure world has answered.
+ * When bKillable is set, a pending SIGKILL makes the function return
+ * -EINTR (command not yet sent) or -EIO (sent, answer still pending).
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static int tf_send_recv(struct SCXLNX_COMM *pComm,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	struct SCXLNX_ANSWER_STRUCT *pAnswerStruct,
+	struct SCXLNX_CONNECTION *pConn,
+	int bKillable
+	#ifdef CONFIG_TF_ZEBRA
+	, bool *secure_is_idle
+	#endif
+	)
+{
+	int result;
+	u64 sTimeout;
+	signed long nRelativeTimeoutJiffies;
+	bool wait_prepared = false;
+	enum SCXLNX_COMMAND_STATE command_status = SCXLNX_COMMAND_STATE_PENDING;
+	DEFINE_WAIT(wait);
+#ifdef CONFIG_FREEZER
+	unsigned long saved_flags;
+#endif
+	dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p)\n",
+		current->pid, pMessage);
+
+#ifdef CONFIG_FREEZER
+	/* Freeze without signals while exchanging with the secure world */
+	saved_flags = current->flags;
+	current->flags |= PF_FREEZER_NOSIG;
+#endif
+
+	/*
+	 * Read all answers from the answer queue
+	 * (main loop: drain answers, push our command, then either yield
+	 * to the secure world or sleep until something changes)
+	 */
+copy_answers:
+	tf_copy_answers(pComm);
+
+	tf_copy_command(pComm, pMessage, pConn, &command_status);
+
+	/*
+	 * Notify all waiting threads
+	 */
+	wake_up(&(pComm->waitQueue));
+
+#ifdef CONFIG_FREEZER
+	if (unlikely(freezing(current))) {
+
+#ifdef CONFIG_TF_ZEBRA
+		/* Let the secure clock domain idle before freezing */
+		if (!(*secure_is_idle)) {
+			if (tf_schedule_secure_world(pComm, true) ==
+				STATUS_PENDING)
+				goto copy_answers;
+
+			tf_l4sec_clkdm_allow_idle(true, true);
+			*secure_is_idle = true;
+		}
+#endif
+
+		dprintk(KERN_INFO
+			"Entering refrigerator.\n");
+		refrigerator();
+		dprintk(KERN_INFO
+			"Left refrigerator.\n");
+		goto copy_answers;
+	}
+#endif
+
+#ifndef CONFIG_PREEMPT
+	if (need_resched())
+		schedule();
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+	/*
+	 * Handle RPC (if any)
+	 */
+	if (SCXLNXCommExecuteRPCCommand(pComm) == RPC_NON_YIELD)
+		goto schedule_secure_world;
+#endif
+
+	/*
+	 * Join wait queue
+	 */
+	/*dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p): Prepare to wait\n",
+		current->pid, pMessage);*/
+	prepare_to_wait(&pComm->waitQueue, &wait,
+			bKillable ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+	wait_prepared = true;
+
+	/*
+	 * Check if our answer is available
+	 */
+	if (command_status == SCXLNX_COMMAND_STATE_ABORTED) {
+		/* Not waiting for an answer, return error code */
+		result = -EINTR;
+		dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+			"Command status is ABORTED."
+			"Exit with 0x%x\n",
+			current->pid, result);
+		goto exit;
+	}
+	if (pAnswerStruct->bAnswerCopied) {
+		dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
+			"Received answer (type 0x%02X)\n",
+			current->pid,
+			pAnswerStruct->pAnswer->sHeader.nMessageType);
+		result = 0;
+		goto exit;
+	}
+
+	/*
+	 * Check if a signal is pending
+	 */
+	if (bKillable && (sigkill_pending())) {
+		if (command_status == SCXLNX_COMMAND_STATE_PENDING)
+			/*Command was not sent. */
+			result = -EINTR;
+		else
+			/* Command was sent but no answer was received yet. */
+			result = -EIO;
+
+		dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+			"Signal Pending. Return error %d\n",
+			current->pid, result);
+		goto exit;
+	}
+
+	/*
+	 * Check if secure world is schedulable. It is schedulable if at
+	 * least one of the following conditions holds:
+	 * + it is still initializing (SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED
+	 *   is not set);
+	 * + there is a command in the queue;
+	 * + the secure world timeout is zero.
+	 */
+	if (test_bit(SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED, &(pComm->nFlags))) {
+		u32 nFirstFreeCommand;
+		u32 nFirstCommand;
+		spin_lock(&pComm->lock);
+		nFirstCommand = SCXLNXCommReadReg32(
+			&pComm->pBuffer->nFirstCommand);
+		nFirstFreeCommand = SCXLNXCommReadReg32(
+			&pComm->pBuffer->nFirstFreeCommand);
+		spin_unlock(&pComm->lock);
+		SCXLNXCommReadTimeout(pComm, &sTimeout);
+		if ((nFirstFreeCommand == nFirstCommand) &&
+			(SCXLNXCommTestSTimeout(sTimeout,
+				&nRelativeTimeoutJiffies) == 0))
+			/*
+			 * If command queue is empty and if timeout has not
+			 * expired secure world is not schedulable
+			 */
+			goto wait;
+	}
+
+	finish_wait(&pComm->waitQueue, &wait);
+	wait_prepared = false;
+
+	/*
+	 * Yield to the Secure World
+	 */
+#ifdef CONFIG_TF_ZEBRA
+schedule_secure_world:
+	if (*secure_is_idle) {
+		tf_l4sec_clkdm_wakeup(true, true);
+		*secure_is_idle = false;
+	}
+#endif
+
+	result = tf_schedule_secure_world(pComm, false);
+	if (result < 0)
+		goto exit;
+	goto copy_answers;
+
+	/* Reached while still on the wait queue (wait_prepared == true) */
+wait:
+	if (bKillable && (sigkill_pending())) {
+		if (command_status == SCXLNX_COMMAND_STATE_PENDING)
+			result = -EINTR; /* Command was not sent. */
+		else
+			/* Command was sent but no answer was received yet. */
+			result = -EIO;
+
+		dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+			"Signal Pending while waiting. Return error %d\n",
+			current->pid, result);
+		goto exit;
+	}
+
+	if (nRelativeTimeoutJiffies == MAX_SCHEDULE_TIMEOUT)
+		dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
+			"prepare to sleep infinitely\n", current->pid);
+	else
+		dprintk(KERN_INFO "tf_send_recv: "
+			"prepare to sleep 0x%lx jiffies\n",
+			nRelativeTimeoutJiffies);
+
+#ifdef CONFIG_TF_ZEBRA
+	if (!(*secure_is_idle)) {
+		if (tf_schedule_secure_world(pComm, true) == STATUS_PENDING) {
+			finish_wait(&pComm->waitQueue, &wait);
+			wait_prepared = false;
+			goto copy_answers;
+		}
+		tf_l4sec_clkdm_allow_idle(true, true);
+		*secure_is_idle = true;
+	}
+#endif
+
+	/* go to sleep */
+	if (schedule_timeout(nRelativeTimeoutJiffies) == 0)
+		dprintk(KERN_INFO
+			"tf_send_recv: timeout expired\n");
+	else
+		dprintk(KERN_INFO
+			"tf_send_recv: signal delivered\n");
+
+	finish_wait(&pComm->waitQueue, &wait);
+	wait_prepared = false;
+	goto copy_answers;
+
+	/* Common exit: leave the wait queue and restore freezer flags */
+exit:
+	if (wait_prepared) {
+		finish_wait(&pComm->waitQueue, &wait);
+		wait_prepared = false;
+	}
+
+#ifdef CONFIG_TF_ZEBRA
+	if ((!(*secure_is_idle)) && (result != -EIO)) {
+		if (tf_schedule_secure_world(pComm, true) == STATUS_PENDING)
+			goto copy_answers;
+
+		tf_l4sec_clkdm_allow_idle(true, true);
+		*secure_is_idle = true;
+	}
+#endif
+
+#ifdef CONFIG_FREEZER
+	current->flags &= ~(PF_FREEZER_NOSIG);
+	current->flags |= (saved_flags & PF_FREEZER_NOSIG);
+#endif
+
+	return result;
+}
+
+/*
+ * Sends the specified message through the specified communication channel.
+ *
+ * This function sends the message and waits for the corresponding answer
+ * It may return if a signal needs to be delivered.
+ *
+ * If pConn is not NULL, the connection state is checked and updated
+ * around the exchange (see tf_copy_command()).
+ *
+ * If a CREATE_DEVICE_CONTEXT exchange is interrupted, a matching
+ * DESTROY_DEVICE_CONTEXT is sent so the secure world is left consistent.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+int SCXLNXCommSendReceive(struct SCXLNX_COMM *pComm,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	union SCX_ANSWER_MESSAGE *pAnswer,
+	struct SCXLNX_CONNECTION *pConn,
+	bool bKillable)
+{
+	int nError;
+	struct SCXLNX_ANSWER_STRUCT sAnswerStructure;
+#ifdef CONFIG_SMP
+	long ret_affinity;
+	cpumask_t saved_cpu_mask;
+	cpumask_t local_cpu_mask = CPU_MASK_NONE;
+#endif
+#ifdef CONFIG_TF_ZEBRA
+	bool secure_is_idle = true;
+#endif
+
+	sAnswerStructure.pAnswer = pAnswer;
+	sAnswerStructure.bAnswerCopied = false;
+
+	/*
+	 * The on-stack answer structure's address rides in nOperationID
+	 * and is echoed back by the secure world.
+	 * NOTE(review): pointer-in-u32 assumes 32-bit kernel addresses.
+	 */
+	if (pMessage != NULL)
+		pMessage->sHeader.nOperationID = (u32) &sAnswerStructure;
+
+	dprintk(KERN_INFO "SCXLNXSMCommSendReceive: "
+		"tf_send_recv\n");
+
+#ifdef CONFIG_TF_ZEBRA
+	if (!test_bit(SCXLNX_COMM_FLAG_PA_AVAILABLE, &pComm->nFlags)) {
+		dprintk(KERN_ERR "SCXLNXCommSendReceive(%p): "
+			"Secure world not started\n", pComm);
+
+		return -EFAULT;
+	}
+#endif
+
+	if (test_bit(SCXLNX_COMM_FLAG_TERMINATING, &(pComm->nFlags)) != 0) {
+		dprintk(KERN_DEBUG "SCXLNXSMCommSendReceive: "
+			"Flag Terminating is set\n");
+		return 0;
+	}
+
+#ifdef CONFIG_SMP
+	/*
+	 * Restrict execution to CPU #0 for the whole exchange; the
+	 * original affinity is restored at exit.
+	 */
+	cpu_set(0, local_cpu_mask);
+	sched_getaffinity(0, &saved_cpu_mask);
+	ret_affinity = sched_setaffinity(0, &local_cpu_mask);
+	if (ret_affinity != 0)
+		dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity);
+#endif
+
+
+	/*
+	 * Send the command
+	 */
+	nError = tf_send_recv(pComm,
+		pMessage, &sAnswerStructure, pConn, bKillable
+		#ifdef CONFIG_TF_ZEBRA
+		, &secure_is_idle
+		#endif
+		);
+
+	if (!bKillable && sigkill_pending()) {
+		if ((pMessage->sHeader.nMessageType ==
+			SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) &&
+			(pAnswer->sCreateDeviceContextAnswer.nErrorCode ==
+				S_SUCCESS)) {
+
+			/*
+			 * CREATE_DEVICE_CONTEXT was interrupted.
+			 */
+			dprintk(KERN_INFO "SCXLNXSMCommSendReceive: "
+				"sending DESTROY_DEVICE_CONTEXT\n");
+			sAnswerStructure.pAnswer = pAnswer;
+			sAnswerStructure.bAnswerCopied = false;
+
+			pMessage->sHeader.nMessageType =
+				SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+			pMessage->sHeader.nMessageSize =
+				(sizeof(struct
+					SCX_COMMAND_DESTROY_DEVICE_CONTEXT) -
+				 sizeof(struct SCX_COMMAND_HEADER))/sizeof(u32);
+			pMessage->sHeader.nOperationID =
+				(u32) &sAnswerStructure;
+			pMessage->sDestroyDeviceContextMessage.hDeviceContext =
+				pAnswer->sCreateDeviceContextAnswer.
+					hDeviceContext;
+
+			goto destroy_context;
+		}
+	}
+
+	if (nError == 0) {
+		/*
+		 * tf_send_recv returned Success.
+		 */
+		if (pMessage->sHeader.nMessageType ==
+		SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) {
+			spin_lock(&(pConn->stateLock));
+			pConn->nState = SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT;
+			spin_unlock(&(pConn->stateLock));
+		} else if (pMessage->sHeader.nMessageType ==
+		SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
+			spin_lock(&(pConn->stateLock));
+			pConn->nState = SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT;
+			spin_unlock(&(pConn->stateLock));
+		}
+	} else if (nError == -EINTR) {
+		/*
+		 * No command was sent, return failure.
+		 */
+		dprintk(KERN_ERR
+			"SCXLNXSMCommSendReceive: "
+			"tf_send_recv failed (error %d) !\n",
+			nError);
+	} else if (nError == -EIO) {
+		/*
+		 * A command was sent but its answer is still pending.
+		 */
+
+		/* means bKillable is true */
+		dprintk(KERN_ERR
+			"SCXLNXSMCommSendReceive: "
+			"tf_send_recv interrupted (error %d)."
+			"Send DESTROY_DEVICE_CONTEXT.\n", nError);
+
+		/* Send the DESTROY_DEVICE_CONTEXT. */
+		sAnswerStructure.pAnswer = pAnswer;
+		sAnswerStructure.bAnswerCopied = false;
+
+		pMessage->sHeader.nMessageType =
+			SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+		pMessage->sHeader.nMessageSize =
+			(sizeof(struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT) -
+				sizeof(struct SCX_COMMAND_HEADER))/sizeof(u32);
+		pMessage->sHeader.nOperationID =
+			(u32) &sAnswerStructure;
+		pMessage->sDestroyDeviceContextMessage.hDeviceContext =
+			pConn->hDeviceContext;
+
+		nError = tf_send_recv(pComm,
+			pMessage, &sAnswerStructure, pConn, false
+			#ifdef CONFIG_TF_ZEBRA
+			, &secure_is_idle
+			#endif
+			);
+		if (nError == -EINTR) {
+			/*
+			 * Another thread already sent
+			 * DESTROY_DEVICE_CONTEXT.
+			 * We must still wait for the answer
+			 * to the original command.
+			 */
+			pMessage = NULL;
+			goto destroy_context;
+		} else {
+			/* An answer was received.
+			 * Check if it is the answer
+			 * to the DESTROY_DEVICE_CONTEXT.
+			 */
+			spin_lock(&pComm->lock);
+			if (pAnswer->sHeader.nMessageType !=
+			SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
+				sAnswerStructure.bAnswerCopied = false;
+			}
+			spin_unlock(&pComm->lock);
+			if (!sAnswerStructure.bAnswerCopied) {
+				/* Answer to DESTROY_DEVICE_CONTEXT
+				 * was not yet received.
+				 * Wait for the answer.
+				 */
+				dprintk(KERN_INFO
+					"[pid=%d] SCXLNXCommSendReceive:"
+					"Answer to DESTROY_DEVICE_CONTEXT"
+					"not yet received.Retry\n",
+					current->pid);
+				pMessage = NULL;
+				goto destroy_context;
+			}
+		}
+	}
+
+	dprintk(KERN_INFO "SCXLNXCommSendReceive(): Message answer ready\n");
+	goto exit;
+
+destroy_context:
+	nError = tf_send_recv(pComm,
+		pMessage, &sAnswerStructure, pConn, false
+		#ifdef CONFIG_TF_ZEBRA
+		, &secure_is_idle
+		#endif
+		);
+
+	/*
+	 * tf_send_recv cannot return an error because
+	 * it's not killable and not within a connection
+	 */
+	BUG_ON(nError != 0);
+
+	/* Reset the state, so a new CREATE DEVICE CONTEXT can be sent */
+	spin_lock(&(pConn->stateLock));
+	pConn->nState = SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT;
+	spin_unlock(&(pConn->stateLock));
+
+exit:
+
+#ifdef CONFIG_SMP
+	/* Restore the caller's original CPU affinity */
+	ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
+	if (ret_affinity != 0)
+		dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity);
+#endif
+	return nError;
+}
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+
+/*
+ * Handles all the power management calls.
+ * The nOperation is the type of power management
+ * operation to be performed.
+ *
+ * This routine will only return if a failure occurred or if
+ * the required power management is of type "resume".
+ * "Hibernate" and "Shutdown" should lock when doing the
+ * corresponding SMC to the Secure World
+ *
+ * Returns zero on success, -ENOTTY when the operation is not allowed
+ * in the current secure world power state, or the error reported by
+ * the underlying SCXLNXComm{Shutdown,Hibernate,Resume} call.
+ */
+int SCXLNXCommPowerManagement(struct SCXLNX_COMM *pComm,
+	enum SCXLNX_POWER_OPERATION nOperation)
+{
+	u32 nStatus;
+	int nError = 0;
+
+	dprintk(KERN_INFO "SCXLNXCommPowerManagement(%d)\n", nOperation);
+
+#ifdef CONFIG_TF_ZEBRA
+	if (!test_bit(SCXLNX_COMM_FLAG_PA_AVAILABLE, &pComm->nFlags)) {
+		dprintk(KERN_INFO "SCXLNXCommPowerManagement(%p): "
+			"succeeded (not started)\n", pComm);
+
+		return 0;
+	}
+#endif
+
+	/* Current secure world power state, read from the L1 status word */
+	nStatus = ((SCXLNXCommReadReg32(&(pComm->pBuffer->nStatus_S))
+		& SCX_STATUS_POWER_STATE_MASK)
+		>> SCX_STATUS_POWER_STATE_SHIFT);
+
+	switch (nOperation) {
+	case SCXLNX_POWER_OPERATION_SHUTDOWN:
+		switch (nStatus) {
+		case SCX_POWER_MODE_ACTIVE:
+			nError = SCXLNXCommShutdown(pComm);
+
+			if (nError) {
+				dprintk(KERN_ERR "SCXLNXCommPowerManagement(): "
+					"Failed with error code 0x%08x\n",
+					nError);
+				goto error;
+			}
+			break;
+
+		default:
+			goto not_allowed;
+		}
+		break;
+
+	case SCXLNX_POWER_OPERATION_HIBERNATE:
+		switch (nStatus) {
+		case SCX_POWER_MODE_ACTIVE:
+			nError = SCXLNXCommHibernate(pComm);
+
+			if (nError) {
+				dprintk(KERN_ERR "SCXLNXCommPowerManagement(): "
+					"Failed with error code 0x%08x\n",
+					nError);
+				goto error;
+			}
+			break;
+
+		default:
+			goto not_allowed;
+		}
+		break;
+
+	case SCXLNX_POWER_OPERATION_RESUME:
+		/* Resume is allowed from any reported power state */
+		nError = SCXLNXCommResume(pComm);
+
+		if (nError != 0) {
+			dprintk(KERN_ERR "SCXLNXCommPowerManagement(): "
+				"Failed with error code 0x%08x\n",
+				nError);
+			goto error;
+		}
+		break;
+	}
+
+	dprintk(KERN_INFO "SCXLNXCommPowerManagement(): succeeded\n");
+	return 0;
+
+not_allowed:
+	dprintk(KERN_ERR "SCXLNXCommPowerManagement(): "
+		"Power command not allowed in current "
+		"Secure World state %d\n", nStatus);
+	nError = -ENOTTY;
+error:
+	return nError;
+}
+
diff --git a/security/tf_driver/scxlnx_comm.h b/security/tf_driver/scxlnx_comm.h
new file mode 100644
index 000000000000..24512a7bdd23
--- /dev/null
+++ b/security/tf_driver/scxlnx_comm.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __SCXLNX_COMM_H__
+#define __SCXLNX_COMM_H__
+
+#include "scxlnx_defs.h"
+#include "scx_protocol.h"
+
+/*----------------------------------------------------------------------------
+ * Misc
+ *----------------------------------------------------------------------------*/
+
+void SCXLNXCommSetCurrentTime(struct SCXLNX_COMM *pComm);
+
+/*
+ * Atomic accesses to 32-bit variables in the L1 Shared buffer
+ */
+/*
+ * Reads one 32-bit word from the L1 shared buffer.
+ * The ldrex performs a single, non-split 32-bit load; no paired strex
+ * follows, so the exclusive-monitor state is simply discarded.
+ */
+static inline u32 SCXLNXCommReadReg32(const u32 *pCommBuffer)
+{
+	u32 result;
+
+	__asm__ __volatile__("@ SCXLNXCommReadReg32\n"
+		"ldrex %0, [%1]\n"
+		: "=&r" (result)
+		: "r" (pCommBuffer)
+		);
+
+	return result;
+}
+
+/*
+ * Atomically stores nValue into a 32-bit word of the L1 shared buffer,
+ * retrying the ldrex/strex pair until the exclusive store succeeds.
+ * NOTE(review): the asm declares no "memory" clobber; callers in this
+ * driver serialize access with pComm->lock -- confirm before reusing
+ * this helper in an unlocked context.
+ */
+static inline void SCXLNXCommWriteReg32(void *pCommBuffer, u32 nValue)
+{
+	u32 tmp;
+
+	__asm__ __volatile__("@ SCXLNXCommWriteReg32\n"
+		"1:	ldrex %0, [%2]\n"
+		"	strex %0, %1, [%2]\n"
+		"	teq   %0, #0\n"
+		"	bne   1b"
+		: "=&r" (tmp)
+		: "r" (nValue), "r" (pCommBuffer)
+		: "cc"
+		);
+}
+
+/*
+ * Atomic accesses to 64-bit variables in the L1 Shared buffer
+ */
+/*
+ * Reads one 64-bit value from the L1 shared buffer in a single,
+ * non-split access via ldrexd.
+ * NOTE(review): ldrexd requires an even/odd register pair; the plain
+ * "r" constraint on a u64 is relied upon to provide one -- confirm
+ * this holds for the target toolchain/ISA mode.
+ */
+static inline u64 SCXLNXCommReadReg64(void *pCommBuffer)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ SCXLNXCommReadReg64\n"
+		"ldrexd %0, [%1]\n"
+		: "=&r" (result)
+		: "r" (pCommBuffer)
+		);
+
+	return result;
+}
+
+/*
+ * Atomically stores a 64-bit value into the L1 shared buffer, retrying
+ * the ldrexd/strexd pair until the exclusive store succeeds.
+ * NOTE(review): strexd's status operand (%0) is declared as a u64
+ * register pair while the instruction writes a single status register,
+ * and no "memory" clobber is declared -- inherited from the original
+ * code; confirm against the toolchain before modifying.
+ */
+static inline void SCXLNXCommWriteReg64(void *pCommBuffer, u64 nValue)
+{
+	u64 tmp;
+
+	__asm__ __volatile__("@ SCXLNXCommWriteReg64\n"
+		"1:	ldrexd %0, [%2]\n"
+		"	strexd %0, %1, [%2]\n"
+		"	teq    %0, #0\n"
+		"	bne    1b"
+		: "=&r" (tmp)
+		: "r" (nValue), "r" (pCommBuffer)
+		: "cc"
+		);
+}
+
+/*----------------------------------------------------------------------------
+ * SMC operations
+ *----------------------------------------------------------------------------*/
+
+/* RPC return values */
+#define RPC_NO 0x00 /* No RPC to execute */
+#define RPC_YIELD 0x01 /* Yield RPC */
+#define RPC_NON_YIELD 0x02 /* non-Yield RPC */
+
+int SCXLNXCommExecuteRPCCommand(struct SCXLNX_COMM *pComm);
+
+/*----------------------------------------------------------------------------
+ * Shared memory related operations
+ *----------------------------------------------------------------------------*/
+
+#define L1_DESCRIPTOR_FAULT (0x00000000)
+#define L2_DESCRIPTOR_FAULT (0x00000000)
+
+#define L2_DESCRIPTOR_ADDR_MASK (0xFFFFF000)
+
+#define DESCRIPTOR_V13_12_MASK (0x3 << PAGE_SHIFT)
+#define DESCRIPTOR_V13_12_GET(a) ((a & DESCRIPTOR_V13_12_MASK) >> PAGE_SHIFT)
+
+struct SCXLNX_COARSE_PAGE_TABLE *SCXLNXAllocateCoarsePageTable(
+ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
+ u32 nType);
+
+void SCXLNXFreeCoarsePageTable(
+ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
+ struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable,
+ int nForce);
+
+void SCXLNXInitializeCoarsePageTableAllocator(
+ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext);
+
+void SCXLNXReleaseCoarsePageTableAllocator(
+ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext);
+
+struct page *SCXLNXCommL2PageDescriptorToPage(u32 nL2PageDescriptor);
+
+u32 SCXLNXCommGetL2DescriptorCommon(u32 nVirtAddr, struct mm_struct *mm);
+
+void SCXLNXCommReleaseSharedMemory(
+ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
+ struct SCXLNX_SHMEM_DESC *pShmemDesc,
+ u32 nFullCleanup);
+
+int SCXLNXCommFillDescriptorTable(
+ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pAllocationContext,
+ struct SCXLNX_SHMEM_DESC *pShmemDesc,
+ u32 nBufferVAddr,
+ struct vm_area_struct **ppVmas,
+ u32 pDescriptors[SCX_MAX_COARSE_PAGES],
+ u32 *pBufferSize,
+ u32 *pBufferStartOffset,
+ bool bInUserSpace,
+ u32 nFlags,
+ u32 *pnDescriptorCount);
+
+/*----------------------------------------------------------------------------
+ * Standard communication operations
+ *----------------------------------------------------------------------------*/
+
+#define STATUS_PENDING 0x00000001
+
+int tf_schedule_secure_world(struct SCXLNX_COMM *pComm, bool prepare_exit);
+
+int SCXLNXCommSendReceive(
+ struct SCXLNX_COMM *pComm,
+ union SCX_COMMAND_MESSAGE *pMessage,
+ union SCX_ANSWER_MESSAGE *pAnswer,
+ struct SCXLNX_CONNECTION *pConn,
+ bool bKillable);
+
+
+/**
+ * get a pointer to the secure world description.
+ * This points directly into the L1 shared buffer
+ * and is valid only once the communication has
+ * been initialized
+ **/
+u8 *SCXLNXCommGetDescription(struct SCXLNX_COMM *pComm);
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+enum SCXLNX_POWER_OPERATION {
+ SCXLNX_POWER_OPERATION_HIBERNATE = 1,
+ SCXLNX_POWER_OPERATION_SHUTDOWN = 2,
+ SCXLNX_POWER_OPERATION_RESUME = 3,
+};
+
+int SCXLNXCommHibernate(struct SCXLNX_COMM *pComm);
+int SCXLNXCommResume(struct SCXLNX_COMM *pComm);
+int SCXLNXCommShutdown(struct SCXLNX_COMM *pComm);
+
+int SCXLNXCommPowerManagement(struct SCXLNX_COMM *pComm,
+ enum SCXLNX_POWER_OPERATION nOperation);
+
+
+/*----------------------------------------------------------------------------
+ * Communication initialization and termination
+ *----------------------------------------------------------------------------*/
+
+int SCXLNXCommInit(struct SCXLNX_COMM *pComm);
+
+void SCXLNXCommTerminate(struct SCXLNX_COMM *pComm);
+
+
+#endif /* __SCXLNX_COMM_H__ */
diff --git a/security/tf_driver/scxlnx_comm_tz.c b/security/tf_driver/scxlnx_comm_tz.c
new file mode 100644
index 000000000000..b186d98548a4
--- /dev/null
+++ b/security/tf_driver/scxlnx_comm_tz.c
@@ -0,0 +1,891 @@
+/*
+ * Copyright (c) 2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <linux/version.h>
+#include <asm/cputype.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
+
+#include "scxlnx_defs.h"
+#include "scxlnx_comm.h"
+#include "scx_protocol.h"
+#include "scxlnx_util.h"
+#include "scxlnx_conn.h"
+
+/*
+ * Structure common to all SMC operations.
+ * reg0..reg4 mirror ARM registers r0..r4 passed to the secure monitor
+ * call; on return reg0 holds the status (compared against S_SUCCESS by
+ * callers) and reg1 the first result value.
+ */
+struct SCXLNX_GENERIC_SMC {
+	u32 reg0;
+	u32 reg1;
+	u32 reg2;
+	u32 reg3;
+	u32 reg4;
+};
+
+/*----------------------------------------------------------------------------
+ * SMC operations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Issues a secure monitor call: loads pGenericSMC->reg0..reg4 into
+ * r0..r4, executes SMC #0, then writes r0/r1 back into reg0/reg1.
+ *
+ * On SMP the calling task is temporarily pinned to CPU 0 for the
+ * duration of the SMC (presumably the secure world only runs on CPU 0 --
+ * not confirmable from this file); the saved affinity mask is restored
+ * afterwards.  Failures to change affinity are only logged.
+ */
+static inline void SCXLNXCommCallGenericSMC(
+	struct SCXLNX_GENERIC_SMC *pGenericSMC)
+{
+#ifdef CONFIG_SMP
+	long ret;
+	cpumask_t saved_cpu_mask;
+	cpumask_t local_cpu_mask = CPU_MASK_NONE;
+
+	/* Pin the current task (pid 0 == self) to CPU 0 */
+	cpu_set(0, local_cpu_mask);
+	sched_getaffinity(0, &saved_cpu_mask);
+	ret = sched_setaffinity(0, &local_cpu_mask);
+	if (ret != 0)
+	{
+		dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret);
+	}
+#endif
+
+	/* 0xe1600070 is the instruction encoding of "SMC 0" (emitted as a
+	 * literal because the assembler may not accept the mnemonic) */
+	__asm__ volatile(
+		"mov r0, %2\n"
+		"mov r1, %3\n"
+		"mov r2, %4\n"
+		"mov r3, %5\n"
+		"mov r4, %6\n"
+		".word 0xe1600070 @ SMC 0\n"
+		"mov %0, r0\n"
+		"mov %1, r1\n"
+		: "=r" (pGenericSMC->reg0), "=r" (pGenericSMC->reg1)
+		: "r" (pGenericSMC->reg0), "r" (pGenericSMC->reg1),
+		"r" (pGenericSMC->reg2), "r" (pGenericSMC->reg3),
+		"r" (pGenericSMC->reg4)
+		: "r0", "r1", "r2", "r3", "r4");
+
+#ifdef CONFIG_SMP
+	/* Restore the caller's original CPU affinity */
+	ret = sched_setaffinity(0, &saved_cpu_mask);
+	if (ret != 0)
+	{
+		dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret);
+	}
+#endif
+}
+
+/*
+ * Calls the get protocol version SMC.
+ * Fills the parameter pProtocolVersion with the version number returned
+ * by the SMC in r1.
+ * (NOTE(review): the parameter name is misspelled "pProcotolVersion";
+ * renaming it is a code change, so only flagged here.)
+ */
+static inline void SCXLNXCommCallGetProtocolVersionSMC(u32 *pProcotolVersion)
+{
+	struct SCXLNX_GENERIC_SMC sGenericSMC;
+
+	sGenericSMC.reg0 = SCX_SMC_GET_PROTOCOL_VERSION;
+	sGenericSMC.reg1 = 0;
+	sGenericSMC.reg2 = 0;
+	sGenericSMC.reg3 = 0;
+	sGenericSMC.reg4 = 0;
+
+	SCXLNXCommCallGenericSMC(&sGenericSMC);
+	/* reg1 holds the version on return; reg0 (status) is not checked */
+	*pProcotolVersion = sGenericSMC.reg1;
+}
+
+
+/*
+ * Calls the init SMC with the specified parameters.
+ * nSharedPageDescriptor: L2 descriptor of the L1 shared buffer page
+ * (built by SCXLNXCommGetL2InitDescriptor).
+ * Returns S_SUCCESS (zero) upon successful completion, or the raw
+ * non-zero status returned by the secure world upon failure.
+ */
+static inline int SCXLNXCommCallInitSMC(u32 nSharedPageDescriptor)
+{
+	struct SCXLNX_GENERIC_SMC sGenericSMC;
+
+	sGenericSMC.reg0 = SCX_SMC_INIT;
+	/* Descriptor for the layer 1 shared buffer */
+	sGenericSMC.reg1 = nSharedPageDescriptor;
+	sGenericSMC.reg2 = 0;
+	sGenericSMC.reg3 = 0;
+	sGenericSMC.reg4 = 0;
+
+	SCXLNXCommCallGenericSMC(&sGenericSMC);
+	if (sGenericSMC.reg0 != S_SUCCESS)
+		printk(KERN_ERR "SCXLNXCommCallInitSMC:"
+			" r0=0x%08X upon return (expected 0x%08X)!\n",
+			sGenericSMC.reg0,
+			S_SUCCESS);
+
+	return sGenericSMC.reg0;
+}
+
+
+/*
+ * Calls the reset irq SMC.
+ * Used by the soft interrupt handler to acknowledge an interrupt raised
+ * by the secure world (see SCXLNXCommSoftIntHandler).  The return status
+ * is deliberately ignored.
+ */
+static inline void SCXLNXCommCallResetIrqSMC(void)
+{
+	struct SCXLNX_GENERIC_SMC sGenericSMC;
+
+	sGenericSMC.reg0 = SCX_SMC_RESET_IRQ;
+	sGenericSMC.reg1 = 0;
+	sGenericSMC.reg2 = 0;
+	sGenericSMC.reg3 = 0;
+	sGenericSMC.reg4 = 0;
+
+	SCXLNXCommCallGenericSMC(&sGenericSMC);
+}
+
+
+/*
+ * Calls the WAKE_UP SMC, used on resume from hibernation.
+ * nL1SharedBufferDescriptor: L2 descriptor of the L1 shared buffer page.
+ * nSharedMemStartOffset/nSharedMemSize: location and size of the W3B
+ * buffer inside shared memory.
+ * Returns S_SUCCESS (zero) upon successful completion, or the raw
+ * non-zero status returned by the secure world upon failure.
+ */
+static inline int SCXLNXCommCallWakeUpSMC(u32 nL1SharedBufferDescriptor,
+	u32 nSharedMemStartOffset,
+	u32 nSharedMemSize)
+{
+	struct SCXLNX_GENERIC_SMC sGenericSMC;
+
+	sGenericSMC.reg0 = SCX_SMC_WAKE_UP;
+	sGenericSMC.reg1 = nSharedMemStartOffset;
+	/* bit 31 set in the size marks this as a long form command */
+	sGenericSMC.reg2 = nSharedMemSize | 0x80000000;
+	sGenericSMC.reg3 = nL1SharedBufferDescriptor;
+	sGenericSMC.reg4 = 0;
+
+	SCXLNXCommCallGenericSMC(&sGenericSMC);
+
+	if (sGenericSMC.reg0 != S_SUCCESS)
+		printk(KERN_ERR "SCXLNXCommCallWakeUpSMC:"
+			" r0=0x%08X upon return (expected 0x%08X)!\n",
+			sGenericSMC.reg0,
+			S_SUCCESS);
+
+	return sGenericSMC.reg0;
+}
+
+/*
+ * Calls the N-Yield SMC: yields the normal world's time slice to the
+ * secure world.  No status is returned to the caller.
+ */
+static inline void SCXLNXCommCallNYieldSMC(void)
+{
+	struct SCXLNX_GENERIC_SMC sGenericSMC;
+
+	sGenericSMC.reg0 = SCX_SMC_N_YIELD;
+	sGenericSMC.reg1 = 0;
+	sGenericSMC.reg2 = 0;
+	sGenericSMC.reg3 = 0;
+	sGenericSMC.reg4 = 0;
+
+	SCXLNXCommCallGenericSMC(&sGenericSMC);
+}
+
+/*
+ * Yields the Secure World.
+ * Publishes the current time to the L1 shared buffer, then issues the
+ * N-Yield SMC.  prepare_exit is ignored in this TrustZone variant.
+ * Always returns 0.
+ */
+int tf_schedule_secure_world(struct SCXLNX_COMM *pComm, bool prepare_exit)
+{
+	SCXLNXCommSetCurrentTime(pComm);
+
+	/* yield to the Secure World */
+	SCXLNXCommCallNYieldSMC();
+
+	return 0;
+}
+
+/*
+ * Returns the L2 descriptor for the specified kernel virtual page.
+ * The descriptor combines: the base attribute bits, the physical page
+ * address, virtual-address bits [13:12] (presumably for cache aliasing /
+ * page coloring -- TODO confirm against the protocol spec), and the
+ * common attribute bits derived from the init_mm mapping.
+ */
+
+#define L2_INIT_DESCRIPTOR_BASE           (0x00000003)
+#define L2_INIT_DESCRIPTOR_V13_12_SHIFT   (4)
+
+static u32 SCXLNXCommGetL2InitDescriptor(void *pVirtAddr)
+{
+	struct page *pPage;
+	u32 nVirtAddr;
+	u32 nPhysAddr;
+	u32 nDescriptor;
+
+	nDescriptor = L2_INIT_DESCRIPTOR_BASE;
+	nVirtAddr = (u32) pVirtAddr;
+
+	/* get physical address and add to nDescriptor */
+	pPage = virt_to_page(pVirtAddr);
+	nPhysAddr = page_to_phys(pPage);
+	nDescriptor |= (nPhysAddr & L2_DESCRIPTOR_ADDR_MASK);
+
+	/* Add virtual address v[13:12] bits to nDescriptor */
+	nDescriptor |= (DESCRIPTOR_V13_12_GET(nVirtAddr)
+		<< L2_INIT_DESCRIPTOR_V13_12_SHIFT);
+
+	/* Memory-attribute bits taken from the kernel (init_mm) mapping */
+	nDescriptor |= SCXLNXCommGetL2DescriptorCommon(nVirtAddr, &init_mm);
+
+
+	return nDescriptor;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Free the memory used by the W3B buffer for the specified comm.
+ * Releases the shared memory registration, the coarse page table
+ * allocator, and the vmalloc'ed backing store, then clears the
+ * W3B_ALLOCATED flag.
+ * This function does nothing if no W3B buffer is allocated for the
+ * device.
+ */
+static inline void SCXLNXCommFreeW3B(struct SCXLNX_COMM *pComm)
+{
+	SCXLNXCommReleaseSharedMemory(
+		&(pComm->sW3BAllocationContext),
+		&(pComm->sW3BShmemDesc),
+		0);
+
+	SCXLNXReleaseCoarsePageTableAllocator(&(pComm->sW3BAllocationContext));
+
+	internal_vfree((void *)pComm->nW3BShmemVAddr);
+	pComm->nW3BShmemVAddr = 0;
+	pComm->nW3BShmemSize = 0;
+	clear_bit(SCXLNX_COMM_FLAG_W3B_ALLOCATED, &(pComm->nFlags));
+}
+
+
+/*
+ * Allocates the W3B (hibernation) buffer for the specified comm.
+ * The required size is read from the L1 shared buffer: the current size
+ * if the "H" bit of nConfigFlags_S is set, the maximum size otherwise.
+ * If a W3B already exists it is kept unless the secure world now needs
+ * a bigger one, in which case it is freed and reallocated.
+ * Returns zero upon successful completion, or an appropriate error code
+ * upon failure.
+ */
+static inline int SCXLNXCommAllocateW3B(struct SCXLNX_COMM *pComm)
+{
+	int nError;
+	u32 nFlags;
+	u32 nConfigFlags_S;
+	u32 *pW3BDescriptors;
+	u32 nW3BDescriptorCount;
+	u32 nW3BCurrentSize;
+
+	nConfigFlags_S = SCXLNXCommReadReg32(&pComm->pBuffer->nConfigFlags_S);
+
+retry:
+	if ((test_bit(SCXLNX_COMM_FLAG_W3B_ALLOCATED, &(pComm->nFlags))) == 0) {
+		/*
+		 * Initialize the shared memory for the W3B
+		 */
+		SCXLNXInitializeCoarsePageTableAllocator(
+			&pComm->sW3BAllocationContext);
+	} else {
+		/*
+		 * The W3B is allocated but do we have to reallocate a bigger
+		 * one?
+		 */
+		/* Check H bit */
+		if ((nConfigFlags_S & (1<<4)) != 0) {
+			/* The size of the W3B may change after SMC_INIT */
+			/* Read the current value */
+			nW3BCurrentSize = SCXLNXCommReadReg32(
+				&pComm->pBuffer->nW3BSizeCurrent_S);
+			/* NOTE(review): strict ">" means an existing buffer of
+			 * exactly the required size is freed and reallocated;
+			 * ">=" looks intended -- harmless but wasteful. */
+			if (pComm->nW3BShmemSize > nW3BCurrentSize)
+				return 0;
+
+			SCXLNXCommFreeW3B(pComm);
+			goto retry;
+		} else {
+			return 0;
+		}
+	}
+
+	/* check H bit */
+	if ((nConfigFlags_S & (1<<4)) != 0)
+		/* The size of the W3B may change after SMC_INIT */
+		/* Read the current value */
+		pComm->nW3BShmemSize = SCXLNXCommReadReg32(
+			&pComm->pBuffer->nW3BSizeCurrent_S);
+	else
+		pComm->nW3BShmemSize = SCXLNXCommReadReg32(
+			&pComm->pBuffer->nW3BSizeMax_S);
+
+	pComm->nW3BShmemVAddr = (u32) internal_vmalloc(pComm->nW3BShmemSize);
+	if (pComm->nW3BShmemVAddr == 0) {
+		printk(KERN_ERR "SCXLNXCommAllocateW3B():"
+			" Out of memory for W3B buffer (%u bytes)!\n",
+			(unsigned int)(pComm->nW3BShmemSize));
+		nError = -ENOMEM;
+		goto error;
+	}
+
+	/* initialize the sW3BShmemDesc structure */
+	pComm->sW3BShmemDesc.nType = SCXLNX_SHMEM_TYPE_PM_HIBERNATE;
+	INIT_LIST_HEAD(&(pComm->sW3BShmemDesc.list));
+
+	nFlags = (SCX_SHMEM_TYPE_READ | SCX_SHMEM_TYPE_WRITE);
+
+	/* directly point to the L1 shared buffer W3B descriptors */
+	pW3BDescriptors = pComm->pBuffer->nW3BDescriptors;
+
+	/*
+	 * SCXLNXCommFillDescriptorTable uses the following parameter as an
+	 * IN/OUT
+	 */
+
+	nError = SCXLNXCommFillDescriptorTable(
+		&(pComm->sW3BAllocationContext),
+		&(pComm->sW3BShmemDesc),
+		pComm->nW3BShmemVAddr,
+		NULL,
+		pW3BDescriptors,
+		&(pComm->nW3BShmemSize),
+		&(pComm->nW3BShmemOffset),
+		false,
+		nFlags,
+		&nW3BDescriptorCount);
+	if (nError != 0) {
+		printk(KERN_ERR "SCXLNXCommAllocateW3B():"
+			" SCXLNXCommFillDescriptorTable failed with "
+			"error code 0x%08x!\n",
+			nError);
+		goto error;
+	}
+
+	set_bit(SCXLNX_COMM_FLAG_W3B_ALLOCATED, &(pComm->nFlags));
+
+	/* successful completion */
+	return 0;
+
+error:
+	SCXLNXCommFreeW3B(pComm);
+
+	return nError;
+}
+
+/*
+ * Perform a Secure World shutdown operation by sending a MANAGEMENT/
+ * SHUTDOWN message over the standard send/receive path.
+ * Returns 0 on success, a negative error code if the exchange failed,
+ * or the secure world's error code from the answer header.
+ * (The original comment claimed the routine "does not return" on
+ * success; the code below plainly returns in all cases.)
+ */
+int SCXLNXCommShutdown(struct SCXLNX_COMM *pComm)
+{
+#ifdef CONFIG_TFN
+	/* this function is useless for the TEGRA product */
+	return 0;
+#else
+	int nError;
+	union SCX_COMMAND_MESSAGE sMessage;
+	union SCX_ANSWER_MESSAGE sAnswer;
+
+	dprintk(KERN_INFO "SCXLNXCommShutdown()\n");
+
+	memset(&sMessage, 0, sizeof(sMessage));
+
+	sMessage.sHeader.nMessageType = SCX_MESSAGE_TYPE_MANAGEMENT;
+	/* nMessageSize counts 32-bit words after the command header */
+	sMessage.sHeader.nMessageSize =
+		(sizeof(struct SCX_COMMAND_MANAGEMENT) -
+			sizeof(struct SCX_COMMAND_HEADER))/sizeof(u32);
+
+	sMessage.sManagementMessage.nCommand = SCX_MANAGEMENT_SHUTDOWN;
+
+	nError = SCXLNXCommSendReceive(
+		pComm,
+		&sMessage,
+		&sAnswer,
+		NULL,
+		false);
+
+	if (nError != 0) {
+		dprintk(KERN_ERR "SCXLNXCommShutdown(): "
+			"SCXLNXCommSendReceive failed (error %d)!\n",
+			nError);
+		return nError;
+	}
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+	if (sAnswer.sHeader.nErrorCode != 0)
+		dprintk(KERN_ERR "tf_driver: shutdown failed.\n");
+	else
+		dprintk(KERN_INFO "tf_driver: shutdown succeeded.\n");
+#endif
+
+	return sAnswer.sHeader.nErrorCode;
+#endif
+}
+
+
+/*
+ * Perform a Secure World hibernate operation.
+ * Allocates the W3B buffer, then hand-delivers a MANAGEMENT/HIBERNATE
+ * message into the command queue (bypassing SCXLNXCommSendReceive,
+ * because the polling thread is already hibernating) and busy-dispatches
+ * answers until the hibernate answer arrives.
+ * Returns 0 on success or a negative error code if the W3B allocation
+ * fails.  (The original comment claimed the routine "does not return"
+ * on success; the code below returns 0 after the answer is received.)
+ */
+int SCXLNXCommHibernate(struct SCXLNX_COMM *pComm)
+{
+#ifdef CONFIG_TFN
+	/* this function is useless for the TEGRA product */
+	return 0;
+#else
+	int nError;
+	union SCX_COMMAND_MESSAGE sMessage;
+	union SCX_ANSWER_MESSAGE sAnswer;
+	u32 nFirstCommand;
+	u32 nFirstFreeCommand;
+
+	dprintk(KERN_INFO "SCXLNXCommHibernate()\n");
+
+	nError = SCXLNXCommAllocateW3B(pComm);
+	if (nError != 0) {
+		dprintk(KERN_ERR "SCXLNXCommHibernate(): "
+			"SCXLNXCommAllocateW3B failed (error %d)!\n",
+			nError);
+		return nError;
+	}
+
+	/*
+	 * As the polling thread is already hibernating, we
+	 * should send the message and receive the answer ourself
+	 */
+
+	/* build the "prepare to hibernate" message */
+	/* NOTE(review): unlike SCXLNXCommShutdown, sMessage is NOT zeroed
+	 * here and sHeader.nMessageSize is never assigned, yet it is read
+	 * below to compute nCommandSize -- this looks like uninitialized
+	 * stack data being used.  TODO confirm and memset + set
+	 * nMessageSize as in the shutdown path. */
+	sMessage.sHeader.nMessageType = SCX_MESSAGE_TYPE_MANAGEMENT;
+	sMessage.sManagementMessage.nCommand = SCX_MANAGEMENT_HIBERNATE;
+	/* Long Form Command */
+	sMessage.sManagementMessage.nSharedMemDescriptors[0] = 0;
+	sMessage.sManagementMessage.nSharedMemDescriptors[1] = 0;
+	sMessage.sManagementMessage.nW3BSize =
+		pComm->nW3BShmemSize | 0x80000000;
+	sMessage.sManagementMessage.nW3BStartOffset =
+		pComm->nW3BShmemOffset;
+	/* the answer address doubles as a unique operation id
+	 * (only valid on 32-bit: pointer truncated into a u32) */
+	sMessage.sHeader.nOperationID = (u32) &sAnswer;
+
+	SCXLNXDumpMessage(&sMessage);
+
+	/* find a slot to send the message in */
+
+	/* AFY: why not use the function SCXLNXCommSendReceive?? We are
+	 * duplicating a lot of subtle code here. And it's not going to be
+	 * tested because power management is currently not supported by the
+	 * secure world. */
+	for (;;) {
+		int nQueueWordsCount, nCommandSize;
+
+		spin_lock(&(pComm->lock));
+
+		nFirstCommand = SCXLNXCommReadReg32(
+			&pComm->pBuffer->nFirstCommand);
+		nFirstFreeCommand = SCXLNXCommReadReg32(
+			&pComm->pBuffer->nFirstFreeCommand);
+
+		nQueueWordsCount = nFirstFreeCommand - nFirstCommand;
+		nCommandSize = sMessage.sHeader.nMessageSize
+			+ sizeof(struct SCX_COMMAND_HEADER);
+		if ((nQueueWordsCount + nCommandSize) <
+			SCX_N_MESSAGE_QUEUE_CAPACITY) {
+			/* Command queue is not full */
+			memcpy(&pComm->pBuffer->sCommandQueue[
+				nFirstFreeCommand %
+					SCX_N_MESSAGE_QUEUE_CAPACITY],
+				&sMessage,
+				nCommandSize * sizeof(u32));
+
+			SCXLNXCommWriteReg32(&pComm->pBuffer->nFirstFreeCommand,
+				nFirstFreeCommand + nCommandSize);
+
+			spin_unlock(&(pComm->lock));
+			break;
+		}
+
+		/* queue full: give the secure world a chance to drain it */
+		spin_unlock(&(pComm->lock));
+		(void)tf_schedule_secure_world(pComm, false);
+	}
+
+	/* now wait for the answer, dispatching other answers */
+	while (1) {
+		u32 nFirstAnswer;
+		u32 nFirstFreeAnswer;
+
+		/* check all the answers */
+		nFirstFreeAnswer = SCXLNXCommReadReg32(
+			&pComm->pBuffer->nFirstFreeAnswer);
+		nFirstAnswer = SCXLNXCommReadReg32(
+			&pComm->pBuffer->nFirstAnswer);
+
+		if (nFirstAnswer != nFirstFreeAnswer) {
+			int bFoundAnswer = 0;
+
+			do {
+				/* answer queue not empty */
+				union SCX_ANSWER_MESSAGE sComAnswer;
+				struct SCX_ANSWER_HEADER sHeader;
+				/* size of the command in words of 32bit */
+				int nCommandSize;
+
+				/* get the nMessageSize */
+				memcpy(&sHeader,
+					&pComm->pBuffer->sAnswerQueue[
+						nFirstAnswer %
+						SCX_S_ANSWER_QUEUE_CAPACITY],
+					sizeof(struct SCX_ANSWER_HEADER));
+				nCommandSize = sHeader.nMessageSize +
+					sizeof(struct SCX_ANSWER_HEADER);
+
+				/*
+				 * NOTE: nMessageSize is the number of words
+				 * following the first word
+				 */
+				memcpy(&sComAnswer,
+					&pComm->pBuffer->sAnswerQueue[
+						nFirstAnswer %
+						SCX_S_ANSWER_QUEUE_CAPACITY],
+					nCommandSize * sizeof(u32));
+
+				SCXLNXDumpAnswer(&sComAnswer);
+
+				if (sComAnswer.sHeader.nOperationID ==
+						(u32) &sAnswer) {
+					/*
+					 * this is the answer to the "prepare to
+					 * hibernate" message
+					 */
+					memcpy(&sAnswer,
+						&sComAnswer,
+						nCommandSize * sizeof(u32));
+
+					bFoundAnswer = 1;
+					SCXLNXCommWriteReg32(
+						&pComm->pBuffer->nFirstAnswer,
+						nFirstAnswer + nCommandSize);
+					break;
+				} else {
+					/*
+					 * this is a standard message answer,
+					 * dispatch it
+					 */
+					struct SCXLNX_ANSWER_STRUCT
+						*pAnswerStructure;
+
+					pAnswerStructure =
+						(struct SCXLNX_ANSWER_STRUCT *)
+						sComAnswer.sHeader.nOperationID;
+
+					memcpy(pAnswerStructure->pAnswer,
+						&sComAnswer,
+						nCommandSize * sizeof(u32));
+
+					pAnswerStructure->bAnswerCopied = true;
+				}
+
+				SCXLNXCommWriteReg32(
+					&pComm->pBuffer->nFirstAnswer,
+					nFirstAnswer + nCommandSize);
+			} while (nFirstAnswer != nFirstFreeAnswer);
+
+			if (bFoundAnswer)
+				break;
+		}
+
+		/*
+		 * since the Secure World is at least running the "prepare to
+		 * hibernate" message, its timeout must be immediate So there is
+		 * no need to check its timeout and schedule() the current
+		 * thread
+		 */
+		(void)tf_schedule_secure_world(pComm, false);
+	} /* while (1) */
+
+	printk(KERN_INFO "tf_driver: hibernate.\n");
+	return 0;
+#endif
+}
+
+
+/*
+ * Perform a Secure World resume operation.
+ * Issues the WAKE_UP SMC with the L1 buffer descriptor and W3B location,
+ * then spins (yielding via N-Yield and schedule()) until the secure
+ * world reports POWER_MODE_ACTIVE or POWER_MODE_PANIC in nStatus_S.
+ * The routine returns once the Secure World is active again (0) or
+ * -EINVAL if it paniced / entered an unexpected power mode, or the
+ * WAKE_UP SMC status on immediate failure.
+ */
+int SCXLNXCommResume(struct SCXLNX_COMM *pComm)
+{
+#ifdef CONFIG_TFN
+	/* this function is useless for the TEGRA product */
+	return 0;
+#else
+	int nError;
+	u32 nStatus;
+
+	dprintk(KERN_INFO "SCXLNXCommResume()\n");
+
+	nError = SCXLNXCommCallWakeUpSMC(
+		SCXLNXCommGetL2InitDescriptor(pComm->pBuffer),
+		pComm->nW3BShmemOffset,
+		pComm->nW3BShmemSize);
+
+	if (nError != 0) {
+		dprintk(KERN_ERR "SCXLNXCommResume(): "
+			"SCXLNXCommCallWakeUpSMC failed (error %d)!\n",
+			nError);
+		return nError;
+	}
+
+	/* extract the power-state field from the secure status word */
+	nStatus = ((SCXLNXCommReadReg32(&(pComm->pBuffer->nStatus_S))
+		& SCX_STATUS_POWER_STATE_MASK)
+		>> SCX_STATUS_POWER_STATE_SHIFT);
+
+	while ((nStatus != SCX_POWER_MODE_ACTIVE)
+			&& (nStatus != SCX_POWER_MODE_PANIC)) {
+		SCXLNXCommCallNYieldSMC();
+
+		nStatus = ((SCXLNXCommReadReg32(&(pComm->pBuffer->nStatus_S))
+			& SCX_STATUS_POWER_STATE_MASK)
+			>> SCX_STATUS_POWER_STATE_SHIFT);
+
+		/*
+		 * As this may last quite a while, call the kernel scheduler to
+		 * hand over CPU for other operations
+		 */
+		schedule();
+	}
+
+	switch (nStatus) {
+	case SCX_POWER_MODE_ACTIVE:
+		break;
+
+	case SCX_POWER_MODE_PANIC:
+		dprintk(KERN_ERR "SCXLNXCommResume(): "
+			"Secure World POWER_MODE_PANIC!\n");
+		return -EINVAL;
+
+	default:
+		dprintk(KERN_ERR "SCXLNXCommResume(): "
+			"unexpected Secure World POWER_MODE (%d)!\n", nStatus);
+		return -EINVAL;
+	}
+
+	dprintk(KERN_INFO "SCXLNXCommResume() succeeded\n");
+	return 0;
+#endif
+}
+
+/*----------------------------------------------------------------------------
+ * Communication initialization and termination
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Handles the software interrupts issued by the Secure World.
+ * Checks the P bit of nStatus_S to verify the interrupt really came from
+ * the Trusted Foundations software (the IRQ line is shared), acknowledges
+ * it with the reset-irq SMC, and wakes any waiter on pComm->waitQueue
+ * (the N_SM_EVENT).
+ */
+static irqreturn_t SCXLNXCommSoftIntHandler(int irq, void *dev_id)
+{
+	struct SCXLNX_COMM *pComm = (struct SCXLNX_COMM *) dev_id;
+
+	/* communication not yet initialized: cannot be ours */
+	if (pComm->pBuffer == NULL)
+		return IRQ_NONE;
+
+	if ((SCXLNXCommReadReg32(&pComm->pBuffer->nStatus_S) &
+			SCX_STATUS_P_MASK) == 0)
+		/* interrupt not issued by the Trusted Foundations Software */
+		return IRQ_NONE;
+
+	SCXLNXCommCallResetIrqSMC();
+
+	/* signal N_SM_EVENT */
+	wake_up(&pComm->waitQueue);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Initializes the communication with the Secure World.
+ * Verifies the protocol major version, optionally registers the soft
+ * interrupt handler, allocates the L1 shared buffer and passes its L2
+ * descriptor to the INIT SMC, then yields to the Secure World for the
+ * first time.  Returns successfully once the communication with
+ * the Secure World is up and running.
+ *
+ * Returns S_SUCCESS (0) upon success or an appropriate negative error
+ * code upon failure; on failure SCXLNXCommTerminate() is called to undo
+ * partial initialization.
+ */
+int SCXLNXCommInit(struct SCXLNX_COMM *pComm)
+{
+	int nError;
+	struct page *pBufferPage;
+	u32 nProtocolVersion;
+
+	dprintk(KERN_INFO "SCXLNXCommInit()\n");
+
+	spin_lock_init(&(pComm->lock));
+	pComm->nFlags = 0;
+	pComm->pBuffer = NULL;
+	init_waitqueue_head(&(pComm->waitQueue));
+
+	/*
+	 * Check the Secure World protocol version is the expected one.
+	 * Only the major version must match.
+	 */
+	SCXLNXCommCallGetProtocolVersionSMC(&nProtocolVersion);
+
+	if ((GET_PROTOCOL_MAJOR_VERSION(nProtocolVersion))
+			!= SCX_S_PROTOCOL_MAJOR_VERSION) {
+		printk(KERN_ERR "SCXLNXCommInit():"
+			" Unsupported Secure World Major Version "
+			"(0x%02X, expected 0x%02X)!\n",
+			GET_PROTOCOL_MAJOR_VERSION(nProtocolVersion),
+			SCX_S_PROTOCOL_MAJOR_VERSION);
+		nError = -EIO;
+		goto error;
+	}
+
+	/*
+	 * Register the software interrupt handler if required to
+	 * (nSoftIntIrq == -1 means "no soft interrupt line").
+	 */
+	if (pComm->nSoftIntIrq != -1) {
+		dprintk(KERN_INFO "SCXLNXCommInit(): "
+			"Registering software interrupt handler (IRQ %d)\n",
+			pComm->nSoftIntIrq);
+
+		nError = request_irq(pComm->nSoftIntIrq,
+			SCXLNXCommSoftIntHandler,
+			IRQF_SHARED,
+			SCXLNX_DEVICE_BASE_NAME,
+			pComm);
+		if (nError != 0) {
+			dprintk(KERN_ERR "SCXLNXCommInit(): "
+				"request_irq failed for irq %d (error %d)\n",
+				pComm->nSoftIntIrq, nError);
+			goto error;
+		}
+		set_bit(SCXLNX_COMM_FLAG_IRQ_REQUESTED, &(pComm->nFlags));
+	}
+
+	/*
+	 * Allocate and initialize the L1 shared buffer.
+	 */
+	pComm->pBuffer = (void *) internal_get_zeroed_page(GFP_KERNEL);
+	if (pComm->pBuffer == NULL) {
+		printk(KERN_ERR "SCXLNXCommInit():"
+			" get_zeroed_page failed for L1 shared buffer!\n");
+		nError = -ENOMEM;
+		goto error;
+	}
+
+	/*
+	 * Ensure the page storing the L1 shared buffer is mapped.
+	 * NOTE(review): the trylock_page() return value is ignored; if the
+	 * page were already locked this silently does nothing.  The lock is
+	 * undone by __clear_page_locked() in SCXLNXCommTerminate().
+	 */
+	pBufferPage = virt_to_page(pComm->pBuffer);
+	trylock_page(pBufferPage);
+
+	dprintk(KERN_INFO "SCXLNXCommInit(): "
+		"L1 shared buffer allocated at virtual:%p, "
+		"physical:%p (page:%p)\n",
+		pComm->pBuffer,
+		(void *)virt_to_phys(pComm->pBuffer),
+		pBufferPage);
+
+	set_bit(SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED, &(pComm->nFlags));
+
+	/*
+	 * Init SMC
+	 */
+	nError = SCXLNXCommCallInitSMC(
+		SCXLNXCommGetL2InitDescriptor(pComm->pBuffer));
+	if (nError != S_SUCCESS) {
+		dprintk(KERN_ERR "SCXLNXCommInit(): "
+			"SCXLNXCommCallInitSMC failed (error 0x%08X)!\n",
+			nError);
+		goto error;
+	}
+
+	/*
+	 * check whether the interrupts are actually enabled
+	 * If not, remove irq handler
+	 */
+	if ((SCXLNXCommReadReg32(&pComm->pBuffer->nConfigFlags_S) &
+			SCX_CONFIG_FLAG_S) == 0) {
+		if (test_and_clear_bit(SCXLNX_COMM_FLAG_IRQ_REQUESTED,
+				&(pComm->nFlags)) != 0) {
+			dprintk(KERN_INFO "SCXLNXCommInit(): "
+				"Interrupts not used, unregistering "
+				"softint (IRQ %d)\n",
+				pComm->nSoftIntIrq);
+
+			free_irq(pComm->nSoftIntIrq, pComm);
+		}
+	} else {
+		if (test_bit(SCXLNX_COMM_FLAG_IRQ_REQUESTED,
+				&(pComm->nFlags)) == 0) {
+			/*
+			 * Interrupts are enabled in the Secure World, but not
+			 * handled by driver
+			 */
+			dprintk(KERN_ERR "SCXLNXCommInit(): "
+				"soft_interrupt argument not provided\n");
+			nError = -EINVAL;
+			goto error;
+		}
+	}
+
+	/*
+	 * Successful completion.
+	 */
+
+	/* yield for the first time */
+	(void)tf_schedule_secure_world(pComm, false);
+
+	dprintk(KERN_INFO "SCXLNXCommInit(): Success\n");
+	return S_SUCCESS;
+
+error:
+	/*
+	 * Error handling.
+	 */
+	dprintk(KERN_INFO "SCXLNXCommInit(): Failure (error %d)\n",
+		nError);
+	SCXLNXCommTerminate(pComm);
+	return nError;
+}
+
+
+/*
+ * Attempt to terminate the communication with the Secure World.
+ * The W3B buffer (if any), the L1 shared buffer, and the soft interrupt
+ * handler are released, guided by the state bits in pComm->nFlags.
+ * Calling this routine terminates the communication with the Secure
+ * World definitively: there is no way to inform the Secure World of a
+ * new L1 shared buffer to be used once it has been initialized.
+ */
+void SCXLNXCommTerminate(struct SCXLNX_COMM *pComm)
+{
+	dprintk(KERN_INFO "SCXLNXCommTerminate()\n");
+
+	set_bit(SCXLNX_COMM_FLAG_TERMINATING, &(pComm->nFlags));
+
+	if ((test_bit(SCXLNX_COMM_FLAG_W3B_ALLOCATED,
+			&(pComm->nFlags))) != 0) {
+		dprintk(KERN_INFO "SCXLNXCommTerminate(): "
+			"Freeing the W3B buffer...\n");
+		SCXLNXCommFreeW3B(pComm);
+	}
+
+	if ((test_bit(SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED,
+			&(pComm->nFlags))) != 0) {
+		/* undo the trylock_page() done in SCXLNXCommInit() */
+		__clear_page_locked(virt_to_page(pComm->pBuffer));
+		internal_free_page((unsigned long) pComm->pBuffer);
+	}
+
+	if ((test_bit(SCXLNX_COMM_FLAG_IRQ_REQUESTED,
+			&(pComm->nFlags))) != 0) {
+		dprintk(KERN_INFO "SCXLNXCommTerminate(): "
+			"Unregistering softint (IRQ %d)\n",
+			pComm->nSoftIntIrq);
+		free_irq(pComm->nSoftIntIrq, pComm);
+	}
+}
diff --git a/security/tf_driver/scxlnx_conn.c b/security/tf_driver/scxlnx_conn.c
new file mode 100644
index 000000000000..cac8e0e795e2
--- /dev/null
+++ b/security/tf_driver/scxlnx_conn.c
@@ -0,0 +1,1530 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/types.h>
+
+#include "s_version.h"
+
+#include "scx_protocol.h"
+#include "scxlnx_defs.h"
+#include "scxlnx_util.h"
+#include "scxlnx_comm.h"
+#include "scxlnx_conn.h"
+
+#ifdef CONFIG_TF_ZEBRA
+#include "scx_public_crypto.h"
+#endif
+
+/*----------------------------------------------------------------------------
+ * Management of the shared memory blocks.
+ *
+ * Shared memory blocks are the blocks registered through
+ * the commands REGISTER_SHARED_MEMORY and POWER_MANAGEMENT
+ *----------------------------------------------------------------------------*/
+
+/**
+ * Unmaps a shared memory.
+ * Busy-waits (via schedule()) until the descriptor's refcount drops to 1,
+ * releases the underlying pages, and either frees the descriptor or, for
+ * preallocated descriptors with nFullCleanup == 0, returns it to the
+ * connection's free list.  A NULL pShmemDesc is a no-op.
+ **/
+static void SCXLNXConnUnmapShmem(
+	struct SCXLNX_CONNECTION *pConn,
+	struct SCXLNX_SHMEM_DESC *pShmemDesc,
+	u32 nFullCleanup)
+{
+	/* check pShmemDesc contains a descriptor */
+	if (pShmemDesc == NULL)
+		return;
+
+	dprintk(KERN_DEBUG "SCXLNXConnUnmapShmem(%p)\n", pShmemDesc);
+
+retry:
+	mutex_lock(&(pConn->sharedMemoriesMutex));
+	if (atomic_read(&pShmemDesc->nRefCnt) > 1) {
+		/*
+		 * Shared mem still in use, wait for other operations completion
+		 * before actually unmapping it.
+		 */
+		dprintk(KERN_INFO "Descriptor in use\n");
+		mutex_unlock(&(pConn->sharedMemoriesMutex));
+		schedule();
+		goto retry;
+	}
+
+	SCXLNXCommReleaseSharedMemory(
+		&(pConn->sAllocationContext),
+		pShmemDesc,
+		nFullCleanup);
+
+	list_del(&(pShmemDesc->list));
+
+	if ((pShmemDesc->nType == SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM) ||
+			(nFullCleanup != 0)) {
+		internal_kfree(pShmemDesc);
+
+		atomic_dec(&(pConn->nShmemAllocated));
+	} else {
+		/*
+		 * This is a preallocated shared memory, add to free list
+		 * Since the device context is unmapped last, it is
+		 * always the first element of the free list if no
+		 * device context has been created
+		 */
+		pShmemDesc->hIdentifier = 0;
+		list_add(&(pShmemDesc->list), &(pConn->sFreeSharedMemoryList));
+	}
+
+	mutex_unlock(&(pConn->sharedMemoriesMutex));
+}
+
+
+/**
+ * Find the first available slot for a new block of shared memory
+ * and map the user buffer.
+ * Takes a preallocated descriptor from the free list, or kmallocs a new
+ * one (bounded by SCXLNX_SHMEM_MAX_COUNT), moves it to the used list and
+ * fills the L1 descriptors for the buffer.
+ * Update the pDescriptors to L1 descriptors.
+ * Update the pBufferStartOffset and pBufferSize fields.
+ * *ppShmemDesc is updated to the mapped shared memory descriptor.
+ * Returns 0 on success or a negative error code; on error the (possibly
+ * half-initialized) descriptor is unmapped/recycled.
+ **/
+static int SCXLNXConnMapShmem(
+	struct SCXLNX_CONNECTION *pConn,
+	u32 nBufferVAddr,
+	/* flags for read-write access rights on the memory */
+	u32 nFlags,
+	bool bInUserSpace,
+	u32 pDescriptors[SCX_MAX_COARSE_PAGES],
+	u32 *pBufferStartOffset,
+	u32 *pBufferSize,
+	struct SCXLNX_SHMEM_DESC **ppShmemDesc,
+	u32 *pnDescriptorCount)
+{
+	struct SCXLNX_SHMEM_DESC *pShmemDesc = NULL;
+	int nError;
+
+	dprintk(KERN_INFO "SCXLNXConnMapShmem(%p, %p, flags = 0x%08x)\n",
+		pConn,
+		(void *) nBufferVAddr,
+		nFlags);
+
+	mutex_lock(&(pConn->sharedMemoriesMutex));
+
+	/*
+	 * Check the list of free shared memory
+	 * is not empty
+	 */
+	if (list_empty(&(pConn->sFreeSharedMemoryList))) {
+		if (atomic_read(&(pConn->nShmemAllocated)) ==
+				SCXLNX_SHMEM_MAX_COUNT) {
+			printk(KERN_ERR "SCXLNXConnMapShmem(%p):"
+				" maximum shared memories already registered\n",
+				pConn);
+			nError = -ENOMEM;
+			goto error;
+		}
+
+		atomic_inc(&(pConn->nShmemAllocated));
+
+		/* no descriptor available, allocate a new one */
+
+		pShmemDesc = (struct SCXLNX_SHMEM_DESC *) internal_kmalloc(
+			sizeof(*pShmemDesc), GFP_KERNEL);
+		if (pShmemDesc == NULL) {
+			printk(KERN_ERR "SCXLNXConnMapShmem(%p):"
+				" failed to allocate descriptor\n",
+				pConn);
+			nError = -ENOMEM;
+			goto error;
+		}
+
+		/* Initialize the structure */
+		pShmemDesc->nType = SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM;
+		atomic_set(&pShmemDesc->nRefCnt, 1);
+		INIT_LIST_HEAD(&(pShmemDesc->list));
+	} else {
+		/* take the first free shared memory descriptor */
+		pShmemDesc = list_entry(pConn->sFreeSharedMemoryList.next,
+			struct SCXLNX_SHMEM_DESC, list);
+		list_del(&(pShmemDesc->list));
+	}
+
+	/* Add the descriptor to the used list */
+	list_add(&(pShmemDesc->list), &(pConn->sUsedSharedMemoryList));
+
+	nError = SCXLNXCommFillDescriptorTable(
+		&(pConn->sAllocationContext),
+		pShmemDesc,
+		nBufferVAddr,
+		pConn->ppVmas,
+		pDescriptors,
+		pBufferSize,
+		pBufferStartOffset,
+		bInUserSpace,
+		nFlags,
+		pnDescriptorCount);
+
+	if (nError != 0) {
+		dprintk(KERN_ERR "SCXLNXConnMapShmem(%p):"
+			" SCXLNXCommFillDescriptorTable failed with error "
+			"code %d!\n",
+			pConn,
+			nError);
+		goto error;
+	}
+	pShmemDesc->pBuffer = (u8 *) nBufferVAddr;
+
+	/*
+	 * Successful completion.
+	 */
+	*ppShmemDesc = pShmemDesc;
+	mutex_unlock(&(pConn->sharedMemoriesMutex));
+	dprintk(KERN_DEBUG "SCXLNXConnMapShmem: success\n");
+	return 0;
+
+
+	/*
+	 * Error handling.
+	 * The mutex is released before SCXLNXConnUnmapShmem, which
+	 * re-acquires it itself.
+	 */
+error:
+	mutex_unlock(&(pConn->sharedMemoriesMutex));
+	dprintk(KERN_ERR "SCXLNXConnMapShmem: failure with error code %d\n",
+		nError);
+
+	SCXLNXConnUnmapShmem(
+		pConn,
+		pShmemDesc,
+		0);
+
+	return nError;
+}
+
+
+
+/* This function is a copy of the find_vma() function
+in linux kernel 2.6.15 version with some fixes :
+ - memory block may end on vm_end
+ - check the full memory block is in the memory area
+ - guarantee NULL is returned if no memory area is found
+Returns the VMA fully containing [addr, addr+size), or NULL.
+Caller must hold mm->mmap_sem (see the down_read() in
+SCXLNXConnValidateSharedMemoryBlockAndFlags). */
+struct vm_area_struct *SCXLNXConnFindVma(struct mm_struct *mm,
+	unsigned long addr, unsigned long size)
+{
+	struct vm_area_struct *vma = NULL;
+
+	dprintk(KERN_INFO
+		"SCXLNXConnFindVma addr=0x%lX size=0x%lX\n", addr, size);
+
+	if (mm) {
+		/* Check the cache first. */
+		/* (Cache hit rate is typically around 35%.) */
+		vma = mm->mmap_cache;
+		if (!(vma && vma->vm_end >= (addr+size) &&
+				vma->vm_start <= addr)) {
+			/* Cache miss: walk the mm's VMA red-black tree */
+			struct rb_node *rb_node;
+
+			rb_node = mm->mm_rb.rb_node;
+			vma = NULL;
+
+			while (rb_node) {
+				struct vm_area_struct *vma_tmp;
+
+				vma_tmp = rb_entry(rb_node,
+					struct vm_area_struct, vm_rb);
+
+				dprintk(KERN_INFO
+					"vma_tmp->vm_start=0x%lX"
+					"vma_tmp->vm_end=0x%lX\n",
+					vma_tmp->vm_start,
+					vma_tmp->vm_end);
+
+				if (vma_tmp->vm_end >= (addr+size)) {
+					vma = vma_tmp;
+					if (vma_tmp->vm_start <= addr)
+						break;
+
+					rb_node = rb_node->rb_left;
+				} else {
+					rb_node = rb_node->rb_right;
+				}
+			}
+
+			if (vma)
+				mm->mmap_cache = vma;
+			/* exhausted the tree without a containing VMA */
+			if (rb_node == NULL)
+				vma = NULL;
+		}
+	}
+	return vma;
+}
+
+/*
+ * Validates that the whole user range [pSharedMemory,
+ * pSharedMemory + nSharedMemorySize) is covered by VMAs of the current
+ * process with the access rights implied by nFlags (VM_READ for
+ * SCX_SHMEM_TYPE_READ, VM_WRITE for SCX_SHMEM_TYPE_WRITE).
+ * The range is walked in page-sized chunks, the first chunk being
+ * trimmed to the page boundary.  A zero-sized block is always valid.
+ * Returns 0 if valid, -EINVAL on address overflow, -EFAULT otherwise.
+ */
+static int SCXLNXConnValidateSharedMemoryBlockAndFlags(
+	void *pSharedMemory,
+	u32 nSharedMemorySize,
+	u32 nFlags)
+{
+	struct vm_area_struct *vma;
+	unsigned long nSharedMemory = (unsigned long) pSharedMemory;
+	u32 nChunk;
+
+	if (nSharedMemorySize == 0)
+		/* This is always valid */
+		return 0;
+
+	if ((nSharedMemory + nSharedMemorySize) < nSharedMemory)
+		/* Overflow */
+		return -EINVAL;
+
+	down_read(&current->mm->mmap_sem);
+
+	/*
+	 * When looking for a memory address, split buffer into chunks of
+	 * size=PAGE_SIZE.
+	 */
+	nChunk = PAGE_SIZE - (nSharedMemory & (PAGE_SIZE-1));
+	if (nChunk > nSharedMemorySize)
+		nChunk = nSharedMemorySize;
+
+	do {
+		vma = SCXLNXConnFindVma(current->mm, nSharedMemory, nChunk);
+
+		if (vma == NULL)
+			goto error;
+
+		if (nFlags & SCX_SHMEM_TYPE_READ)
+			if (!(vma->vm_flags & VM_READ))
+				goto error;
+		if (nFlags & SCX_SHMEM_TYPE_WRITE)
+			if (!(vma->vm_flags & VM_WRITE))
+				goto error;
+
+		nSharedMemorySize -= nChunk;
+		nSharedMemory += nChunk;
+		nChunk = (nSharedMemorySize <= PAGE_SIZE ?
+				nSharedMemorySize : PAGE_SIZE);
+	} while (nSharedMemorySize != 0);
+
+	up_read(&current->mm->mmap_sem);
+	return 0;
+
+error:
+	up_read(&current->mm->mmap_sem);
+	dprintk(KERN_ERR "SCXLNXConnValidateSharedMemoryBlockAndFlags: "
+		"return error\n");
+	return -EFAULT;
+}
+
+
+/*
+ * Maps a temporary memory reference (tmpref) parameter of a command.
+ * Derives the access flags from nParamType, handles NULL and empty
+ * tmprefs without mapping, and otherwise validates and maps the user
+ * buffer, rewriting pTempMemRef->nDescriptor/nOffset/nSize in place.
+ * *ppShmemDesc receives the shared memory descriptor (or NULL when
+ * nothing was mapped).
+ * Returns 0 on success or an error code.
+ * NOTE(review): nError is declared u32 but is assigned -EINVAL and the
+ * negative results of the called helpers, and the function returns int;
+ * it should be a plain int.  Flagged only -- changing it would alter the
+ * recorded patch.
+ */
+static int SCXLNXConnMapTempShMem(struct SCXLNX_CONNECTION *pConn,
+	struct SCX_COMMAND_PARAM_TEMP_MEMREF *pTempMemRef,
+	u32 nParamType,
+	struct SCXLNX_SHMEM_DESC **ppShmemDesc)
+{
+	u32 nFlags;
+	u32 nError = S_SUCCESS;
+
+	dprintk(KERN_INFO "SCXLNXConnMapTempShMem(%p, "
+		"0x%08x[size=0x%08x], offset=0x%08x)\n",
+		pConn,
+		pTempMemRef->nDescriptor,
+		pTempMemRef->nSize,
+		pTempMemRef->nOffset);
+
+	/* map the parameter direction onto read/write access flags */
+	switch (nParamType) {
+	case SCX_PARAM_TYPE_MEMREF_TEMP_INPUT:
+		nFlags = SCX_SHMEM_TYPE_READ;
+		break;
+	case SCX_PARAM_TYPE_MEMREF_TEMP_OUTPUT:
+		nFlags = SCX_SHMEM_TYPE_WRITE;
+		break;
+	case SCX_PARAM_TYPE_MEMREF_TEMP_INOUT:
+		nFlags = SCX_SHMEM_TYPE_WRITE | SCX_SHMEM_TYPE_READ;
+		break;
+	default:
+		nError = -EINVAL;
+		goto error;
+	}
+
+	if (pTempMemRef->nDescriptor == 0) {
+		/* NULL tmpref */
+		pTempMemRef->nOffset = 0;
+		*ppShmemDesc = NULL;
+	} else if ((pTempMemRef->nDescriptor != 0) &&
+			(pTempMemRef->nSize == 0)) {
+		/* Empty tmpref: pass the raw address through as the offset */
+		pTempMemRef->nOffset = pTempMemRef->nDescriptor;
+		pTempMemRef->nDescriptor = 0;
+		pTempMemRef->nSize = 0;
+		*ppShmemDesc = NULL;
+	} else {
+		/* Map the temp shmem block */
+
+		u32 nSharedMemDescriptors[SCX_MAX_COARSE_PAGES];
+		u32 nDescriptorCount;
+
+		nError = SCXLNXConnValidateSharedMemoryBlockAndFlags(
+			(void *) pTempMemRef->nDescriptor,
+			pTempMemRef->nSize,
+			nFlags);
+		if (nError != 0)
+			goto error;
+
+		nError = SCXLNXConnMapShmem(
+			pConn,
+			pTempMemRef->nDescriptor,
+			nFlags,
+			true,
+			nSharedMemDescriptors,
+			&(pTempMemRef->nOffset),
+			&(pTempMemRef->nSize),
+			ppShmemDesc,
+			&nDescriptorCount);
+		/* the first coarse-page descriptor replaces the user address */
+		pTempMemRef->nDescriptor = nSharedMemDescriptors[0];
+	}
+
+error:
+	return nError;
+}
+
+/*
+ * Clean up a list of shared memory descriptors.
+ */
+/*
+ * Clean up a list of shared memory descriptors.
+ *
+ * Repeatedly unmaps (and thereby unlinks) the head of pList until the
+ * list is empty. Relies on SCXLNXConnUnmapShmem removing the entry
+ * from the list; otherwise this loop would not terminate.
+ */
+static void SCXLNXSharedMemoryCleanupList(
+		struct SCXLNX_CONNECTION *pConn,
+		struct list_head *pList)
+{
+	while (!list_empty(pList)) {
+		struct SCXLNX_SHMEM_DESC *pShmemDesc;
+
+		pShmemDesc = list_entry(pList->next, struct SCXLNX_SHMEM_DESC,
+			list);
+
+		/* Third argument nonzero: full release of the descriptor */
+		SCXLNXConnUnmapShmem(pConn, pShmemDesc, 1);
+	}
+}
+
+
+/*
+ * Clean up the shared memory information in the connection.
+ * Releases all allocated pages.
+ */
+/*
+ * Clean up the shared memory information in the connection.
+ * Releases all allocated pages.
+ *
+ * Counterpart of SCXLNXConnInitSharedMemory; also safe to call from
+ * that function's error path since it only releases what exists.
+ */
+void SCXLNXConnCleanupSharedMemory(struct SCXLNX_CONNECTION *pConn)
+{
+	/* clean up the list of used and free descriptors.
+	 * done outside the mutex, because SCXLNXConnUnmapShmem already
+	 * mutex()ed
+	 */
+	SCXLNXSharedMemoryCleanupList(pConn,
+		&pConn->sUsedSharedMemoryList);
+	SCXLNXSharedMemoryCleanupList(pConn,
+		&pConn->sFreeSharedMemoryList);
+
+	mutex_lock(&(pConn->sharedMemoriesMutex));
+
+	/* Free the Vmas page */
+	if (pConn->ppVmas) {
+		internal_free_page((unsigned long) pConn->ppVmas);
+		pConn->ppVmas = NULL;
+	}
+
+	SCXLNXReleaseCoarsePageTableAllocator(
+		&(pConn->sAllocationContext));
+
+	mutex_unlock(&(pConn->sharedMemoriesMutex));
+}
+
+
+/*
+ * Initialize the shared memory in a connection.
+ * Allocates the minimum memory to be provided
+ * for shared memory management
+ */
+/*
+ * Initialize the shared memory in a connection.
+ * Allocates the minimum memory to be provided
+ * for shared memory management
+ *
+ * Returns 0 on success or -ENOMEM; on failure everything allocated so
+ * far is released via SCXLNXConnCleanupSharedMemory before returning.
+ */
+int SCXLNXConnInitSharedMemory(struct SCXLNX_CONNECTION *pConn)
+{
+	int nError;
+	int nSharedMemoryDescriptorIndex;
+	int nCoarsePageIndex;
+
+	/*
+	 * We only need to initialize special elements and attempt to allocate
+	 * the minimum shared memory descriptors we want to support
+	 */
+
+	mutex_init(&(pConn->sharedMemoriesMutex));
+	INIT_LIST_HEAD(&(pConn->sFreeSharedMemoryList));
+	INIT_LIST_HEAD(&(pConn->sUsedSharedMemoryList));
+	atomic_set(&(pConn->nShmemAllocated), 0);
+
+	SCXLNXInitializeCoarsePageTableAllocator(
+		&(pConn->sAllocationContext));
+
+
+	/*
+	 * Preallocate 3 pages to increase the chances that a connection
+	 * succeeds in allocating shared mem
+	 */
+	for (nSharedMemoryDescriptorIndex = 0;
+	     nSharedMemoryDescriptorIndex < 3;
+	     nSharedMemoryDescriptorIndex++) {
+		struct SCXLNX_SHMEM_DESC *pShmemDesc =
+			(struct SCXLNX_SHMEM_DESC *) internal_kmalloc(
+				sizeof(*pShmemDesc), GFP_KERNEL);
+
+		if (pShmemDesc == NULL) {
+			printk(KERN_ERR "SCXLNXConnInitSharedMemory(%p):"
+				" failed to pre allocate descriptor %d\n",
+				pConn,
+				nSharedMemoryDescriptorIndex);
+			nError = -ENOMEM;
+			goto error;
+		}
+
+		/* Each descriptor gets a full set of coarse page tables */
+		for (nCoarsePageIndex = 0;
+		     nCoarsePageIndex < SCX_MAX_COARSE_PAGES;
+		     nCoarsePageIndex++) {
+			struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable;
+
+			pCoarsePageTable = SCXLNXAllocateCoarsePageTable(
+				&(pConn->sAllocationContext),
+				SCXLNX_PAGE_DESCRIPTOR_TYPE_PREALLOCATED);
+
+			if (pCoarsePageTable == NULL) {
+				printk(KERN_ERR "SCXLNXConnInitSharedMemory(%p)"
+					": descriptor %d coarse page %d - "
+					"SCXLNXConnAllocateCoarsePageTable() "
+					"failed\n",
+					pConn,
+					nSharedMemoryDescriptorIndex,
+					nCoarsePageIndex);
+				nError = -ENOMEM;
+				goto error;
+			}
+
+			pShmemDesc->pCoarsePageTable[nCoarsePageIndex] =
+				pCoarsePageTable;
+		}
+		pShmemDesc->nNumberOfCoarsePageTables = 0;
+
+		pShmemDesc->nType = SCXLNX_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM;
+		atomic_set(&pShmemDesc->nRefCnt, 1);
+
+		/*
+		 * add this preallocated descriptor to the list of free
+		 * descriptors Keep the device context specific one at the
+		 * beginning of the list
+		 */
+		INIT_LIST_HEAD(&(pShmemDesc->list));
+		list_add_tail(&(pShmemDesc->list),
+			&(pConn->sFreeSharedMemoryList));
+	}
+
+	/* allocate memory for the vmas structure */
+	pConn->ppVmas =
+		(struct vm_area_struct **) internal_get_zeroed_page(GFP_KERNEL);
+	if (pConn->ppVmas == NULL) {
+		printk(KERN_ERR "SCXLNXConnInitSharedMemory(%p):"
+			" ppVmas - failed to get_zeroed_page\n",
+			pConn);
+		nError = -ENOMEM;
+		goto error;
+	}
+
+	return 0;
+
+error:
+	/* Releases partially-built state, including list entries above */
+	SCXLNXConnCleanupSharedMemory(pConn);
+	return nError;
+}
+
+/*----------------------------------------------------------------------------
+ * Connection operations to the Secure World
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Sends a CREATE_DEVICE_CONTEXT message to the Secure World for this
+ * connection and, on success, stores the returned device context
+ * handle in pConn->hDeviceContext and moves the connection to the
+ * SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT state.
+ *
+ * Returns 0 on success; on failure returns the transport error, or
+ * -ENOMEM/-EFAULT mapped from the Secure World error code, after
+ * resetting the state to SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT.
+ */
+int SCXLNXConnCreateDeviceContext(
+	struct SCXLNX_CONNECTION *pConn)
+{
+	union SCX_COMMAND_MESSAGE sMessage;
+	union SCX_ANSWER_MESSAGE sAnswer;
+	int nError = 0;
+
+	dprintk(KERN_INFO "SCXLNXConnCreateDeviceContext(%p)\n",
+			pConn);
+
+	sMessage.sCreateDeviceContextMessage.nMessageType =
+		SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT;
+	/* Message size is the payload after the header, in 32-bit words */
+	sMessage.sCreateDeviceContextMessage.nMessageSize =
+		(sizeof(struct SCX_COMMAND_CREATE_DEVICE_CONTEXT)
+			- sizeof(struct SCX_COMMAND_HEADER))/sizeof(u32);
+	/* Kernel pointers used as opaque 32-bit correlation IDs */
+	sMessage.sCreateDeviceContextMessage.nOperationID = (u32) &sAnswer;
+	sMessage.sCreateDeviceContextMessage.nDeviceContextID = (u32) pConn;
+
+	nError = SCXLNXCommSendReceive(
+		&pConn->pDevice->sm,
+		&sMessage,
+		&sAnswer,
+		pConn,
+		true);
+
+	if ((nError != 0) ||
+		(sAnswer.sCreateDeviceContextAnswer.nErrorCode != S_SUCCESS))
+		goto error;
+
+	/*
+	 * CREATE_DEVICE_CONTEXT succeeded,
+	 * store device context handler and update connection status
+	 */
+	pConn->hDeviceContext =
+		sAnswer.sCreateDeviceContextAnswer.hDeviceContext;
+	spin_lock(&(pConn->stateLock));
+	pConn->nState = SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT;
+	spin_unlock(&(pConn->stateLock));
+
+	/* successful completion */
+	dprintk(KERN_INFO "SCXLNXConnCreateDeviceContext(%p):"
+		" hDeviceContext=0x%08x\n",
+		pConn,
+		sAnswer.sCreateDeviceContextAnswer.hDeviceContext);
+	return 0;
+
+error:
+	if (nError != 0) {
+		dprintk(KERN_ERR "SCXLNXConnCreateDeviceContext failed with "
+			"error %d\n", nError);
+	} else {
+		/*
+		 * We sent a DeviceCreateContext. The state is now
+		 * SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT It has to be
+		 * reset if we ever want to send a DeviceCreateContext again
+		 */
+		spin_lock(&(pConn->stateLock));
+		pConn->nState = SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT;
+		spin_unlock(&(pConn->stateLock));
+		dprintk(KERN_ERR "SCXLNXConnCreateDeviceContext failed with "
+			"nErrorCode 0x%08X\n",
+			sAnswer.sCreateDeviceContextAnswer.nErrorCode);
+		if (sAnswer.sCreateDeviceContextAnswer.nErrorCode ==
+			S_ERROR_OUT_OF_MEMORY)
+			nError = -ENOMEM;
+		else
+			nError = -EFAULT;
+	}
+
+	return nError;
+}
+
+/* Check that the current application belongs to the
+ * requested GID */
+static bool SCXLNXConnCheckGID(gid_t nRequestedGID)
+{
+ if (nRequestedGID == current_egid()) {
+ return true;
+ } else {
+ u32 nSize;
+ u32 i;
+ /* Look in the supplementary GIDs */
+ get_group_info(GROUP_INFO);
+ nSize = GROUP_INFO->ngroups;
+ for (i = 0; i < nSize; i++)
+ if (nRequestedGID == GROUP_AT(GROUP_INFO , i))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Opens a client session to the Secure World
+ */
+/*
+ * Opens a client session to the Secure World
+ *
+ * Fills in the login data of the OPEN_CLIENT_SESSION message according
+ * to nLoginType (checking credentials where required), maps any
+ * temporary memory reference parameters, then exchanges the message
+ * with the Secure World. The temporary mappings are released before
+ * returning, whatever the outcome.
+ *
+ * Returns 0 when the message exchange completed (the Secure World
+ * error code is then in pAnswer), -EACCES on credential mismatch,
+ * -EOPNOTSUPP for an unknown login type, or an error from the
+ * temporary memory mapping / transport.
+ */
+int SCXLNXConnOpenClientSession(
+	struct SCXLNX_CONNECTION *pConn,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	union SCX_ANSWER_MESSAGE *pAnswer)
+{
+	int nError = 0;
+	struct SCXLNX_SHMEM_DESC *pShmemDesc[4] = {NULL};
+	u32 i;
+
+	dprintk(KERN_INFO "SCXLNXConnOpenClientSession(%p)\n", pConn);
+
+	/*
+	 * Initialize the message size with no login data. This will be later
+	 * adjusted in the cases below.
+	 * (The 20 subtracted here is presumably sizeof(sLoginData) — the
+	 * maximum login payload — TODO confirm against scx_protocol.h.)
+	 */
+	pMessage->sOpenClientSessionMessage.nMessageSize =
+		(sizeof(struct SCX_COMMAND_OPEN_CLIENT_SESSION) - 20
+			- sizeof(struct SCX_COMMAND_HEADER))/4;
+
+	switch (pMessage->sOpenClientSessionMessage.nLoginType) {
+	case SCX_LOGIN_PUBLIC:
+		 /* Nothing to do */
+		break;
+
+	case SCX_LOGIN_USER:
+		/*
+		 * Send the EUID of the calling application in the login data.
+		 * Update message size.
+		 */
+		*(u32 *) &pMessage->sOpenClientSessionMessage.sLoginData =
+			current_euid();
+#ifndef CONFIG_ANDROID
+		pMessage->sOpenClientSessionMessage.nLoginType =
+			(u32) SCX_LOGIN_USER_LINUX_EUID;
+#else
+		pMessage->sOpenClientSessionMessage.nLoginType =
+			(u32) SCX_LOGIN_USER_ANDROID_EUID;
+#endif
+
+		/* Added one word */
+		pMessage->sOpenClientSessionMessage.nMessageSize += 1;
+		break;
+
+	case SCX_LOGIN_GROUP: {
+		/* Check requested GID */
+		gid_t  nRequestedGID =
+			*(u32 *) pMessage->sOpenClientSessionMessage.sLoginData;
+
+		if (!SCXLNXConnCheckGID(nRequestedGID)) {
+			dprintk(KERN_ERR "SCXLNXConnOpenClientSession(%p) "
+				"SCX_LOGIN_GROUP: requested GID (0x%x) does "
+				"not match real eGID (0x%x)"
+				"or any of the supplementary GIDs\n",
+				pConn, nRequestedGID, current_egid());
+			nError = -EACCES;
+			goto error;
+		}
+#ifndef CONFIG_ANDROID
+		pMessage->sOpenClientSessionMessage.nLoginType =
+			SCX_LOGIN_GROUP_LINUX_GID;
+#else
+		pMessage->sOpenClientSessionMessage.nLoginType =
+			SCX_LOGIN_GROUP_ANDROID_GID;
+#endif
+
+		pMessage->sOpenClientSessionMessage.nMessageSize += 1; /* GID */
+		break;
+	}
+
+#ifndef CONFIG_ANDROID
+	case SCX_LOGIN_APPLICATION: {
+		/*
+		 * Compute SHA-1 hash of the application fully-qualified path
+		 * name.  Truncate the hash to 16 bytes and send it as login
+		 * data.  Update message size.
+		 */
+		u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+		nError = SCXLNXConnHashApplicationPathAndData(pSHA1Hash,
+			NULL, 0);
+		if (nError != 0) {
+			dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
+				"error in SCXLNXConnHashApplicationPath"
+				"AndData\n");
+			goto error;
+		}
+		memcpy(&pMessage->sOpenClientSessionMessage.sLoginData,
+			pSHA1Hash, 16);
+		pMessage->sOpenClientSessionMessage.nLoginType =
+			SCX_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH;
+		/* 16 bytes */
+		pMessage->sOpenClientSessionMessage.nMessageSize += 4;
+		break;
+	}
+#else
+	case SCX_LOGIN_APPLICATION:
+		/*
+		 * Send the real UID of the calling application in the login
+		 * data. Update message size.
+		 */
+		*(u32 *) &pMessage->sOpenClientSessionMessage.sLoginData =
+			current_uid();
+
+		pMessage->sOpenClientSessionMessage.nLoginType =
+			(u32) SCX_LOGIN_APPLICATION_ANDROID_UID;
+
+		/* Added one word */
+		pMessage->sOpenClientSessionMessage.nMessageSize += 1;
+		break;
+#endif
+
+#ifndef CONFIG_ANDROID
+	case SCX_LOGIN_APPLICATION_USER: {
+		/*
+		 * Compute SHA-1 hash of the concatenation of the application
+		 * fully-qualified path name and the EUID of the calling
+		 * application.  Truncate the hash to 16 bytes and send it as
+		 * login data.  Update message size.
+		 */
+		u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+		nError = SCXLNXConnHashApplicationPathAndData(pSHA1Hash,
+			(u8 *) &(current_euid()), sizeof(current_euid()));
+		if (nError != 0) {
+			dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
+				"error in SCXLNXConnHashApplicationPath"
+				"AndData\n");
+			goto error;
+		}
+		memcpy(&pMessage->sOpenClientSessionMessage.sLoginData,
+			pSHA1Hash, 16);
+		pMessage->sOpenClientSessionMessage.nLoginType =
+			SCX_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH;
+
+		/* 16 bytes */
+		pMessage->sOpenClientSessionMessage.nMessageSize += 4;
+
+		break;
+	}
+#else
+	case SCX_LOGIN_APPLICATION_USER:
+		/*
+		 * Send the real UID and the EUID of the calling application in
+		 * the login data. Update message size.
+		 */
+		*(u32 *) &pMessage->sOpenClientSessionMessage.sLoginData =
+			current_uid();
+		*(u32 *) &pMessage->sOpenClientSessionMessage.sLoginData[4] =
+			current_euid();
+
+		pMessage->sOpenClientSessionMessage.nLoginType =
+			SCX_LOGIN_APPLICATION_USER_ANDROID_UID_EUID;
+
+		/* Added two words */
+		pMessage->sOpenClientSessionMessage.nMessageSize += 2;
+		break;
+#endif
+
+#ifndef CONFIG_ANDROID
+	case SCX_LOGIN_APPLICATION_GROUP: {
+		/*
+		 * Check requested GID.  Compute SHA-1 hash of the concatenation
+		 * of the application fully-qualified path name and the
+		 * requested GID.  Update message size
+		 */
+		gid_t  nRequestedGID;
+		u8     pSHA1Hash[SHA1_DIGEST_SIZE];
+
+		nRequestedGID = *(u32 *) &pMessage->sOpenClientSessionMessage.
+			sLoginData;
+
+		if (!SCXLNXConnCheckGID(nRequestedGID)) {
+			dprintk(KERN_ERR "SCXLNXConnOpenClientSession(%p) "
+			"SCX_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
+			"does not match real eGID (0x%x)"
+			"or any of the supplementary GIDs\n",
+			pConn, nRequestedGID, current_egid());
+			nError = -EACCES;
+			goto error;
+		}
+
+		nError = SCXLNXConnHashApplicationPathAndData(pSHA1Hash,
+			&nRequestedGID, sizeof(u32));
+		if (nError != 0) {
+			dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
+				"error in SCXLNXConnHashApplicationPath"
+				"AndData\n");
+			goto error;
+		}
+
+		memcpy(&pMessage->sOpenClientSessionMessage.sLoginData,
+			pSHA1Hash, 16);
+		pMessage->sOpenClientSessionMessage.nLoginType =
+			SCX_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH;
+
+		/* 16 bytes */
+		pMessage->sOpenClientSessionMessage.nMessageSize += 4;
+		break;
+	}
+#else
+	case SCX_LOGIN_APPLICATION_GROUP: {
+		/*
+		 * Check requested GID. Send the real UID and the requested GID
+		 * in the login data. Update message size.
+		 */
+		gid_t nRequestedGID;
+
+		nRequestedGID = *(u32 *) &pMessage->sOpenClientSessionMessage.
+			sLoginData;
+
+		if (!SCXLNXConnCheckGID(nRequestedGID)) {
+			dprintk(KERN_ERR "SCXLNXConnOpenClientSession(%p) "
+			"SCX_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
+			"does not match real eGID (0x%x)"
+			"or any of the supplementary GIDs\n",
+			pConn, nRequestedGID, current_egid());
+			nError = -EACCES;
+			goto error;
+		}
+
+		*(u32 *) &pMessage->sOpenClientSessionMessage.sLoginData =
+			current_uid();
+		*(u32 *) &pMessage->sOpenClientSessionMessage.sLoginData[4] =
+			nRequestedGID;
+
+		pMessage->sOpenClientSessionMessage.nLoginType =
+			SCX_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID;
+
+		/* Added two words */
+		pMessage->sOpenClientSessionMessage.nMessageSize += 2;
+
+		break;
+	}
+#endif
+
+	case SCX_LOGIN_PRIVILEGED:
+		/*
+		 * Check that calling application either hash EUID=0 or has
+		 * EGID=0
+		 */
+		if (current_euid() != 0 && current_egid() != 0) {
+			dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
+				" user %d, group %d not allowed to open "
+				"session with SCX_LOGIN_PRIVILEGED\n",
+				current_euid(), current_egid());
+			nError = -EACCES;
+			goto error;
+		}
+		pMessage->sOpenClientSessionMessage.nLoginType =
+			SCX_LOGIN_PRIVILEGED;
+		break;
+
+	case SCX_LOGIN_AUTHENTICATION: {
+		/*
+		 * Compute SHA-1 hash of the application binary
+		 * Send this hash as the login data (20 bytes)
+		 */
+
+		u8 *pHash;
+		pHash = &(pMessage->sOpenClientSessionMessage.sLoginData[0]);
+
+		nError = SCXLNXConnGetCurrentProcessHash(pHash);
+		if (nError != 0) {
+			dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
+				"error in SCXLNXConnGetCurrentProcessHash\n");
+			goto error;
+		}
+		pMessage->sOpenClientSessionMessage.nLoginType =
+			SCX_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH;
+
+		/* 20 bytes */
+		pMessage->sOpenClientSessionMessage.nMessageSize += 5;
+		break;
+	}
+
+	default:
+		 dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
+			"unknown nLoginType(%08X)\n",
+			pMessage->sOpenClientSessionMessage.nLoginType);
+		 nError = -EOPNOTSUPP;
+		 goto error;
+	}
+
+	/* Map the temporary memory references */
+	for (i = 0; i < 4; i++) {
+		int nParamType;
+		nParamType = SCX_GET_PARAM_TYPE(
+			pMessage->sOpenClientSessionMessage.nParamTypes, i);
+		/* Temp memref flag set, registered memref flag clear */
+		if ((nParamType & (SCX_PARAM_TYPE_MEMREF_FLAG |
+				   SCX_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
+				== SCX_PARAM_TYPE_MEMREF_FLAG) {
+			/* Map temp mem ref */
+			nError = SCXLNXConnMapTempShMem(pConn,
+				&pMessage->sOpenClientSessionMessage.
+					sParams[i].sTempMemref,
+				nParamType,
+				&pShmemDesc[i]);
+			if (nError != 0) {
+				dprintk(KERN_ERR "SCXLNXConnOpenClientSession: "
+					"unable to map temporary memory block "
+					"(%08X)\n", nError);
+				goto error;
+			}
+		}
+	}
+
+	/* Fill the handle of the Device Context */
+	pMessage->sOpenClientSessionMessage.hDeviceContext =
+		pConn->hDeviceContext;
+
+	nError = SCXLNXCommSendReceive(
+		&pConn->pDevice->sm,
+		pMessage,
+		pAnswer,
+		pConn,
+		true);
+
+error:
+	/* Unmap the temporary memory references */
+	for (i = 0; i < 4; i++)
+		if (pShmemDesc[i] != NULL)
+			SCXLNXConnUnmapShmem(pConn, pShmemDesc[i], 0);
+
+	if (nError != 0)
+		dprintk(KERN_ERR "SCXLNXConnOpenClientSession returns %d\n",
+			nError);
+	else
+		dprintk(KERN_ERR "SCXLNXConnOpenClientSession returns "
+			"nErrorCode 0x%08X\n",
+			pAnswer->sOpenClientSessionAnswer.nErrorCode);
+
+	return nError;
+}
+
+
+/*
+ * Closes a client session from the Secure World
+ */
+/*
+ * Closes a client session from the Secure World
+ *
+ * Fills in the message size and device context handle, then forwards
+ * the CLOSE_CLIENT_SESSION message. Returns the transport error (0 on
+ * success; the Secure World error code is left in pAnswer).
+ */
+int SCXLNXConnCloseClientSession(
+	struct SCXLNX_CONNECTION *pConn,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	union SCX_ANSWER_MESSAGE *pAnswer)
+{
+	int nError = 0;
+
+	dprintk(KERN_DEBUG "SCXLNXConnCloseClientSession(%p)\n", pConn);
+
+	pMessage->sCloseClientSessionMessage.nMessageSize =
+		(sizeof(struct SCX_COMMAND_CLOSE_CLIENT_SESSION) -
+			sizeof(struct SCX_COMMAND_HEADER)) / 4;
+	pMessage->sCloseClientSessionMessage.hDeviceContext =
+		pConn->hDeviceContext;
+
+	nError = SCXLNXCommSendReceive(
+		&pConn->pDevice->sm,
+		pMessage,
+		pAnswer,
+		pConn,
+		true);
+
+	if (nError != 0)
+		dprintk(KERN_ERR "SCXLNXConnCloseClientSession returns %d\n",
+			nError);
+	else
+		dprintk(KERN_ERR "SCXLNXConnCloseClientSession returns "
+			"nError 0x%08X\n",
+			pAnswer->sCloseClientSessionAnswer.nErrorCode);
+
+	return nError;
+}
+
+
+/*
+ * Registers a shared memory to the Secure World
+ */
+/*
+ * Registers a shared memory to the Secure World
+ *
+ * Validates the user buffer against the caller's VMAs, maps it
+ * (unless empty), rewrites the message with the page descriptors and
+ * the device context handle, and exchanges it with the Secure World.
+ * On success the Secure World block handle is stored in the shared
+ * memory descriptor; on any failure the mapping is undone.
+ *
+ * Returns 0 on success, otherwise the validation/mapping/transport
+ * error or the value propagated from the Secure World answer.
+ */
+int SCXLNXConnRegisterSharedMemory(
+	struct SCXLNX_CONNECTION *pConn,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	union SCX_ANSWER_MESSAGE *pAnswer)
+{
+	int nError = 0;
+	struct SCXLNX_SHMEM_DESC *pShmemDesc = NULL;
+
+	dprintk(KERN_INFO "SCXLNXConnRegisterSharedMemory(%p) "
+		"%p[0x%08X][0x%08x]\n",
+		pConn,
+		(void *) pMessage->sRegisterSharedMemoryMessage.
+			nSharedMemDescriptors[0],
+		pMessage->sRegisterSharedMemoryMessage.nSharedMemSize,
+		(u32)pMessage->sRegisterSharedMemoryMessage.nMemoryFlags);
+
+	/* nSharedMemDescriptors[0] initially holds the user address */
+	nError = SCXLNXConnValidateSharedMemoryBlockAndFlags(
+		(void *) pMessage->sRegisterSharedMemoryMessage.
+			nSharedMemDescriptors[0],
+		pMessage->sRegisterSharedMemoryMessage.nSharedMemSize,
+		(u32)pMessage->sRegisterSharedMemoryMessage.nMemoryFlags);
+	if (nError != 0)
+		goto error;
+
+	/* Initialize nMessageSize with no descriptors */
+	pMessage->sRegisterSharedMemoryMessage.nMessageSize
+		= (sizeof(struct SCX_COMMAND_REGISTER_SHARED_MEMORY) -
+			sizeof(struct SCX_COMMAND_HEADER)) / 4;
+
+	/* Map the shmem block and update the message */
+	if (pMessage->sRegisterSharedMemoryMessage.nSharedMemSize == 0) {
+		/* Empty shared mem */
+		pMessage->sRegisterSharedMemoryMessage.nSharedMemStartOffset =
+			pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemDescriptors[0];
+	} else {
+		u32 nDescriptorCount;
+		nError = SCXLNXConnMapShmem(
+			pConn,
+			pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemDescriptors[0],
+			pMessage->sRegisterSharedMemoryMessage.nMemoryFlags,
+			true,
+			pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemDescriptors,
+			&(pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemStartOffset),
+			&(pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemSize),
+			&pShmemDesc,
+			&nDescriptorCount);
+		if (nError != 0) {
+			dprintk(KERN_ERR "SCXLNXConnRegisterSharedMemory: "
+				"unable to map shared memory block\n");
+			goto error;
+		}
+		/* One extra word per coarse page descriptor */
+		pMessage->sRegisterSharedMemoryMessage.nMessageSize +=
+			nDescriptorCount;
+	}
+
+	/*
+	 * write the correct device context handle and the address of the shared
+	 * memory descriptor in the message
+	 */
+	pMessage->sRegisterSharedMemoryMessage.hDeviceContext =
+		pConn->hDeviceContext;
+	pMessage->sRegisterSharedMemoryMessage.nBlockID = (u32) pShmemDesc;
+
+	/* Send the updated message */
+	nError = SCXLNXCommSendReceive(
+		&pConn->pDevice->sm,
+		pMessage,
+		pAnswer,
+		pConn,
+		true);
+
+	if ((nError != 0) ||
+		(pAnswer->sRegisterSharedMemoryAnswer.nErrorCode
+			!= S_SUCCESS)) {
+		dprintk(KERN_ERR "SCXLNXConnRegisterSharedMemory: "
+			"operation failed. Unmap block\n");
+		goto error;
+	}
+
+	/* Saves the block handle returned by the secure world */
+	if (pShmemDesc != NULL)
+		pShmemDesc->hIdentifier =
+			pAnswer->sRegisterSharedMemoryAnswer.hBlock;
+
+	/* successful completion */
+	dprintk(KERN_INFO "SCXLNXConnRegisterSharedMemory(%p):"
+		" nBlockID=0x%08x hBlock=0x%08x\n",
+		pConn, pMessage->sRegisterSharedMemoryMessage.nBlockID,
+		pAnswer->sRegisterSharedMemoryAnswer.hBlock);
+	return 0;
+
+	/* error completion */
+error:
+	/* Safe when pShmemDesc is still NULL (nothing was mapped) */
+	SCXLNXConnUnmapShmem(
+		pConn,
+		pShmemDesc,
+		0);
+
+	if (nError != 0)
+		dprintk(KERN_ERR "SCXLNXConnRegisterSharedMemory returns %d\n",
+			nError);
+	else
+		dprintk(KERN_ERR "SCXLNXConnRegisterSharedMemory returns "
+			"nErrorCode 0x%08X\n",
+			pAnswer->sRegisterSharedMemoryAnswer.nErrorCode);
+
+	return nError;
+}
+
+
+/*
+ * Releases a shared memory from the Secure World
+ */
+/*
+ * Releases a shared memory from the Secure World
+ *
+ * Sends the RELEASE_SHARED_MEMORY message; on success the descriptor
+ * identified by the nBlockID returned in the answer (a pointer cast to
+ * u32 at registration time) is unmapped.
+ */
+int SCXLNXConnReleaseSharedMemory(
+	struct SCXLNX_CONNECTION *pConn,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	union SCX_ANSWER_MESSAGE *pAnswer)
+{
+	int nError = 0;
+
+	dprintk(KERN_DEBUG "SCXLNXConnReleaseSharedMemory(%p)\n", pConn);
+
+	pMessage->sReleaseSharedMemoryMessage.nMessageSize =
+		(sizeof(struct SCX_COMMAND_RELEASE_SHARED_MEMORY) -
+			sizeof(struct SCX_COMMAND_HEADER)) / 4;
+	pMessage->sReleaseSharedMemoryMessage.hDeviceContext =
+		pConn->hDeviceContext;
+
+	nError = SCXLNXCommSendReceive(
+		&pConn->pDevice->sm,
+		pMessage,
+		pAnswer,
+		pConn,
+		true);
+
+	if ((nError != 0) ||
+		(pAnswer->sReleaseSharedMemoryAnswer.nErrorCode != S_SUCCESS))
+		goto error;
+
+	/* Use nBlockID to get back the pointer to pShmemDesc */
+	SCXLNXConnUnmapShmem(
+		pConn,
+		(struct SCXLNX_SHMEM_DESC *)
+			pAnswer->sReleaseSharedMemoryAnswer.nBlockID,
+		0);
+
+	/* successful completion */
+	dprintk(KERN_INFO "SCXLNXConnReleaseSharedMemory(%p):"
+		" nBlockID=0x%08x hBlock=0x%08x\n",
+		pConn, pAnswer->sReleaseSharedMemoryAnswer.nBlockID,
+		pMessage->sReleaseSharedMemoryMessage.hBlock);
+	return 0;
+
+
+error:
+	if (nError != 0)
+		dprintk(KERN_ERR "SCXLNXConnReleaseSharedMemory returns %d\n",
+			nError);
+	else
+		dprintk(KERN_ERR "SCXLNXConnReleaseSharedMemory returns "
+			"nChannelStatus 0x%08X\n",
+			pAnswer->sReleaseSharedMemoryAnswer.nErrorCode);
+
+	return nError;
+
+}
+
+
+/*
+ * Invokes a client command to the Secure World
+ */
+/*
+ * Invokes a client command to the Secure World
+ *
+ * Optionally short-cuts through the public crypto path (TF_ZEBRA),
+ * otherwise maps the temporary memory reference parameters, exchanges
+ * the INVOKE_CLIENT_COMMAND message, and releases the temporary
+ * mappings before returning.
+ */
+int SCXLNXConnInvokeClientCommand(
+	struct SCXLNX_CONNECTION *pConn,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	union SCX_ANSWER_MESSAGE *pAnswer)
+{
+	int nError = 0;
+	struct SCXLNX_SHMEM_DESC *pShmemDesc[4] = {NULL};
+	int i;
+
+	dprintk(KERN_INFO "SCXLNXConnInvokeClientCommand(%p)\n", pConn);
+
+	/*
+	 * NOTE(review): writes the size through the
+	 * sReleaseSharedMemoryMessage view of the union while using the
+	 * INVOKE_CLIENT_COMMAND size — works only if nMessageSize sits in
+	 * the common header of every message; confirm in scx_protocol.h.
+	 */
+	pMessage->sReleaseSharedMemoryMessage.nMessageSize =
+		(sizeof(struct SCX_COMMAND_INVOKE_CLIENT_COMMAND) -
+			sizeof(struct SCX_COMMAND_HEADER)) / 4;
+
+#ifdef CONFIG_TF_ZEBRA
+	nError = SCXPublicCryptoTryShortcutedUpdate(pConn,
+		(struct SCX_COMMAND_INVOKE_CLIENT_COMMAND *) pMessage,
+		(struct SCX_ANSWER_INVOKE_CLIENT_COMMAND *) pAnswer);
+	if (nError == 0)
+		return nError;
+#endif
+
+	/* Map the tmprefs */
+	for (i = 0; i < 4; i++) {
+		int nParamType = SCX_GET_PARAM_TYPE(
+			pMessage->sInvokeClientCommandMessage.nParamTypes, i);
+
+		if ((nParamType & (SCX_PARAM_TYPE_MEMREF_FLAG |
+				   SCX_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
+				== SCX_PARAM_TYPE_MEMREF_FLAG) {
+			/* A temporary memref: map it */
+			nError = SCXLNXConnMapTempShMem(pConn,
+					&pMessage->sInvokeClientCommandMessage.
+						sParams[i].sTempMemref,
+					nParamType, &pShmemDesc[i]);
+			if (nError != 0) {
+				dprintk(KERN_ERR
+					"SCXLNXConnInvokeClientCommand: "
+					"unable to map temporary memory "
+					"block\n (%08X)", nError);
+				goto error;
+			}
+		}
+	}
+
+	pMessage->sInvokeClientCommandMessage.hDeviceContext =
+		pConn->hDeviceContext;
+
+	nError = SCXLNXCommSendReceive(&pConn->pDevice->sm, pMessage,
+		pAnswer, pConn, true);
+
+error:
+	/* Unmap de temp mem refs */
+	for (i = 0; i < 4; i++) {
+		if (pShmemDesc[i] != NULL) {
+			dprintk(KERN_INFO "SCXLNXConnInvokeClientCommand: "
+				"UnMapTempMemRef %d\n ", i);
+
+			SCXLNXConnUnmapShmem(pConn, pShmemDesc[i], 0);
+		}
+	}
+
+	if (nError != 0)
+		dprintk(KERN_ERR "SCXLNXConnInvokeClientCommand returns %d\n",
+			nError);
+	else
+		dprintk(KERN_ERR "SCXLNXConnInvokeClientCommand returns "
+			"nErrorCode 0x%08X\n",
+			pAnswer->sInvokeClientCommandAnswer.nErrorCode);
+
+	return nError;
+}
+
+
+/*
+ * Cancels a client command from the Secure World
+ */
+/*
+ * Cancels a client command from the Secure World
+ *
+ * Forwards the CANCEL_CLIENT_OPERATION message for this connection's
+ * device context. Returns the transport error, or 0 (the Secure World
+ * error code is reported in pAnswer and logged here).
+ */
+int SCXLNXConnCancelClientCommand(
+	struct SCXLNX_CONNECTION *pConn,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	union SCX_ANSWER_MESSAGE *pAnswer)
+{
+	int nError = 0;
+
+	dprintk(KERN_DEBUG "SCXLNXConnCancelClientCommand(%p)\n", pConn);
+
+	pMessage->sCancelClientOperationMessage.hDeviceContext =
+		pConn->hDeviceContext;
+	pMessage->sCancelClientOperationMessage.nMessageSize =
+		(sizeof(struct SCX_COMMAND_CANCEL_CLIENT_OPERATION) -
+			sizeof(struct SCX_COMMAND_HEADER)) / 4;
+
+	nError = SCXLNXCommSendReceive(
+		&pConn->pDevice->sm,
+		pMessage,
+		pAnswer,
+		pConn,
+		true);
+
+	if ((nError != 0) ||
+		(pAnswer->sCancelClientOperationAnswer.nErrorCode != S_SUCCESS))
+		goto error;
+
+
+	/* successful completion */
+	return 0;
+
+error:
+	if (nError != 0)
+		dprintk(KERN_ERR "SCXLNXConnCancelClientCommand returns %d\n",
+			nError);
+	else
+		dprintk(KERN_ERR "SCXLNXConnCancelClientCommand returns "
+			"nChannelStatus 0x%08X\n",
+			pAnswer->sCancelClientOperationAnswer.nErrorCode);
+
+	return nError;
+}
+
+
+
+/*
+ * Destroys a device context from the Secure World
+ */
+/*
+ * Destroys a device context from the Secure World
+ *
+ * Sends DESTROY_DEVICE_CONTEXT for pConn->hDeviceContext and, on
+ * success, moves the connection back to
+ * SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT. Returns 0 on success, the
+ * transport error, or -ENOMEM/-EFAULT mapped from the Secure World
+ * error code.
+ */
+int SCXLNXConnDestroyDeviceContext(
+	struct SCXLNX_CONNECTION *pConn)
+{
+	int nError;
+	/*
+	 * AFY: better use the specialized SCX_COMMAND_DESTROY_DEVICE_CONTEXT
+	 * structure: this will save stack
+	 */
+	union SCX_COMMAND_MESSAGE sMessage;
+	union SCX_ANSWER_MESSAGE sAnswer;
+
+	dprintk(KERN_INFO "SCXLNXConnDestroyDeviceContext(%p)\n", pConn);
+
+	BUG_ON(pConn == NULL);
+
+	sMessage.sHeader.nMessageType = SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+	sMessage.sHeader.nMessageSize =
+		(sizeof(struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT) -
+			sizeof(struct SCX_COMMAND_HEADER))/sizeof(u32);
+
+	/*
+	 * fill in the device context handler
+	 * it is guarantied that the first shared memory descriptor describes
+	 * the device context
+	 */
+	sMessage.sDestroyDeviceContextMessage.hDeviceContext =
+		pConn->hDeviceContext;
+
+	/* Last argument false: do not kill the operation on signal */
+	nError = SCXLNXCommSendReceive(
+		&pConn->pDevice->sm,
+		&sMessage,
+		&sAnswer,
+		pConn,
+		false);
+
+	if ((nError != 0) ||
+		(sAnswer.sDestroyDeviceContextAnswer.nErrorCode != S_SUCCESS))
+		goto error;
+
+	spin_lock(&(pConn->stateLock));
+	pConn->nState = SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT;
+	spin_unlock(&(pConn->stateLock));
+
+	/* successful completion */
+	dprintk(KERN_INFO "SCXLNXConnDestroyDeviceContext(%p)\n",
+		pConn);
+	return 0;
+
+error:
+	if (nError != 0) {
+		dprintk(KERN_ERR "SCXLNXConnDestroyDeviceContext failed with "
+			"error %d\n", nError);
+	} else {
+		dprintk(KERN_ERR "SCXLNXConnDestroyDeviceContext failed with "
+			"nErrorCode 0x%08X\n",
+			sAnswer.sDestroyDeviceContextAnswer.nErrorCode);
+		if (sAnswer.sDestroyDeviceContextAnswer.nErrorCode ==
+			S_ERROR_OUT_OF_MEMORY)
+			nError = -ENOMEM;
+		else
+			nError = -EFAULT;
+	}
+
+	return nError;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Connection initialization and cleanup operations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Opens a connection to the specified device.
+ *
+ * The placeholder referenced by ppConn is set to the address of the
+ * new connection; it is set to NULL upon failure.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+/*
+ * Opens a connection to the specified device.
+ *
+ * The placeholder referenced by ppConn is set to the address of the
+ * new connection; it is set to NULL upon failure.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+int SCXLNXConnOpen(struct SCXLNX_DEVICE *pDevice,
+	struct file *file,
+	struct SCXLNX_CONNECTION **ppConn)
+{
+	int nError;
+	struct SCXLNX_CONNECTION *pConn = NULL;
+
+	dprintk(KERN_INFO "SCXLNXConnOpen(%p, %p)\n", file, ppConn);
+
+	/*
+	 * Allocate and initialize the connection.
+	 * kmalloc only allocates sizeof(*pConn) virtual memory
+	 */
+	pConn = (struct SCXLNX_CONNECTION *) internal_kmalloc(sizeof(*pConn),
+		GFP_KERNEL);
+	if (pConn == NULL) {
+		printk(KERN_ERR "SCXLNXConnOpen(): "
+			"Out of memory for connection!\n");
+		nError = -ENOMEM;
+		goto error;
+	}
+
+	memset(pConn, 0, sizeof(*pConn));
+
+	INIT_LIST_HEAD(&(pConn->list));
+	pConn->nState = SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT;
+	pConn->pDevice = pDevice;
+	spin_lock_init(&(pConn->stateLock));
+	atomic_set(&(pConn->nPendingOpCounter), 0);
+
+	/*
+	 * Initialize the shared memory
+	 * (cleans up after itself on failure, so the error path below
+	 * only has to free pConn)
+	 */
+	nError = SCXLNXConnInitSharedMemory(pConn);
+	if (nError != 0)
+		goto error;
+
+#ifdef CONFIG_TF_ZEBRA
+	/*
+	 * Initialize CUS specifics
+	 */
+	SCXPublicCryptoInitDeviceContext(pConn);
+#endif
+
+	/*
+	 * Successful completion.
+	 */
+
+	*ppConn = pConn;
+
+	dprintk(KERN_INFO "SCXLNXConnOpen(): Success (pConn=%p)\n", pConn);
+	return 0;
+
+	/*
+	 * Error handling.
+	 */
+
+error:
+	dprintk(KERN_ERR "SCXLNXConnOpen(): Failure (error %d)\n", nError);
+	/* Deallocate the descriptor pages if necessary */
+	internal_kfree(pConn);
+	*ppConn = NULL;
+	return nError;
+}
+
+
+/*
+ * Closes the specified connection.
+ *
+ * Upon return, the connection referenced by pConn has been destroyed and cannot
+ * be used anymore.
+ *
+ * This function does nothing if pConn is set to NULL.
+ */
+/*
+ * Closes the specified connection.
+ *
+ * Upon return, the connection referenced by pConn has been destroyed and cannot
+ * be used anymore.
+ *
+ * This function does nothing if pConn is set to NULL.
+ *
+ * NOTE(review): if SCXLNXConnDestroyDeviceContext fails, cleanup is
+ * deliberately skipped and pConn is not freed — the connection object
+ * is leaked on that path. Verify this is the intended trade-off (avoid
+ * freeing memory the Secure World may still reference).
+ */
+void SCXLNXConnClose(struct SCXLNX_CONNECTION *pConn)
+{
+	int nError;
+	enum SCXLNX_CONN_STATE nState;
+
+	dprintk(KERN_DEBUG "SCXLNXConnClose(%p)\n", pConn);
+
+	if (pConn == NULL)
+		return;
+
+	/*
+	 * Assumption: Linux guarantees that no other operation is in progress
+	 * and that no other operation will be started when close is called
+	 */
+	BUG_ON(atomic_read(&(pConn->nPendingOpCounter)) != 0);
+
+	/*
+	 * Exchange a Destroy Device Context message if needed.
+	 */
+	spin_lock(&(pConn->stateLock));
+	nState = pConn->nState;
+	spin_unlock(&(pConn->stateLock));
+	if (nState == SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT) {
+		/*
+		 * A DestroyDeviceContext operation was not performed. Do it
+		 * now.
+		 */
+		nError = SCXLNXConnDestroyDeviceContext(pConn);
+		if (nError != 0)
+			/* avoid cleanup if destroy device context fails */
+			goto error;
+	}
+
+	/*
+	 * Clean up the shared memory
+	 */
+	SCXLNXConnCleanupSharedMemory(pConn);
+
+	internal_kfree(pConn);
+
+	return;
+
+error:
+	dprintk(KERN_DEBUG "SCXLNXConnClose(%p) failed with error code %d\n",
+		pConn, nError);
+}
+
diff --git a/security/tf_driver/scxlnx_conn.h b/security/tf_driver/scxlnx_conn.h
new file mode 100644
index 000000000000..f080f4ef8027
--- /dev/null
+++ b/security/tf_driver/scxlnx_conn.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __SCXLNX_CONN_H__
+#define __SCXLNX_CONN_H__
+
+#include "scxlnx_defs.h"
+
+/*
+ * Returns a pointer to the connection referenced by the
+ * specified file.
+ * (The connection is stored in file->private_data by the device
+ * Open callback.)
+ */
+static inline struct SCXLNX_CONNECTION *SCXLNXConnFromFile(
+	struct file *file)
+{
+	return file->private_data;
+}
+
+/*----------------------------------------------------------------------------
+ * Connection operations to the Secure World
+ *
+ * Each SCXLNXConn*() routine below performs one command/answer exchange
+ * with the Secure World; they are dispatched from the IOCTL_SCX_EXCHANGE
+ * handler in SCXLNXDeviceIoctl() according to the message type.
+ *----------------------------------------------------------------------------*/
+
+/* Sends a CREATE_DEVICE_CONTEXT exchange for this connection. */
+int SCXLNXConnCreateDeviceContext(
+	struct SCXLNX_CONNECTION *pConn);
+
+/* Sends a DESTROY_DEVICE_CONTEXT exchange for this connection. */
+int SCXLNXConnDestroyDeviceContext(
+	struct SCXLNX_CONNECTION *pConn);
+
+/* Handles an OPEN_CLIENT_SESSION message and fills in *pAnswer. */
+int SCXLNXConnOpenClientSession(
+	struct SCXLNX_CONNECTION *pConn,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	union SCX_ANSWER_MESSAGE *pAnswer);
+
+/* Handles a CLOSE_CLIENT_SESSION message and fills in *pAnswer. */
+int SCXLNXConnCloseClientSession(
+	struct SCXLNX_CONNECTION *pConn,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	union SCX_ANSWER_MESSAGE *pAnswer);
+
+/* Handles a REGISTER_SHARED_MEMORY message and fills in *pAnswer. */
+int SCXLNXConnRegisterSharedMemory(
+	struct SCXLNX_CONNECTION *pConn,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	union SCX_ANSWER_MESSAGE *pAnswer);
+
+/* Handles a RELEASE_SHARED_MEMORY message and fills in *pAnswer. */
+int SCXLNXConnReleaseSharedMemory(
+	struct SCXLNX_CONNECTION *pConn,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	union SCX_ANSWER_MESSAGE *pAnswer);
+
+/* Handles an INVOKE_CLIENT_COMMAND message and fills in *pAnswer. */
+int SCXLNXConnInvokeClientCommand(
+	struct SCXLNX_CONNECTION *pConn,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	union SCX_ANSWER_MESSAGE *pAnswer);
+
+/* Handles a CANCEL_CLIENT_COMMAND message and fills in *pAnswer. */
+int SCXLNXConnCancelClientCommand(
+	struct SCXLNX_CONNECTION *pConn,
+	union SCX_COMMAND_MESSAGE *pMessage,
+	union SCX_ANSWER_MESSAGE *pAnswer);
+
+/* Checks that *pMessage is valid in the connection's current state. */
+int SCXLNXConnCheckMessageValidity(
+	struct SCXLNX_CONNECTION *pConn,
+	union SCX_COMMAND_MESSAGE *pMessage);
+
+/*----------------------------------------------------------------------------
+ * Connection initialization and cleanup operations
+ *----------------------------------------------------------------------------*/
+
+/* Allocates and initializes a new connection; on success *ppConn owns it. */
+int SCXLNXConnOpen(struct SCXLNX_DEVICE *pDevice,
+	struct file *file,
+	struct SCXLNX_CONNECTION **ppConn);
+
+/* Destroys a connection previously returned by SCXLNXConnOpen. */
+void SCXLNXConnClose(
+	struct SCXLNX_CONNECTION *pConn);
+
+
+#endif /* !defined(__SCXLNX_CONN_H__) */
diff --git a/security/tf_driver/scxlnx_defs.h b/security/tf_driver/scxlnx_defs.h
new file mode 100644
index 000000000000..b6430d2a3c59
--- /dev/null
+++ b/security/tf_driver/scxlnx_defs.h
@@ -0,0 +1,532 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __SCXLNX_DEFS_H__
+#define __SCXLNX_DEFS_H__
+
+#include <asm/atomic.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/sysfs.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
+#include "scx_protocol.h"
+
+/*----------------------------------------------------------------------------*/
+
+#define SIZE_1KB 0x400
+
+/*
+ * Maximum number of shared memory blocks that can be registered in a connection
+ */
+#define SCXLNX_SHMEM_MAX_COUNT (64)
+
+/*
+ * Describes the possible types of shared memories
+ *
+ * SCXLNX_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM :
+ * The descriptor describes a registered shared memory.
+ * Its coarse pages are preallocated when initializing the
+ * connection
+ * SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM :
+ * The descriptor describes a registered shared memory.
+ * Its coarse pages are not preallocated
+ * SCXLNX_SHMEM_TYPE_PM_HIBERNATE :
+ * The descriptor describes a power management shared memory.
+ */
+enum SCXLNX_SHMEM_TYPE {
+ SCXLNX_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM = 0,
+ SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM,
+ SCXLNX_SHMEM_TYPE_PM_HIBERNATE,
+};
+
+
+/*
+ * This structure contains a pointer on a coarse page table
+ */
+struct SCXLNX_COARSE_PAGE_TABLE {
+ /*
+ * Identifies the coarse page table descriptor in
+ * sFreeCoarsePageTables list
+ */
+ struct list_head list;
+
+ /*
+ * The address of the coarse page table
+ */
+ u32 *pDescriptors;
+
+ /*
+ * The address of the array containing this coarse page table
+ */
+ struct SCXLNX_COARSE_PAGE_TABLE_ARRAY *pParent;
+};
+
+
+#define SCXLNX_PAGE_DESCRIPTOR_TYPE_NORMAL 0
+#define SCXLNX_PAGE_DESCRIPTOR_TYPE_PREALLOCATED 1
+
+/*
+ * This structure describes an array of up to 4 coarse page tables
+ * allocated within a single 4KB page.
+ */
+struct SCXLNX_COARSE_PAGE_TABLE_ARRAY {
+ /*
+ * identifies the element in the sCoarsePageTableArrays list
+ */
+ struct list_head list;
+
+ /*
+ * Type of page descriptor
+ * can take any of SCXLNX_PAGE_DESCRIPTOR_TYPE_XXX value
+ */
+ u32 nType;
+
+ struct SCXLNX_COARSE_PAGE_TABLE sCoarsePageTables[4];
+
+ /*
+ * A counter of the number of coarse pages currently used
+ * the max value should be 4 (one coarse page table is 1KB while one
+ * page is 4KB)
+ */
+ u8 nReferenceCount;
+};
+
+
+/*
+ * This structure describes a list of coarse page table arrays
+ * with some of the coarse page tables free. It is used
+ * when the driver needs to allocate a new coarse page
+ * table.
+ */
+struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT {
+ /*
+ * The spin lock protecting concurrent access to the structure.
+ */
+ spinlock_t lock;
+
+ /*
+ * The list of allocated coarse page table arrays
+ */
+ struct list_head sCoarsePageTableArrays;
+
+ /*
+ * The list of free coarse page tables
+ */
+ struct list_head sFreeCoarsePageTables;
+};
+
+
+/*
+ * Fully describes a shared memory block
+ */
+struct SCXLNX_SHMEM_DESC {
+ /*
+ * Identifies the shared memory descriptor in the list of free shared
+ * memory descriptors
+ */
+ struct list_head list;
+
+ /*
+ * Identifies the type of shared memory descriptor
+ */
+ enum SCXLNX_SHMEM_TYPE nType;
+
+ /*
+ * The identifier of the block of shared memory, as returned by the
+ * Secure World.
+ * This identifier is hBlock field of a REGISTER_SHARED_MEMORY answer
+ */
+ u32 hIdentifier;
+
+ /* Client buffer */
+ u8 *pBuffer;
+
+ /* Up to eight coarse page table context */
+ struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable[SCX_MAX_COARSE_PAGES];
+
+ u32 nNumberOfCoarsePageTables;
+
+ /* Reference counter */
+ atomic_t nRefCnt;
+};
+
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * This structure describes the communication with the Secure World
+ *
+ * Note that this driver supports only one instance of the Secure World
+ */
+struct SCXLNX_COMM {
+ /*
+ * The spin lock protecting concurrent access to the structure.
+ */
+ spinlock_t lock;
+
+ /*
+ * Bit vector with the following possible flags:
+ * - SCXLNX_COMM_FLAG_IRQ_REQUESTED: If set, indicates that
+ *        the IRQ has been successfully requested.
+ * - SCXLNX_COMM_FLAG_TERMINATING: If set, indicates that the
+ * communication with the Secure World is being terminated.
+ * Transmissions to the Secure World are not permitted
+ * - SCXLNX_COMM_FLAG_W3B_ALLOCATED: If set, indicates that the
+ * W3B buffer has been allocated.
+ *
+ * This bit vector must be accessed with the kernel's atomic bitwise
+ * operations.
+ */
+ unsigned long nFlags;
+
+ /*
+ * The virtual address of the L1 shared buffer.
+ */
+ struct SCHANNEL_C1S_BUFFER *pBuffer;
+
+ /*
+ * The wait queue the client threads are waiting on.
+ */
+ wait_queue_head_t waitQueue;
+
+#ifdef CONFIG_TF_TRUSTZONE
+ /*
+ * The interrupt line used by the Secure World.
+ */
+ int nSoftIntIrq;
+
+ /* ----- W3B ----- */
+ /* shared memory descriptor to identify the W3B */
+ struct SCXLNX_SHMEM_DESC sW3BShmemDesc;
+
+ /* Virtual address of the kernel allocated shared memory */
+ u32 nW3BShmemVAddr;
+
+ /* offset of data in shared memory coarse pages */
+ u32 nW3BShmemOffset;
+
+ u32 nW3BShmemSize;
+
+ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT
+ sW3BAllocationContext;
+#endif
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * The SE SDP can only be initialized once...
+ */
+ int bSEInitialized;
+
+ /* Virtual address of the L0 communication buffer */
+ void *pInitSharedBuffer;
+
+ /*
+ * Lock to be held by a client when executing an RPC
+ */
+ struct mutex sRPCLock;
+
+ /*
+ * Lock to protect concurrent accesses to DMA channels
+ */
+ struct mutex sDMALock;
+#endif
+};
+
+
+#define SCXLNX_COMM_FLAG_IRQ_REQUESTED (0)
+#define SCXLNX_COMM_FLAG_PA_AVAILABLE (1)
+#define SCXLNX_COMM_FLAG_TERMINATING (2)
+#define SCXLNX_COMM_FLAG_W3B_ALLOCATED (3)
+#define SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED (4)
+
+/*----------------------------------------------------------------------------*/
+
+struct SCXLNX_DEVICE_STATS {
+ struct kobject kobj;
+
+ struct kobj_type kobj_type;
+
+ struct attribute kobj_stat_attribute;
+
+ struct attribute *kobj_attribute_list[2];
+
+ atomic_t stat_pages_allocated;
+ atomic_t stat_memories_allocated;
+ atomic_t stat_pages_locked;
+};
+
+/*
+ * This structure describes the information about one device handled by the
+ * driver. Note that the driver supports only a single device. see the global
+ * variable g_SCXLNXDevice
+ */
+struct SCXLNX_DEVICE {
+ /*
+ * The device number for the device.
+ */
+ dev_t nDevNum;
+
+ /*
+ * Interfaces the system device with the kernel.
+ */
+ struct sys_device sysdev;
+
+ /*
+ * Interfaces the char device with the kernel.
+ */
+ struct cdev cdev;
+
+#ifdef CONFIG_TF_ZEBRA
+ struct cdev cdev_ctrl;
+
+ /*
+ * Globals for CUS
+ */
+ /* Current key handles loaded in HWAs */
+ u32 hAES1SecureKeyContext;
+ u32 hDESSecureKeyContext;
+ bool bSHAM1IsPublic;
+
+ /* Semaphores used to serialize HWA accesses */
+ struct semaphore sAES1CriticalSection;
+ struct mutex sDESCriticalSection;
+ struct mutex sSHACriticalSection;
+
+ /*
+ * An aligned and correctly shaped pre-allocated buffer used for DMA
+ * transfers
+ */
+ u32 nDMABufferLength;
+ u8 *pDMABuffer;
+ dma_addr_t pDMABufferPhys;
+
+ /* Workspace allocated at boot time and reserved to the Secure World */
+ u32 nWorkspaceAddr;
+ u32 nWorkspaceSize;
+#endif
+
+ /*
+ * Communications with the SM.
+ */
+ struct SCXLNX_COMM sm;
+
+ /*
+ * Lists the connections attached to this device. A connection is
+ * created each time a user space application "opens" a file descriptor
+ * on the driver
+ */
+ struct list_head conns;
+
+ /*
+ * The spin lock used to protect concurrent access to the connection
+ * list.
+ */
+ spinlock_t connsLock;
+
+ struct SCXLNX_DEVICE_STATS sDeviceStats;
+};
+
+/* flag bits for the device state (NOTE(review): struct SCXLNX_DEVICE above
+ * declares no nFlags field — confirm which bit vector these bits apply to) */
+#define SCXLNX_DEVICE_FLAG_CDEV_INITIALIZED (0)
+#define SCXLNX_DEVICE_FLAG_SYSDEV_CLASS_REGISTERED (1)
+#define SCXLNX_DEVICE_FLAG_SYSDEV_REGISTERED (2)
+#define SCXLNX_DEVICE_FLAG_CDEV_REGISTERED (3)
+#define SCXLNX_DEVICE_FLAG_CDEV_ADDED (4)
+#define SCXLNX_DEVICE_SYSFS_REGISTERED (5)
+
+/*----------------------------------------------------------------------------*/
+/*
+ * This type describes a connection state.
+ * This is used to determine whether a message is valid or not.
+ *
+ * Messages are only valid in a certain device state.
+ * Messages may be invalidated between the start of the ioctl call and the
+ * moment the message is sent to the Secure World.
+ *
+ * SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT :
+ * The connection has no DEVICE_CONTEXT created and no
+ * CREATE_DEVICE_CONTEXT being processed by the Secure World
+ * SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT :
+ * The connection has a CREATE_DEVICE_CONTEXT being processed by the Secure
+ * World
+ * SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT :
+ * The connection has a DEVICE_CONTEXT created and no
+ * DESTROY_DEVICE_CONTEXT is being processed by the Secure World
+ * SCXLNX_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT :
+ * The connection has a DESTROY_DEVICE_CONTEXT being processed by the Secure
+ * World
+ */
+enum SCXLNX_CONN_STATE {
+ SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT = 0,
+ SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT,
+ SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT,
+ SCXLNX_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT
+};
+
+
+/*
+ * This type describes the status of the command.
+ *
+ * PENDING:
+ * The initial state; the command has not been sent yet.
+ * SENT:
+ * The command has been sent, we are waiting for an answer.
+ * ABORTED:
+ * The command cannot be sent because the device context is invalid.
+ * Note that this only covers the case where some other thread
+ * sent a DESTROY_DEVICE_CONTEXT command.
+ */
+enum SCXLNX_COMMAND_STATE {
+ SCXLNX_COMMAND_STATE_PENDING = 0,
+ SCXLNX_COMMAND_STATE_SENT,
+ SCXLNX_COMMAND_STATE_ABORTED
+};
+
+
+/*
+ * This structure describes a connection to the driver
+ * A connection is created each time an application opens a file descriptor on
+ * the driver
+ */
+struct SCXLNX_CONNECTION {
+ /*
+ * Identifies the connection in the list of the connections attached to
+ * the same device.
+ */
+ struct list_head list;
+
+ /*
+ * State of the connection.
+ */
+ enum SCXLNX_CONN_STATE nState;
+
+ /*
+ * A pointer to the corresponding device structure
+ */
+ struct SCXLNX_DEVICE *pDevice;
+
+ /*
+ * A spinlock to use to access nState
+ */
+ spinlock_t stateLock;
+
+ /*
+ * Counts the number of operations currently pending on the connection.
+ * (for debug only)
+ */
+ atomic_t nPendingOpCounter;
+
+ /*
+ * A handle for the device context
+ */
+ u32 hDeviceContext;
+
+ /*
+ * Lists the used shared memory descriptors
+ */
+ struct list_head sUsedSharedMemoryList;
+
+ /*
+ * Lists the free shared memory descriptors
+ */
+ struct list_head sFreeSharedMemoryList;
+
+ /*
+ * A mutex to use to access this structure
+ */
+ struct mutex sharedMemoriesMutex;
+
+ /*
+ * Counts the number of shared memories registered.
+ */
+ atomic_t nShmemAllocated;
+
+ /*
+ * Page to retrieve memory properties when
+ * registering shared memory through REGISTER_SHARED_MEMORY
+ * messages
+ */
+ struct vm_area_struct **ppVmas;
+
+ /*
+ * coarse page table allocation context
+ */
+ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT sAllocationContext;
+
+#ifdef CONFIG_TF_ZEBRA
+ /* Lists all the Cryptoki Update Shortcuts */
+ struct list_head ShortcutList;
+
+ /* Lock to protect concurrent accesses to ShortcutList */
+ spinlock_t shortcutListCriticalSectionLock;
+#endif
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * The nOperationID field of a message points to this structure.
+ * It is used to identify the thread that triggered the message transmission
+ * Whoever reads an answer can wake up that thread using the completion event
+ */
+struct SCXLNX_ANSWER_STRUCT {
+ bool bAnswerCopied;
+ union SCX_ANSWER_MESSAGE *pAnswer;
+};
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * The ASCII-C string representation of the base name of the devices managed by
+ * this driver.
+ */
+#define SCXLNX_DEVICE_BASE_NAME "tf_driver"
+
+
+/**
+ * The major and minor numbers of the registered character device driver.
+ * Only 1 instance of the driver is supported.
+ */
+#define SCXLNX_DEVICE_MINOR_NUMBER (0)
+
+struct SCXLNX_DEVICE *SCXLNXGetDevice(void);
+
+#define CLEAN_CACHE_CFG_MASK (~0xC) /* 1111 0011 */
+
+/*----------------------------------------------------------------------------*/
+/*
+ * Kernel Differences
+ */
+
+#ifdef CONFIG_ANDROID
+#define GROUP_INFO get_current_groups()
+#else
+#define GROUP_INFO (current->group_info)
+#endif
+
+#endif /* !defined(__SCXLNX_DEFS_H__) */
diff --git a/security/tf_driver/scxlnx_device.c b/security/tf_driver/scxlnx_device.c
new file mode 100644
index 000000000000..4c9386714586
--- /dev/null
+++ b/security/tf_driver/scxlnx_device.c
@@ -0,0 +1,697 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/pm.h>
+#include <linux/sysdev.h>
+#include <linux/vmalloc.h>
+#include <linux/signal.h>
+#ifdef CONFIG_ANDROID
+#include <linux/device.h>
+#endif
+
+#include "scx_protocol.h"
+#include "scxlnx_defs.h"
+#include "scxlnx_util.h"
+#include "scxlnx_conn.h"
+#include "scxlnx_comm.h"
+#ifdef CONFIG_TF_ZEBRA
+#include <plat/cpu.h>
+#include "scxlnx_zebra.h"
+#endif
+
+#include "s_version.h"
+
+/*----------------------------------------------------------------------------
+ * Forward Declarations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Creates and registers the device to be managed by the specified driver.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static int SCXLNXDeviceRegister(void);
+
+
+/*
+ * Implements the device Open callback.
+ */
+static int SCXLNXDeviceOpen(
+ struct inode *inode,
+ struct file *file);
+
+
+/*
+ * Implements the device Release callback.
+ */
+static int SCXLNXDeviceRelease(
+ struct inode *inode,
+ struct file *file);
+
+
+/*
+ * Implements the device ioctl callback.
+ */
+static long SCXLNXDeviceIoctl(
+ struct file *file,
+ unsigned int ioctl_num,
+ unsigned long ioctl_param);
+
+
+/*
+ * Implements the device shutdown callback.
+ */
+static int SCXLNXDeviceShutdown(
+ struct sys_device *sysdev);
+
+
+/*
+ * Implements the device suspend callback.
+ */
+static int SCXLNXDeviceSuspend(
+ struct sys_device *sysdev,
+ pm_message_t state);
+
+
+/*
+ * Implements the device resume callback.
+ */
+static int SCXLNXDeviceResume(
+ struct sys_device *sysdev);
+
+
+/*---------------------------------------------------------------------------
+ * Module Parameters
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The device major number used to register a unique character device driver.
+ * Let the default value be 122
+ */
+static int device_major_number = 122;
+
+module_param(device_major_number, int, 0000);
+MODULE_PARM_DESC(device_major_number,
+ "The device major number used to register a unique character "
+ "device driver");
+
+#ifdef CONFIG_TF_TRUSTZONE
+/**
+ * The softint interrupt line used by the Secure World.
+ */
+static int soft_interrupt = -1;
+
+module_param(soft_interrupt, int, 0000);
+MODULE_PARM_DESC(soft_interrupt,
+ "The softint interrupt line used by the Secure world");
+#endif
+
+#ifdef CONFIG_ANDROID
+static struct class *tf_class;
+#endif
+
+/*----------------------------------------------------------------------------
+ * Global Variables
+ *----------------------------------------------------------------------------*/
+
+/*
+ * tf_driver character device definitions.
+ * read and write methods are not defined
+ * and will return an error if used by user space
+ */
+static const struct file_operations g_SCXLNXDeviceFileOps = {
+ .owner = THIS_MODULE,
+ .open = SCXLNXDeviceOpen,
+ .release = SCXLNXDeviceRelease,
+ .unlocked_ioctl = SCXLNXDeviceIoctl,
+ .llseek = no_llseek,
+};
+
+
+static struct sysdev_class g_SCXLNXDeviceSysClass = {
+ .name = SCXLNX_DEVICE_BASE_NAME,
+ .shutdown = SCXLNXDeviceShutdown,
+ .suspend = SCXLNXDeviceSuspend,
+ .resume = SCXLNXDeviceResume,
+};
+
+/* The single device supported by this driver */
+static struct SCXLNX_DEVICE g_SCXLNXDevice = {0, };
+
+/*----------------------------------------------------------------------------
+ * Implementations
+ *----------------------------------------------------------------------------*/
+
+/* Returns the single device instance managed by this driver. */
+struct SCXLNX_DEVICE *SCXLNXGetDevice(void)
+{
+	return &g_SCXLNXDevice;
+}
+
+/*
+ * sysfs "show" callback for the driver stats kobject: formats the
+ * current allocation counters into the caller-supplied page buffer.
+ */
+static ssize_t kobject_show(struct kobject *pkobject,
+		struct attribute *pattributes, char *buf)
+{
+	struct SCXLNX_DEVICE_STATS *pStats = &g_SCXLNXDevice.sDeviceStats;
+	u32 nMemories = atomic_read(&(pStats->stat_memories_allocated));
+	u32 nPages = atomic_read(&(pStats->stat_pages_allocated));
+	u32 nLocked = atomic_read(&(pStats->stat_pages_locked));
+
+	/*
+	 * AFY: could we add the number of context switches (call to the SMI
+	 * instruction)
+	 */
+
+	return snprintf(buf, PAGE_SIZE,
+		"stat.memories.allocated: %d\n"
+		"stat.pages.allocated: %d\n"
+		"stat.pages.locked: %d\n",
+		nMemories,
+		nPages,
+		nLocked);
+}
+
+static const struct sysfs_ops kobj_sysfs_operations = {
+ .show = kobject_show,
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * First routine called when the kernel module is loaded.
+ *
+ * Registers the stats kobject, the sysdev class/device, the character
+ * device, then initializes the communication with the Secure World.
+ * Returns 0 on success, a negative error code otherwise; on failure all
+ * previously registered resources are released in reverse order.
+ */
+static int __init SCXLNXDeviceRegister(void)
+{
+	int nError;
+	struct SCXLNX_DEVICE *pDevice = &g_SCXLNXDevice;
+	struct SCXLNX_DEVICE_STATS *pDeviceStats = &pDevice->sDeviceStats;
+
+	dprintk(KERN_INFO "SCXLNXDeviceRegister()\n");
+
+#ifdef CONFIG_TF_ZEBRA
+	nError = SCXLNXCtrlDeviceInit();
+	if (nError <= 0)
+		return nError;
+#endif
+
+	/*
+	 * Initialize the device
+	 */
+	pDevice->nDevNum = MKDEV(device_major_number,
+		SCXLNX_DEVICE_MINOR_NUMBER);
+	cdev_init(&pDevice->cdev, &g_SCXLNXDeviceFileOps);
+	pDevice->cdev.owner = THIS_MODULE;
+
+	pDevice->sysdev.id = 0;
+	pDevice->sysdev.cls = &g_SCXLNXDeviceSysClass;
+
+	INIT_LIST_HEAD(&pDevice->conns);
+	spin_lock_init(&pDevice->connsLock);
+
+	/* register the sysfs object driver stats */
+	pDeviceStats->kobj_type.sysfs_ops = &kobj_sysfs_operations;
+
+	pDeviceStats->kobj_stat_attribute.name = "info";
+	pDeviceStats->kobj_stat_attribute.mode = S_IRUGO;
+	pDeviceStats->kobj_attribute_list[0] =
+		&pDeviceStats->kobj_stat_attribute;
+
+	/*
+	 * FIX: the original code chained the assignment below and the
+	 * kobject_init_and_add() call with a comma operator and discarded
+	 * the return value. Use a proper statement terminator and check
+	 * the result; kobject_init_and_add() is documented to require a
+	 * kobject_put() on failure.
+	 */
+	pDeviceStats->kobj_type.default_attrs =
+		pDeviceStats->kobj_attribute_list;
+	nError = kobject_init_and_add(&(pDeviceStats->kobj),
+		&(pDeviceStats->kobj_type), NULL, "%s",
+		SCXLNX_DEVICE_BASE_NAME);
+	if (nError != 0) {
+		printk(KERN_ERR "SCXLNXDeviceRegister():"
+			" kobject_init_and_add failed (error %d)!\n",
+			nError);
+		kobject_put(&(pDeviceStats->kobj));
+		return nError;
+	}
+
+	/*
+	 * Register the system device.
+	 */
+
+	nError = sysdev_class_register(&g_SCXLNXDeviceSysClass);
+	if (nError != 0) {
+		printk(KERN_ERR "SCXLNXDeviceRegister():"
+			" sysdev_class_register failed (error %d)!\n",
+			nError);
+		goto sysdev_class_register_failed;
+	}
+
+	nError = sysdev_register(&pDevice->sysdev);
+	if (nError != 0) {
+		dprintk(KERN_ERR "SCXLNXDeviceRegister(): "
+			"sysdev_register failed (error %d)!\n",
+			nError);
+		goto sysdev_register_failed;
+	}
+
+	/*
+	 * Register the char device.
+	 */
+	printk(KERN_INFO "Registering char device %s (%u:%u)\n",
+		SCXLNX_DEVICE_BASE_NAME,
+		MAJOR(pDevice->nDevNum),
+		MINOR(pDevice->nDevNum));
+	nError = register_chrdev_region(pDevice->nDevNum, 1,
+		SCXLNX_DEVICE_BASE_NAME);
+	if (nError != 0) {
+		printk(KERN_ERR "SCXLNXDeviceRegister():"
+			" register_chrdev_region failed (error %d)!\n",
+			nError);
+		goto register_chrdev_region_failed;
+	}
+
+	nError = cdev_add(&pDevice->cdev, pDevice->nDevNum, 1);
+	if (nError != 0) {
+		printk(KERN_ERR "SCXLNXDeviceRegister(): "
+			"cdev_add failed (error %d)!\n",
+			nError);
+		goto cdev_add_failed;
+	}
+
+	/*
+	 * Initialize the communication with the Secure World.
+	 */
+#ifdef CONFIG_TF_TRUSTZONE
+	pDevice->sm.nSoftIntIrq = soft_interrupt;
+#endif
+	nError = SCXLNXCommInit(&g_SCXLNXDevice.sm);
+	if (nError != S_SUCCESS) {
+		dprintk(KERN_ERR "SCXLNXDeviceRegister(): "
+			"SCXLNXCommInit failed (error %d)!\n",
+			nError);
+		goto init_failed;
+	}
+
+#ifdef CONFIG_ANDROID
+	tf_class = class_create(THIS_MODULE, SCXLNX_DEVICE_BASE_NAME);
+	device_create(tf_class, NULL,
+		pDevice->nDevNum,
+		NULL, SCXLNX_DEVICE_BASE_NAME);
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+	/*
+	 * Initializes the /dev/tf_ctrl device node.
+	 * NOTE(review): jumping to init_failed here does not undo
+	 * SCXLNXCommInit() nor the ANDROID class/device creation above —
+	 * confirm whether a dedicated teardown is needed.
+	 */
+	nError = SCXLNXCtrlDeviceRegister();
+	if (nError)
+		goto init_failed;
+#endif
+
+#ifdef CONFIG_BENCH_SECURE_CYCLE
+	runBogoMIPS();
+	addressCacheProperty((unsigned long) &SCXLNXDeviceRegister);
+#endif
+	/*
+	 * Successful completion.
+	 */
+
+	dprintk(KERN_INFO "SCXLNXDeviceRegister(): Success\n");
+	return 0;
+
+	/*
+	 * Error: undo all operations in the reverse order
+	 */
+init_failed:
+	cdev_del(&pDevice->cdev);
+cdev_add_failed:
+	unregister_chrdev_region(pDevice->nDevNum, 1);
+register_chrdev_region_failed:
+	sysdev_unregister(&(pDevice->sysdev));
+sysdev_register_failed:
+	sysdev_class_unregister(&g_SCXLNXDeviceSysClass);
+sysdev_class_register_failed:
+	kobject_del(&g_SCXLNXDevice.sDeviceStats.kobj);
+
+	dprintk(KERN_INFO "SCXLNXDeviceRegister(): Failure (error %d)\n",
+		nError);
+	return nError;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * Implements the device Open callback: creates a connection, attaches it
+ * to the device and to file->private_data, then asks the Secure World to
+ * create a device context for it.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int SCXLNXDeviceOpen(struct inode *inode, struct file *file)
+{
+	int nError;
+	struct SCXLNX_DEVICE *pDevice = &g_SCXLNXDevice;
+	struct SCXLNX_CONNECTION *pConn = NULL;
+
+	dprintk(KERN_INFO "SCXLNXDeviceOpen(%u:%u, %p)\n",
+		imajor(inode), iminor(inode), file);
+
+	/* Dummy lseek for non-seekable driver */
+	nError = nonseekable_open(inode, file);
+	if (nError != 0) {
+		dprintk(KERN_ERR "SCXLNXDeviceOpen(%p): "
+			"nonseekable_open failed (error %d)!\n",
+			file, nError);
+		goto error;
+	}
+
+#ifndef CONFIG_ANDROID
+	/*
+	 * Check file flags. We only authorize the O_RDWR access
+	 */
+	if (file->f_flags != O_RDWR) {
+		dprintk(KERN_ERR "SCXLNXDeviceOpen(%p): "
+			"Invalid access mode %u\n",
+			file, file->f_flags);
+		nError = -EACCES;
+		goto error;
+	}
+#endif
+
+	/*
+	 * Open a new connection.
+	 */
+
+	nError = SCXLNXConnOpen(pDevice, file, &pConn);
+	if (nError != 0) {
+		dprintk(KERN_ERR "SCXLNXDeviceOpen(%p): "
+			"SCXLNXConnOpen failed (error %d)!\n",
+			file, nError);
+		goto error;
+	}
+
+	/*
+	 * Attach the connection to the device.
+	 */
+	spin_lock(&(pDevice->connsLock));
+	list_add(&(pConn->list), &(pDevice->conns));
+	spin_unlock(&(pDevice->connsLock));
+
+	file->private_data = pConn;
+
+	/*
+	 * Send the CreateDeviceContext command to the secure
+	 */
+	nError = SCXLNXConnCreateDeviceContext(pConn);
+	if (nError != 0) {
+		dprintk(KERN_ERR "SCXLNXDeviceOpen(%p): "
+			"SCXLNXConnCreateDeviceContext failed (error %d)!\n",
+			file, nError);
+		goto error1;
+	}
+
+	/*
+	 * Successful completion.
+	 */
+
+	dprintk(KERN_INFO "SCXLNXDeviceOpen(%p): Success (pConn=%p)\n",
+		file, pConn);
+	return 0;
+
+	/*
+	 * Error handling.
+	 */
+
+error1:
+	/*
+	 * FIX: the connection was already linked into pDevice->conns and
+	 * file->private_data above, while SCXLNXConnClose() frees pConn
+	 * without unlinking it. Detach it first, otherwise the device list
+	 * (and the file) would keep a dangling pointer to freed memory.
+	 * This mirrors the teardown order used in SCXLNXDeviceRelease().
+	 */
+	spin_lock(&(pDevice->connsLock));
+	list_del(&(pConn->list));
+	spin_unlock(&(pDevice->connsLock));
+	file->private_data = NULL;
+	SCXLNXConnClose(pConn);
+error:
+	dprintk(KERN_INFO "SCXLNXDeviceOpen(%p): Failure (error %d)\n",
+		file, nError);
+	return nError;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * Implements the device Release callback: detaches the connection from
+ * the device list and destroys it.
+ */
+static int SCXLNXDeviceRelease(struct inode *inode, struct file *file)
+{
+	struct SCXLNX_CONNECTION *pConnection;
+
+	dprintk(KERN_INFO "SCXLNXDeviceRelease(%u:%u, %p)\n",
+		imajor(inode), iminor(inode), file);
+
+	pConnection = SCXLNXConnFromFile(file);
+
+	/* Unlink from the device's connection list before destroying. */
+	spin_lock(&g_SCXLNXDevice.connsLock);
+	list_del(&pConnection->list);
+	spin_unlock(&g_SCXLNXDevice.connsLock);
+
+	SCXLNXConnClose(pConnection);
+
+	dprintk(KERN_INFO "SCXLNXDeviceRelease(%p): Success\n", file);
+	return 0;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * Implements the device ioctl callback.
+ *
+ * Supported requests:
+ *   IOCTL_SCX_GET_VERSION     - returns the driver interface version.
+ *   IOCTL_SCX_EXCHANGE        - copies a command message from user space,
+ *                               dispatches it to the matching SCXLNXConn*
+ *                               handler, and copies the answer back.
+ *   IOCTL_SCX_GET_DESCRIPTION - copies the driver and Secure World
+ *                               description strings to user space.
+ *
+ * Returns a non-negative value or a negative error code.
+ */
+static long SCXLNXDeviceIoctl(struct file *file, unsigned int ioctl_num,
+	unsigned long ioctl_param)
+{
+	int nResult = S_SUCCESS;
+	struct SCXLNX_CONNECTION *pConn;
+	union SCX_COMMAND_MESSAGE sMessage;
+	struct SCX_COMMAND_HEADER sCommandHeader;
+	union SCX_ANSWER_MESSAGE sAnswer;
+	u32 nCommandSize;
+	u32 nAnswerSize;
+	void *pUserAnswer;
+
+	dprintk(KERN_INFO "SCXLNXDeviceIoctl(%p, %u, %p)\n",
+		file, ioctl_num, (void *) ioctl_param);
+
+	switch (ioctl_num) {
+	case IOCTL_SCX_GET_VERSION:
+		/* ioctl is asking for the driver interface version */
+		nResult = SCX_DRIVER_INTERFACE_VERSION;
+		goto exit;
+
+	case IOCTL_SCX_EXCHANGE:
+		/*
+		 * ioctl is asking to perform a message exchange with the Secure
+		 * Module
+		 */
+
+		/*
+		 * Make a local copy of the data from the user application
+		 * This routine checks the data is readable
+		 *
+		 * Get the header first.
+		 */
+		if (copy_from_user(&sCommandHeader,
+				(struct SCX_COMMAND_HEADER *)ioctl_param,
+				sizeof(struct SCX_COMMAND_HEADER))) {
+			dprintk(KERN_ERR "SCXLNXDeviceIoctl(%p): "
+				"Cannot access ioctl parameter %p\n",
+				file, (void *) ioctl_param);
+			nResult = -EFAULT;
+			goto exit;
+		}
+
+		/*
+		 * size in words of u32.
+		 * NOTE(review): nMessageSize is user-controlled; a value near
+		 * U32_MAX would wrap this addition. The wrapped value still
+		 * passes the bound check below, so the copy stays in bounds,
+		 * but confirm downstream handlers re-validate nMessageSize.
+		 */
+		nCommandSize = sCommandHeader.nMessageSize +
+			sizeof(struct SCX_COMMAND_HEADER)/sizeof(u32);
+		if (nCommandSize > sizeof(sMessage)/sizeof(u32)) {
+			dprintk(KERN_ERR "SCXLNXDeviceIoctl(%p): "
+				"Buffer overflow: too many bytes to copy %d\n",
+				file, nCommandSize);
+			nResult = -EFAULT;
+			goto exit;
+		}
+
+		if (copy_from_user(&sMessage,
+				(union SCX_COMMAND_MESSAGE *)ioctl_param,
+				nCommandSize * sizeof(u32))) {
+			dprintk(KERN_ERR "SCXLNXDeviceIoctl(%p): "
+				"Cannot access ioctl parameter %p\n",
+				file, (void *) ioctl_param);
+			nResult = -EFAULT;
+			goto exit;
+		}
+
+		pConn = SCXLNXConnFromFile(file);
+		BUG_ON(pConn == NULL);
+
+		/*
+		 * The answer memory space address is in the nOperationID field.
+		 * NOTE(review): this casts a u32 to a user-space pointer and
+		 * thus assumes a 32-bit user address space — confirm for
+		 * 64-bit builds.
+		 */
+		pUserAnswer = (void *) sMessage.sHeader.nOperationID;
+
+		atomic_inc(&(pConn->nPendingOpCounter));
+
+		dprintk(KERN_WARNING "SCXLNXDeviceIoctl(%p): "
+			"Sending message type 0x%08x\n",
+			file, sMessage.sHeader.nMessageType);
+
+		/* Dispatch to the handler matching the message type. */
+		switch (sMessage.sHeader.nMessageType) {
+		case SCX_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+			nResult = SCXLNXConnOpenClientSession(pConn,
+				&sMessage, &sAnswer);
+			break;
+
+		case SCX_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+			nResult = SCXLNXConnCloseClientSession(pConn,
+				&sMessage, &sAnswer);
+			break;
+
+		case SCX_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+			nResult = SCXLNXConnRegisterSharedMemory(pConn,
+				&sMessage, &sAnswer);
+			break;
+
+		case SCX_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+			nResult = SCXLNXConnReleaseSharedMemory(pConn,
+				&sMessage, &sAnswer);
+			break;
+
+		case SCX_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+			nResult = SCXLNXConnInvokeClientCommand(pConn,
+				&sMessage, &sAnswer);
+			break;
+
+		case SCX_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+			nResult = SCXLNXConnCancelClientCommand(pConn,
+				&sMessage, &sAnswer);
+			break;
+
+		default:
+			dprintk(KERN_ERR "SCXLNXDeviceIoctlExchange(%p): "
+				"Incorrect message type (0x%08x)!\n",
+				pConn, sMessage.sHeader.nMessageType);
+			nResult = -EOPNOTSUPP;
+			break;
+		}
+
+		atomic_dec(&(pConn->nPendingOpCounter));
+
+		if (nResult != 0) {
+			dprintk(KERN_WARNING "SCXLNXDeviceIoctl(%p): "
+				"Operation returning error code 0x%08x)!\n",
+				file, nResult);
+			goto exit;
+		}
+
+		/*
+		 * Copy the answer back to the user space application.
+		 * The driver does not check this field, only copy back to user
+		 * space the data handed over by Secure World
+		 */
+		nAnswerSize = sAnswer.sHeader.nMessageSize +
+			sizeof(struct SCX_ANSWER_HEADER)/sizeof(u32);
+		if (copy_to_user(pUserAnswer,
+				&sAnswer, nAnswerSize * sizeof(u32))) {
+			dprintk(KERN_WARNING "SCXLNXDeviceIoctl(%p): "
+				"Failed to copy back the full command "
+				"answer to %p\n", file, pUserAnswer);
+			nResult = -EFAULT;
+			goto exit;
+		}
+
+		/* successful completion */
+		dprintk(KERN_INFO "SCXLNXDeviceIoctl(%p): Success\n", file);
+		break;
+
+	case IOCTL_SCX_GET_DESCRIPTION: {
+		/*
+		 * ioctl asking for the version information buffer.
+		 * pInfoBuffer is a user-space pointer; it is only used to
+		 * compute the copy_to_user() destination addresses of its
+		 * array members, never dereferenced directly.
+		 */
+		struct SCX_VERSION_INFORMATION_BUFFER *pInfoBuffer;
+
+		dprintk(KERN_INFO "IOCTL_SCX_GET_DESCRIPTION:(%p, %u, %p)\n",
+			file, ioctl_num, (void *) ioctl_param);
+
+		pInfoBuffer =
+			((struct SCX_VERSION_INFORMATION_BUFFER *) ioctl_param);
+
+		dprintk(KERN_INFO "IOCTL_SCX_GET_DESCRIPTION1: "
+			"sDriverDescription=\"%64s\"\n", S_VERSION_STRING);
+
+		if (copy_to_user(pInfoBuffer->sDriverDescription,
+				S_VERSION_STRING,
+				strlen(S_VERSION_STRING) + 1)) {
+			dprintk(KERN_ERR "SCXLNXDeviceIoctl(%p): "
+				"Fail to copy back the driver description "
+				"to %p\n",
+				file, pInfoBuffer->sDriverDescription);
+			nResult = -EFAULT;
+			goto exit;
+		}
+
+		dprintk(KERN_INFO "IOCTL_SCX_GET_DESCRIPTION2: "
+			"sSecureWorldDescription=\"%64s\"\n",
+			SCXLNXCommGetDescription(&g_SCXLNXDevice.sm));
+
+		if (copy_to_user(pInfoBuffer->sSecureWorldDescription,
+				SCXLNXCommGetDescription(&g_SCXLNXDevice.sm),
+				SCX_DESCRIPTION_BUFFER_LENGTH)) {
+			dprintk(KERN_WARNING "SCXLNXDeviceIoctl(%p): "
+				"Failed to copy back the secure world "
+				"description to %p\n",
+				file, pInfoBuffer->sSecureWorldDescription);
+			nResult = -EFAULT;
+			goto exit;
+		}
+		break;
+	}
+
+	default:
+		dprintk(KERN_ERR "SCXLNXDeviceIoctl(%p): "
+			"Unknown IOCTL code 0x%08x!\n",
+			file, ioctl_num);
+		nResult = -EOPNOTSUPP;
+		goto exit;
+	}
+
+exit:
+	return nResult;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * sysdev shutdown hook: forwards a SHUTDOWN power operation to the
+ * Secure World communication layer.
+ */
+static int SCXLNXDeviceShutdown(struct sys_device *sysdev)
+{
+	int nResult = SCXLNXCommPowerManagement(&g_SCXLNXDevice.sm,
+			SCXLNX_POWER_OPERATION_SHUTDOWN);
+
+	return nResult;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * sysdev suspend hook: asks the Secure World to hibernate.
+ */
+static int SCXLNXDeviceSuspend(struct sys_device *sysdev, pm_message_t state)
+{
+	int nResult;
+
+	printk(KERN_INFO "SCXLNXDeviceSuspend: Enter\n");
+	nResult = SCXLNXCommPowerManagement(&g_SCXLNXDevice.sm,
+		SCXLNX_POWER_OPERATION_HIBERNATE);
+	return nResult;
+}
+
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * sysdev resume hook: asks the Secure World to resume from hibernation.
+ */
+static int SCXLNXDeviceResume(struct sys_device *sysdev)
+{
+	int nResult = SCXLNXCommPowerManagement(&g_SCXLNXDevice.sm,
+			SCXLNX_POWER_OPERATION_RESUME);
+
+	return nResult;
+}
+
+
+/*----------------------------------------------------------------------------*/
+
+module_init(SCXLNXDeviceRegister);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Trusted Logic S.A.");
diff --git a/security/tf_driver/scxlnx_util.c b/security/tf_driver/scxlnx_util.c
new file mode 100644
index 000000000000..df928a4ec2c5
--- /dev/null
+++ b/security/tf_driver/scxlnx_util.c
@@ -0,0 +1,1141 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#include <linux/mman.h>
+#include "scxlnx_util.h"
+
+/*----------------------------------------------------------------------------
+ * Debug printing routines
+ *----------------------------------------------------------------------------*/
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+
+/*
+ * Debug helper: translate the virtual address "va" to a physical
+ * address using the CP15 address-translation operation and decode the
+ * memory attributes (translation fault, outer/inner cacheability,
+ * supersection, shareability, security state) encoded in the result.
+ * NOTE(review): this looks like the ARMv7 privileged-read translation
+ * (ATS1CPR) followed by a read of the Physical Address Register (PAR)
+ * -- confirm against the target core's ARM ARM.
+ */
+void addressCacheProperty(unsigned long va)
+{
+	unsigned long pa;
+	unsigned long inner;
+	unsigned long outer;
+
+	/* Issue the VA-to-PA translation, then read back the result. */
+	asm volatile ("mcr p15, 0, %0, c7, c8, 0" : : "r" (va));
+	asm volatile ("mrc p15, 0, %0, c7, c4, 0" : "=r" (pa));
+
+	dprintk(KERN_INFO "VA:%x, PA:%x\n",
+		(unsigned int) va,
+		(unsigned int) pa);
+
+	/* Bit 0 set means the translation aborted; nothing to decode. */
+	if (pa & 1) {
+		dprintk(KERN_INFO "Prop Error\n");
+		return;
+	}
+
+	/* Bits [3:2]: outer cacheability attributes. */
+	outer = (pa >> 2) & 3;
+	dprintk(KERN_INFO "\touter : %x", (unsigned int) outer);
+
+	switch (outer) {
+	case 3:
+		dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
+		break;
+	case 2:
+		dprintk(KERN_INFO "Write-Through, no Write-Allocate.\n");
+		break;
+	case 1:
+		dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
+		break;
+	case 0:
+		dprintk(KERN_INFO "Non-cacheable.\n");
+		break;
+	}
+
+	/* Bits [6:4]: inner cacheability attributes (3-bit encoding,
+	 * so values 2 and 4 are reserved and print nothing). */
+	inner = (pa >> 4) & 7;
+	dprintk(KERN_INFO "\tinner : %x", (unsigned int)inner);
+
+	switch (inner) {
+	case 7:
+		dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
+		break;
+	case 6:
+		dprintk(KERN_INFO "Write-Through.\n");
+		break;
+	case 5:
+		dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
+		break;
+	case 3:
+		dprintk(KERN_INFO "Device.\n");
+		break;
+	case 1:
+		dprintk(KERN_INFO "Strongly-ordered.\n");
+		break;
+	case 0:
+		dprintk(KERN_INFO "Non-cacheable.\n");
+		break;
+	}
+
+	/* Remaining attribute flags: supersection, shareable, NS. */
+	if (pa & 0x00000002)
+		dprintk(KERN_INFO "SuperSection.\n");
+	if (pa & 0x00000080)
+		dprintk(KERN_INFO "Memory is shareable.\n");
+	else
+		dprintk(KERN_INFO "Memory is non-shareable.\n");
+
+	if (pa & 0x00000200)
+		dprintk(KERN_INFO "Non-secure.\n");
+}
+
+#ifdef CONFIG_BENCH_SECURE_CYCLE
+
+#define LOOP_SIZE (100000)
+
+/*
+ * Debug benchmark: after configuring the performance counters via
+ * setupCounters(), measure and log the cycle cost of LOOP_SIZE
+ * iterations of pure code execution (runCodeSpeed) and of data
+ * accesses to this function's own address (runDataSpeed).
+ */
+void runBogoMIPS(void)
+{
+	uint32_t nCycles;
+	void *pAddress = &runBogoMIPS;
+
+	dprintk(KERN_INFO "BogoMIPS:\n");
+
+	setupCounters();
+	nCycles = runCodeSpeed(LOOP_SIZE);
+	dprintk(KERN_INFO "%u cycles with code access\n", nCycles);
+	nCycles = runDataSpeed(LOOP_SIZE, (unsigned long)pAddress);
+	dprintk(KERN_INFO "%u cycles to access %x\n", nCycles,
+		(unsigned int) pAddress);
+}
+
+#endif /* CONFIG_BENCH_SECURE_CYCLE */
+
+/*
+ * Dump the L1 shared buffer (the SChannel communication area shared
+ * with the secure world): configuration flags, sync serials, time and
+ * timeout values, and the command/answer queue cursors.  Debug only.
+ */
+void SCXLNXDumpL1SharedBuffer(struct SCHANNEL_C1S_BUFFER *pBuf)
+{
+	dprintk(KERN_INFO
+		"buffer@%p:\n"
+		"  nConfigFlags_S=%08X\n"
+		"  sVersionDescription=%64s\n"
+		"  nStatus_S=%08X\n"
+		"  nSyncSerial_N=%08X\n"
+		"  nSyncSerial_S=%08X\n"
+		"  sTime_N[0]=%016llX\n"
+		"  sTime_N[1]=%016llX\n"
+		"  sTimeout_S[0]=%016llX\n"
+		"  sTimeout_S[1]=%016llX\n"
+		"  nFirstCommand=%08X\n"
+		"  nFirstFreeCommand=%08X\n"
+		"  nFirstAnswer=%08X\n"
+		"  nFirstFreeAnswer=%08X\n\n",
+		pBuf,
+		pBuf->nConfigFlags_S,
+		pBuf->sVersionDescription,
+		pBuf->nStatus_S,
+		pBuf->nSyncSerial_N,
+		pBuf->nSyncSerial_S,
+		pBuf->sTime_N[0],
+		pBuf->sTime_N[1],
+		pBuf->sTimeout_S[0],
+		pBuf->sTimeout_S[1],
+		pBuf->nFirstCommand,
+		pBuf->nFirstFreeCommand,
+		pBuf->nFirstAnswer,
+		pBuf->nFirstFreeAnswer);
+}
+
+
+/*
+ * Dump the specified SChannel command message using dprintk, decoding
+ * the header and the type-specific payload.  Debug only.
+ *
+ * Fix: the INVOKE_CLIENT_COMMAND case previously dumped
+ * sOpenClientSessionMessage.sParams (copy-paste from the
+ * OPEN_CLIENT_SESSION case); it now reads the parameters from
+ * sInvokeClientCommandMessage, the union member that is actually live
+ * for this message type.
+ */
+void SCXLNXDumpMessage(union SCX_COMMAND_MESSAGE *pMessage)
+{
+	u32 i;
+
+	dprintk(KERN_INFO "message@%p:\n", pMessage);
+
+	switch (pMessage->sHeader.nMessageType) {
+	case SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
+		dprintk(KERN_INFO
+			"   nMessageSize             = 0x%02X\n"
+			"   nMessageType             = 0x%02X "
+			"SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT\n"
+			"   nOperationID             = 0x%08X\n"
+			"   nDeviceContextID         = 0x%08X\n",
+			pMessage->sHeader.nMessageSize,
+			pMessage->sHeader.nMessageType,
+			pMessage->sHeader.nOperationID,
+			pMessage->sCreateDeviceContextMessage.nDeviceContextID
+		);
+		break;
+
+	case SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
+		dprintk(KERN_INFO
+			"   nMessageSize             = 0x%02X\n"
+			"   nMessageType             = 0x%02X "
+			"SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT\n"
+			"   nOperationID             = 0x%08X\n"
+			"   hDeviceContext           = 0x%08X\n",
+			pMessage->sHeader.nMessageSize,
+			pMessage->sHeader.nMessageType,
+			pMessage->sHeader.nOperationID,
+			pMessage->sDestroyDeviceContextMessage.hDeviceContext);
+		break;
+
+	case SCX_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+		dprintk(KERN_INFO
+			"   nMessageSize             = 0x%02X\n"
+			"   nMessageType             = 0x%02X "
+			"SCX_MESSAGE_TYPE_OPEN_CLIENT_SESSION\n"
+			"   nParamTypes              = 0x%04X\n"
+			"   nOperationID             = 0x%08X\n"
+			"   hDeviceContext           = 0x%08X\n"
+			"   nCancellationID          = 0x%08X\n"
+			"   sTimeout                 = 0x%016llX\n"
+			"   sDestinationUUID         = "
+			"%08X-%04X-%04X-%02X%02X-"
+			"%02X%02X%02X%02X%02X%02X\n",
+			pMessage->sHeader.nMessageSize,
+			pMessage->sHeader.nMessageType,
+			pMessage->sOpenClientSessionMessage.nParamTypes,
+			pMessage->sHeader.nOperationID,
+			pMessage->sOpenClientSessionMessage.hDeviceContext,
+			pMessage->sOpenClientSessionMessage.nCancellationID,
+			pMessage->sOpenClientSessionMessage.sTimeout,
+			pMessage->sOpenClientSessionMessage.sDestinationUUID.
+				time_low,
+			pMessage->sOpenClientSessionMessage.sDestinationUUID.
+				time_mid,
+			pMessage->sOpenClientSessionMessage.sDestinationUUID.
+				time_hi_and_version,
+			pMessage->sOpenClientSessionMessage.sDestinationUUID.
+				clock_seq_and_node[0],
+			pMessage->sOpenClientSessionMessage.sDestinationUUID.
+				clock_seq_and_node[1],
+			pMessage->sOpenClientSessionMessage.sDestinationUUID.
+				clock_seq_and_node[2],
+			pMessage->sOpenClientSessionMessage.sDestinationUUID.
+				clock_seq_and_node[3],
+			pMessage->sOpenClientSessionMessage.sDestinationUUID.
+				clock_seq_and_node[4],
+			pMessage->sOpenClientSessionMessage.sDestinationUUID.
+				clock_seq_and_node[5],
+			pMessage->sOpenClientSessionMessage.sDestinationUUID.
+				clock_seq_and_node[6],
+			pMessage->sOpenClientSessionMessage.sDestinationUUID.
+				clock_seq_and_node[7]
+		);
+
+		/* Each SCX param is dumped as three raw 32-bit words. */
+		for (i = 0; i < 4; i++) {
+			uint32_t *pParam = (uint32_t *) &pMessage->
+				sOpenClientSessionMessage.sParams[i];
+			dprintk(KERN_INFO "   sParams[%d] = "
+				"0x%08X:0x%08X:0x%08X\n",
+				i, pParam[0], pParam[1], pParam[2]);
+		}
+
+		/* Decode only the main login type; option bits ignored. */
+		switch (SCX_LOGIN_GET_MAIN_TYPE(
+			pMessage->sOpenClientSessionMessage.nLoginType)) {
+		case SCX_LOGIN_PUBLIC:
+			dprintk(
+				KERN_INFO "   nLoginType               = "
+					"SCX_LOGIN_PUBLIC\n");
+			break;
+		case SCX_LOGIN_USER:
+			dprintk(
+				KERN_INFO "   nLoginType               = "
+					"SCX_LOGIN_USER\n");
+			break;
+		case SCX_LOGIN_GROUP:
+			dprintk(
+				KERN_INFO "   nLoginType               = "
+					"SCX_LOGIN_GROUP\n");
+			break;
+		case SCX_LOGIN_APPLICATION:
+			dprintk(
+				KERN_INFO "   nLoginType               = "
+					"SCX_LOGIN_APPLICATION\n");
+			break;
+		case SCX_LOGIN_APPLICATION_USER:
+			dprintk(
+				KERN_INFO "   nLoginType               = "
+					"SCX_LOGIN_APPLICATION_USER\n");
+			break;
+		case SCX_LOGIN_APPLICATION_GROUP:
+			dprintk(
+				KERN_INFO "   nLoginType               = "
+					"SCX_LOGIN_APPLICATION_GROUP\n");
+			break;
+		case SCX_LOGIN_AUTHENTICATION:
+			dprintk(
+				KERN_INFO "   nLoginType               = "
+					"SCX_LOGIN_AUTHENTICATION\n");
+			break;
+		case SCX_LOGIN_PRIVILEGED:
+			dprintk(
+				KERN_INFO "   nLoginType               = "
+					"SCX_LOGIN_PRIVILEGED\n");
+			break;
+		default:
+			dprintk(
+				KERN_ERR "   nLoginType               = "
+					"0x%08X (Unknown login type)\n",
+				pMessage->sOpenClientSessionMessage.nLoginType);
+			break;
+		}
+
+		dprintk(
+			KERN_INFO "   sLoginData               = ");
+		for (i = 0; i < 20; i++)
+			dprintk(
+				KERN_INFO "%d",
+				pMessage->sOpenClientSessionMessage.
+					sLoginData[i]);
+		dprintk("\n");
+		break;
+
+	case SCX_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+		dprintk(KERN_INFO
+			"   nMessageSize             = 0x%02X\n"
+			"   nMessageType             = 0x%02X "
+			"SCX_MESSAGE_TYPE_CLOSE_CLIENT_SESSION\n"
+			"   nOperationID             = 0x%08X\n"
+			"   hDeviceContext           = 0x%08X\n"
+			"   hClientSession           = 0x%08X\n",
+			pMessage->sHeader.nMessageSize,
+			pMessage->sHeader.nMessageType,
+			pMessage->sHeader.nOperationID,
+			pMessage->sCloseClientSessionMessage.hDeviceContext,
+			pMessage->sCloseClientSessionMessage.hClientSession
+		);
+		break;
+
+	case SCX_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+		dprintk(KERN_INFO
+			"   nMessageSize             = 0x%02X\n"
+			"   nMessageType             = 0x%02X "
+			"SCX_MESSAGE_TYPE_REGISTER_SHARED_MEMORY\n"
+			"   nMemoryFlags             = 0x%04X\n"
+			"   nOperationID             = 0x%08X\n"
+			"   hDeviceContext           = 0x%08X\n"
+			"   nBlockID                 = 0x%08X\n"
+			"   nSharedMemSize           = 0x%08X\n"
+			"   nSharedMemStartOffset    = 0x%08X\n"
+			"   nSharedMemDescriptors[0] = 0x%08X\n"
+			"   nSharedMemDescriptors[1] = 0x%08X\n"
+			"   nSharedMemDescriptors[2] = 0x%08X\n"
+			"   nSharedMemDescriptors[3] = 0x%08X\n"
+			"   nSharedMemDescriptors[4] = 0x%08X\n"
+			"   nSharedMemDescriptors[5] = 0x%08X\n"
+			"   nSharedMemDescriptors[6] = 0x%08X\n"
+			"   nSharedMemDescriptors[7] = 0x%08X\n",
+			pMessage->sHeader.nMessageSize,
+			pMessage->sHeader.nMessageType,
+			pMessage->sRegisterSharedMemoryMessage.nMemoryFlags,
+			pMessage->sHeader.nOperationID,
+			pMessage->sRegisterSharedMemoryMessage.hDeviceContext,
+			pMessage->sRegisterSharedMemoryMessage.nBlockID,
+			pMessage->sRegisterSharedMemoryMessage.nSharedMemSize,
+			pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemStartOffset,
+			pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemDescriptors[0],
+			pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemDescriptors[1],
+			pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemDescriptors[2],
+			pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemDescriptors[3],
+			pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemDescriptors[4],
+			pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemDescriptors[5],
+			pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemDescriptors[6],
+			pMessage->sRegisterSharedMemoryMessage.
+				nSharedMemDescriptors[7]);
+		break;
+
+	case SCX_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+		dprintk(KERN_INFO
+			"   nMessageSize             = 0x%02X\n"
+			"   nMessageType             = 0x%02X "
+			"SCX_MESSAGE_TYPE_RELEASE_SHARED_MEMORY\n"
+			"   nOperationID             = 0x%08X\n"
+			"   hDeviceContext           = 0x%08X\n"
+			"   hBlock                   = 0x%08X\n",
+			pMessage->sHeader.nMessageSize,
+			pMessage->sHeader.nMessageType,
+			pMessage->sHeader.nOperationID,
+			pMessage->sReleaseSharedMemoryMessage.hDeviceContext,
+			pMessage->sReleaseSharedMemoryMessage.hBlock);
+		break;
+
+	case SCX_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+		dprintk(KERN_INFO
+			"   nMessageSize             = 0x%02X\n"
+			"   nMessageType             = 0x%02X "
+			"SCX_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND\n"
+			"   nParamTypes              = 0x%04X\n"
+			"   nOperationID             = 0x%08X\n"
+			"   hDeviceContext           = 0x%08X\n"
+			"   hClientSession           = 0x%08X\n"
+			"   sTimeout                 = 0x%016llX\n"
+			"   nCancellationID          = 0x%08X\n"
+			"   nClientCommandIdentifier = 0x%08X\n",
+			pMessage->sHeader.nMessageSize,
+			pMessage->sHeader.nMessageType,
+			pMessage->sInvokeClientCommandMessage.nParamTypes,
+			pMessage->sHeader.nOperationID,
+			pMessage->sInvokeClientCommandMessage.hDeviceContext,
+			pMessage->sInvokeClientCommandMessage.hClientSession,
+			pMessage->sInvokeClientCommandMessage.sTimeout,
+			pMessage->sInvokeClientCommandMessage.nCancellationID,
+			pMessage->sInvokeClientCommandMessage.
+				nClientCommandIdentifier
+		);
+
+		/* Fixed copy-paste: read the params of THIS message type,
+		 * not those of an open-session message. */
+		for (i = 0; i < 4; i++) {
+			uint32_t *pParam = (uint32_t *) &pMessage->
+				sInvokeClientCommandMessage.sParams[i];
+			dprintk(KERN_INFO "   sParams[%d] = "
+				"0x%08X:0x%08X:0x%08X\n", i,
+				pParam[0], pParam[1], pParam[2]);
+		}
+		break;
+
+	case SCX_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+		dprintk(KERN_INFO
+			"   nMessageSize             = 0x%02X\n"
+			"   nMessageType             = 0x%02X "
+			"SCX_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND\n"
+			"   nOperationID             = 0x%08X\n"
+			"   hDeviceContext           = 0x%08X\n"
+			"   hClientSession           = 0x%08X\n",
+			pMessage->sHeader.nMessageSize,
+			pMessage->sHeader.nMessageType,
+			pMessage->sHeader.nOperationID,
+			pMessage->sCancelClientOperationMessage.hDeviceContext,
+			pMessage->sCancelClientOperationMessage.hClientSession);
+		break;
+
+	case SCX_MESSAGE_TYPE_MANAGEMENT:
+		dprintk(KERN_INFO
+			"   nMessageSize             = 0x%02X\n"
+			"   nMessageType             = 0x%02X "
+			"SCX_MESSAGE_TYPE_MANAGEMENT\n"
+			"   nOperationID             = 0x%08X\n"
+			"   nCommand                 = 0x%08X\n"
+			"   nW3BSize                 = 0x%08X\n"
+			"   nW3BStartOffset          = 0x%08X\n",
+			pMessage->sHeader.nMessageSize,
+			pMessage->sHeader.nMessageType,
+			pMessage->sHeader.nOperationID,
+			pMessage->sManagementMessage.nCommand,
+			pMessage->sManagementMessage.nW3BSize,
+			pMessage->sManagementMessage.nW3BStartOffset);
+		break;
+
+	default:
+		dprintk(
+			KERN_ERR "   nMessageType = 0x%08X "
+				"(Unknown message type)\n",
+			pMessage->sHeader.nMessageType);
+		break;
+	}
+}
+
+
+/*
+ * Dump the specified SChannel answer message using dprintk, decoding
+ * the header and the type-specific payload.  Debug only.
+ */
+void SCXLNXDumpAnswer(union SCX_ANSWER_MESSAGE *pAnswer)
+{
+	u32 i;
+	dprintk(
+		KERN_INFO "answer@%p:\n",
+		pAnswer);
+
+	switch (pAnswer->sHeader.nMessageType) {
+	case SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
+		dprintk(KERN_INFO
+			"   nMessageSize    = 0x%02X\n"
+			"   nMessageType    = 0x%02X "
+			"SCX_ANSWER_CREATE_DEVICE_CONTEXT\n"
+			"   nOperationID    = 0x%08X\n"
+			"   nErrorCode      = 0x%08X\n"
+			"   hDeviceContext  = 0x%08X\n",
+			pAnswer->sHeader.nMessageSize,
+			pAnswer->sHeader.nMessageType,
+			pAnswer->sHeader.nOperationID,
+			pAnswer->sCreateDeviceContextAnswer.nErrorCode,
+			pAnswer->sCreateDeviceContextAnswer.hDeviceContext);
+		break;
+
+	case SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
+		dprintk(KERN_INFO
+			"   nMessageSize     = 0x%02X\n"
+			"   nMessageType     = 0x%02X "
+			"ANSWER_DESTROY_DEVICE_CONTEXT\n"
+			"   nOperationID     = 0x%08X\n"
+			"   nErrorCode       = 0x%08X\n"
+			"   nDeviceContextID = 0x%08X\n",
+			pAnswer->sHeader.nMessageSize,
+			pAnswer->sHeader.nMessageType,
+			pAnswer->sHeader.nOperationID,
+			pAnswer->sDestroyDeviceContextAnswer.nErrorCode,
+			pAnswer->sDestroyDeviceContextAnswer.nDeviceContextID);
+		break;
+
+
+	case SCX_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+		dprintk(KERN_INFO
+			"   nMessageSize    = 0x%02X\n"
+			"   nMessageType    = 0x%02X "
+			"SCX_ANSWER_OPEN_CLIENT_SESSION\n"
+			"   nReturnOrigin   = 0x%02X\n"
+			"   nOperationID    = 0x%08X\n"
+			"   nErrorCode      = 0x%08X\n"
+			"   hClientSession  = 0x%08X\n",
+			pAnswer->sHeader.nMessageSize,
+			pAnswer->sHeader.nMessageType,
+			pAnswer->sOpenClientSessionAnswer.nReturnOrigin,
+			pAnswer->sHeader.nOperationID,
+			pAnswer->sOpenClientSessionAnswer.nErrorCode,
+			pAnswer->sOpenClientSessionAnswer.hClientSession);
+		/* Output params: each answer slot is an (a, b) value pair. */
+		for (i = 0; i < 4; i++) {
+			dprintk(KERN_INFO "   sAnswers[%d]=0x%08X:0x%08X\n",
+				i,
+				pAnswer->sOpenClientSessionAnswer.sAnswers[i].
+					sValue.a,
+				pAnswer->sOpenClientSessionAnswer.sAnswers[i].
+					sValue.b);
+		}
+		break;
+
+	case SCX_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+		dprintk(KERN_INFO
+			"   nMessageSize     = 0x%02X\n"
+			"   nMessageType     = 0x%02X "
+			"ANSWER_CLOSE_CLIENT_SESSION\n"
+			"   nOperationID     = 0x%08X\n"
+			"   nErrorCode       = 0x%08X\n",
+			pAnswer->sHeader.nMessageSize,
+			pAnswer->sHeader.nMessageType,
+			pAnswer->sHeader.nOperationID,
+			pAnswer->sCloseClientSessionAnswer.nErrorCode);
+		break;
+
+	case SCX_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+		dprintk(KERN_INFO
+			"   nMessageSize    = 0x%02X\n"
+			"   nMessageType    = 0x%02X "
+			"SCX_ANSWER_REGISTER_SHARED_MEMORY\n"
+			"   nOperationID    = 0x%08X\n"
+			"   nErrorCode      = 0x%08X\n"
+			"   hBlock          = 0x%08X\n",
+			pAnswer->sHeader.nMessageSize,
+			pAnswer->sHeader.nMessageType,
+			pAnswer->sHeader.nOperationID,
+			pAnswer->sRegisterSharedMemoryAnswer.nErrorCode,
+			pAnswer->sRegisterSharedMemoryAnswer.hBlock);
+		break;
+
+	case SCX_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+		dprintk(KERN_INFO
+			"   nMessageSize    = 0x%02X\n"
+			"   nMessageType    = 0x%02X "
+			"ANSWER_RELEASE_SHARED_MEMORY\n"
+			"   nOperationID    = 0x%08X\n"
+			"   nErrorCode      = 0x%08X\n"
+			"   nBlockID        = 0x%08X\n",
+			pAnswer->sHeader.nMessageSize,
+			pAnswer->sHeader.nMessageType,
+			pAnswer->sHeader.nOperationID,
+			pAnswer->sReleaseSharedMemoryAnswer.nErrorCode,
+			pAnswer->sReleaseSharedMemoryAnswer.nBlockID);
+		break;
+
+	case SCX_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+		dprintk(KERN_INFO
+			"   nMessageSize    = 0x%02X\n"
+			"   nMessageType    = 0x%02X "
+			"SCX_ANSWER_INVOKE_CLIENT_COMMAND\n"
+			"   nReturnOrigin   = 0x%02X\n"
+			"   nOperationID    = 0x%08X\n"
+			"   nErrorCode      = 0x%08X\n",
+			pAnswer->sHeader.nMessageSize,
+			pAnswer->sHeader.nMessageType,
+			pAnswer->sInvokeClientCommandAnswer.nReturnOrigin,
+			pAnswer->sHeader.nOperationID,
+			pAnswer->sInvokeClientCommandAnswer.nErrorCode
+			);
+		for (i = 0; i < 4; i++) {
+			dprintk(KERN_INFO "   sAnswers[%d]=0x%08X:0x%08X\n",
+				i,
+				pAnswer->sInvokeClientCommandAnswer.sAnswers[i].
+					sValue.a,
+				pAnswer->sInvokeClientCommandAnswer.sAnswers[i].
+					sValue.b);
+		}
+		break;
+
+	case SCX_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+		dprintk(KERN_INFO
+			"   nMessageSize    = 0x%02X\n"
+			"   nMessageType    = 0x%02X "
+			"SCX_ANSWER_CANCEL_CLIENT_COMMAND\n"
+			"   nOperationID    = 0x%08X\n"
+			"   nErrorCode      = 0x%08X\n",
+			pAnswer->sHeader.nMessageSize,
+			pAnswer->sHeader.nMessageType,
+			pAnswer->sHeader.nOperationID,
+			pAnswer->sCancelClientOperationAnswer.nErrorCode);
+		break;
+
+	case SCX_MESSAGE_TYPE_MANAGEMENT:
+		/* NOTE(review): this case reads the error code from the
+		 * generic header rather than a management-specific answer
+		 * struct, unlike every other case -- confirm intended. */
+		dprintk(KERN_INFO
+			"   nMessageSize    = 0x%02X\n"
+			"   nMessageType    = 0x%02X "
+			"SCX_MESSAGE_TYPE_MANAGEMENT\n"
+			"   nOperationID    = 0x%08X\n"
+			"   nErrorCode      = 0x%08X\n",
+			pAnswer->sHeader.nMessageSize,
+			pAnswer->sHeader.nMessageType,
+			pAnswer->sHeader.nOperationID,
+			pAnswer->sHeader.nErrorCode);
+		break;
+
+	default:
+		dprintk(
+			KERN_ERR "   nMessageType = 0x%02X "
+				"(Unknown message type)\n",
+			pAnswer->sHeader.nMessageType);
+		break;
+
+	}
+}
+
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+/*----------------------------------------------------------------------------
+ * SHA-1 implementation
+ * This is taken from the Linux kernel source crypto/sha1.c
+ *----------------------------------------------------------------------------*/
+
+/* SHA-1 streaming context: bit count of data hashed so far, the five
+ * 32-bit chaining variables, and a buffer for a partial 64-byte block. */
+struct sha1_ctx {
+	u64 count;
+	u32 state[5];
+	u8 buffer[64];
+};
+
+/* 32-bit rotate left; "bits" must be in 1..31 (a shift by 32 is UB). */
+static inline u32 rol(u32 value, u32 bits)
+{
+	return ((value) << (bits)) | ((value) >> (32 - (bits)));
+}
+
+/* blk0() and blk() perform the initial expand. */
+/* I got the idea of expanding during the round function from SSLeay */
+#define blk0(i) block32[i]
+
+/* Message-schedule expansion computed in place in a 16-word window. */
+#define blk(i) (block32[i & 15] = rol( \
+	block32[(i + 13) & 15] ^ block32[(i + 8) & 15] ^ \
+	block32[(i + 2) & 15] ^ block32[i & 15], 1))
+
+/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
+#define R0(v, w, x, y, z, i) do { \
+	z += ((w & (x ^ y)) ^ y) + blk0(i) + 0x5A827999 + rol(v, 5); \
+	w = rol(w, 30); } while (0)
+
+#define R1(v, w, x, y, z, i) do { \
+	z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
+	w = rol(w, 30); } while (0)
+
+#define R2(v, w, x, y, z, i) do { \
+	z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \
+	w = rol(w, 30); } while (0)
+
+#define R3(v, w, x, y, z, i) do { \
+	z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
+	w = rol(w, 30); } while (0)
+
+#define R4(v, w, x, y, z, i) do { \
+	z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
+	w = rol(w, 30); } while (0)
+
+
+/* Hash a single 512-bit block. This is the core of the algorithm.
+ * "state" holds the five chaining variables and is updated in place;
+ * "in" is 64 bytes of message, read big-endian. */
+static void sha1_transform(u32 *state, const u8 *in)
+{
+	u32 a, b, c, d, e;
+	u32 block32[16];
+
+	/* convert/copy data to workspace (big-endian byte order) */
+	for (a = 0; a < sizeof(block32)/sizeof(u32); a++)
+		block32[a] = ((u32) in[4 * a]) << 24 |
+			      ((u32) in[4 * a + 1]) << 16 |
+			      ((u32) in[4 * a + 2]) << 8 |
+			      ((u32) in[4 * a + 3]);
+
+	/* Copy context->state[] to working vars */
+	a = state[0];
+	b = state[1];
+	c = state[2];
+	d = state[3];
+	e = state[4];
+
+	/* 4 rounds of 20 operations each. Loop unrolled. */
+	R0(a, b, c, d, e, 0); R0(e, a, b, c, d, 1);
+	R0(d, e, a, b, c, 2); R0(c, d, e, a, b, 3);
+	R0(b, c, d, e, a, 4); R0(a, b, c, d, e, 5);
+	R0(e, a, b, c, d, 6); R0(d, e, a, b, c, 7);
+	R0(c, d, e, a, b, 8); R0(b, c, d, e, a, 9);
+	R0(a, b, c, d, e, 10); R0(e, a, b, c, d, 11);
+	R0(d, e, a, b, c, 12); R0(c, d, e, a, b, 13);
+	R0(b, c, d, e, a, 14); R0(a, b, c, d, e, 15);
+
+	R1(e, a, b, c, d, 16); R1(d, e, a, b, c, 17);
+	R1(c, d, e, a, b, 18); R1(b, c, d, e, a, 19);
+
+	R2(a, b, c, d, e, 20); R2(e, a, b, c, d, 21);
+	R2(d, e, a, b, c, 22); R2(c, d, e, a, b, 23);
+	R2(b, c, d, e, a, 24); R2(a, b, c, d, e, 25);
+	R2(e, a, b, c, d, 26); R2(d, e, a, b, c, 27);
+	R2(c, d, e, a, b, 28); R2(b, c, d, e, a, 29);
+	R2(a, b, c, d, e, 30); R2(e, a, b, c, d, 31);
+	R2(d, e, a, b, c, 32); R2(c, d, e, a, b, 33);
+	R2(b, c, d, e, a, 34); R2(a, b, c, d, e, 35);
+	R2(e, a, b, c, d, 36); R2(d, e, a, b, c, 37);
+	R2(c, d, e, a, b, 38); R2(b, c, d, e, a, 39);
+
+	R3(a, b, c, d, e, 40); R3(e, a, b, c, d, 41);
+	R3(d, e, a, b, c, 42); R3(c, d, e, a, b, 43);
+	R3(b, c, d, e, a, 44); R3(a, b, c, d, e, 45);
+	R3(e, a, b, c, d, 46); R3(d, e, a, b, c, 47);
+	R3(c, d, e, a, b, 48); R3(b, c, d, e, a, 49);
+	R3(a, b, c, d, e, 50); R3(e, a, b, c, d, 51);
+	R3(d, e, a, b, c, 52); R3(c, d, e, a, b, 53);
+	R3(b, c, d, e, a, 54); R3(a, b, c, d, e, 55);
+	R3(e, a, b, c, d, 56); R3(d, e, a, b, c, 57);
+	R3(c, d, e, a, b, 58); R3(b, c, d, e, a, 59);
+
+	R4(a, b, c, d, e, 60); R4(e, a, b, c, d, 61);
+	R4(d, e, a, b, c, 62); R4(c, d, e, a, b, 63);
+	R4(b, c, d, e, a, 64); R4(a, b, c, d, e, 65);
+	R4(e, a, b, c, d, 66); R4(d, e, a, b, c, 67);
+	R4(c, d, e, a, b, 68); R4(b, c, d, e, a, 69);
+	R4(a, b, c, d, e, 70); R4(e, a, b, c, d, 71);
+	R4(d, e, a, b, c, 72); R4(c, d, e, a, b, 73);
+	R4(b, c, d, e, a, 74); R4(a, b, c, d, e, 75);
+	R4(e, a, b, c, d, 76); R4(d, e, a, b, c, 77);
+	R4(c, d, e, a, b, 78); R4(b, c, d, e, a, 79);
+
+	/* Add the working vars back into context.state[] */
+	state[0] += a;
+	state[1] += b;
+	state[2] += c;
+	state[3] += d;
+	state[4] += e;
+	/* Wipe variables */
+	a = b = c = d = e = 0;
+	memset(block32, 0x00, sizeof(block32));
+}
+
+
+/* Reset a SHA-1 context: zero bit count, the standard FIPS 180-1
+ * initial chaining values, and an empty partial-block buffer. */
+static void sha1_init(void *ctx)
+{
+	struct sha1_ctx *sctx = ctx;
+
+	sctx->count = 0;
+	sctx->state[0] = 0x67452301;
+	sctx->state[1] = 0xEFCDAB89;
+	sctx->state[2] = 0x98BADCFE;
+	sctx->state[3] = 0x10325476;
+	sctx->state[4] = 0xC3D2E1F0;
+	memset(sctx->buffer, 0, sizeof(sctx->buffer));
+}
+
+
+/* Feed "len" bytes of "data" into the SHA-1 context, buffering a
+ * partial 64-byte block and transforming every complete block.
+ *
+ * Fix: widen "len" to u64 before shifting.  "len << 3" was evaluated
+ * in 32 bits, so total inputs of 512 MiB or more overflowed and
+ * corrupted the length that sha1_final() appends to the message. */
+static void sha1_update(void *ctx, const u8 *data, unsigned int len)
+{
+	struct sha1_ctx *sctx = ctx;
+	unsigned int i, j;
+
+	/* j = current fill level of the partial-block buffer (bytes). */
+	j = (sctx->count >> 3) & 0x3f;
+	sctx->count += (u64)len << 3;
+
+	if ((j + len) > 63) {
+		/* Complete the buffered block, then hash full blocks
+		 * straight from the input. */
+		memcpy(&sctx->buffer[j], data, (i = 64 - j));
+		sha1_transform(sctx->state, sctx->buffer);
+		for ( ; i + 63 < len; i += 64)
+			sha1_transform(sctx->state, &data[i]);
+		j = 0;
+	} else
+		i = 0;
+	/* Stash the remaining tail for the next update/final call. */
+	memcpy(&sctx->buffer[j], &data[i], len - i);
+}
+
+
+/* Add padding and return the message digest.
+ * Appends the 0x80 pad byte, zero-fills to 56 mod 64, appends the
+ * big-endian 64-bit bit count, then serializes the five chaining
+ * variables big-endian into "out" (20 bytes) and wipes the context. */
+static void sha1_final(void *ctx, u8 *out)
+{
+	struct sha1_ctx *sctx = ctx;
+	u32 i, j, index, padlen;
+	u64 t;
+	u8 bits[8] = { 0, };
+	static const u8 padding[64] = { 0x80, };
+
+	/* Serialize the bit count big-endian before padding mutates it. */
+	t = sctx->count;
+	bits[7] = 0xff & t; t >>= 8;
+	bits[6] = 0xff & t; t >>= 8;
+	bits[5] = 0xff & t; t >>= 8;
+	bits[4] = 0xff & t; t >>= 8;
+	bits[3] = 0xff & t; t >>= 8;
+	bits[2] = 0xff & t; t >>= 8;
+	bits[1] = 0xff & t; t >>= 8;
+	bits[0] = 0xff & t;
+
+	/* Pad out to 56 mod 64 */
+	index = (sctx->count >> 3) & 0x3f;
+	padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+	sha1_update(sctx, padding, padlen);
+
+	/* Append length */
+	sha1_update(sctx, bits, sizeof(bits));
+
+	/* Store state in digest */
+	for (i = j = 0; i < 5; i++, j += 4) {
+		u32 t2 = sctx->state[i];
+		out[j+3] = t2 & 0xff; t2 >>= 8;
+		out[j+2] = t2 & 0xff; t2 >>= 8;
+		out[j+1] = t2 & 0xff; t2 >>= 8;
+		out[j] = t2 & 0xff;
+	}
+
+	/* Wipe context */
+	memset(sctx, 0, sizeof(*sctx));
+}
+
+
+
+
+/*----------------------------------------------------------------------------
+ * Process identification
+ *----------------------------------------------------------------------------*/
+
+/* Compute a SHA-1 hash (20 bytes into pHash) of the current process's
+ * main executable, used to authenticate the client to the secure
+ * world.  The executable file backing the first VM_EXECUTABLE VMA is
+ * temporarily mapped, hashed page by page through a bounce buffer,
+ * then unmapped.
+ *
+ * Returns 0 on success, -ENOENT if no executable VMA exists, -ENOMEM,
+ * -EFAULT or -EINVAL on failure.
+ *
+ * Fixes: (1) a second KERN_ERR was concatenated into the middle of
+ * the out-of-memory message; (2) do_mmap() returns an unsigned
+ * address, so "start < 0" was always false -- use IS_ERR_VALUE();
+ * (3) nResult started at 0, so falling through the loop without
+ * finding an executable VMA returned success with *pHash
+ * uninitialized -- start at -ENOENT instead (consistent with
+ * SCXLNXConnHashApplicationPathAndData). */
+int SCXLNXConnGetCurrentProcessHash(void *pHash)
+{
+	int nResult = -ENOENT;
+	void *buffer;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+
+	buffer = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (buffer == NULL) {
+		dprintk(
+			KERN_ERR "SCXLNXConnGetCurrentProcessHash:"
+			" Out of memory for buffer!\n");
+		return -ENOMEM;
+	}
+
+	mm = current->mm;
+
+	down_read(&(mm->mmap_sem));
+	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+		if ((vma->vm_flags & VM_EXECUTABLE) != 0 && vma->vm_file
+				!= NULL) {
+			struct dentry *dentry;
+			unsigned long start;
+			unsigned long cur;
+			unsigned long end;
+			struct sha1_ctx sha1Context;
+
+			dentry = dget(vma->vm_file->f_dentry);
+
+			dprintk(
+				KERN_DEBUG "SCXLNXConnGetCurrentProcessHash: "
+					"Found executable VMA for inode %lu "
+					"(%lu bytes).\n",
+					dentry->d_inode->i_ino,
+					(unsigned long) (dentry->d_inode->
+						i_size));
+
+			start = do_mmap(vma->vm_file, 0,
+				dentry->d_inode->i_size,
+				PROT_READ | PROT_WRITE | PROT_EXEC,
+				MAP_PRIVATE, 0);
+			/* do_mmap() encodes errors in the top of the
+			 * address range; an unsigned value is never < 0. */
+			if (IS_ERR_VALUE(start)) {
+				dprintk(
+					KERN_ERR "SCXLNXConnGetCurrentProcess"
+					"Hash: do_mmap failed (error %d)!\n",
+					(int) start);
+				dput(dentry);
+				nResult = -EFAULT;
+				goto vma_out;
+			}
+
+			end = start + dentry->d_inode->i_size;
+
+			/* Hash the mapping in PAGE_SIZE chunks through the
+			 * kernel bounce buffer. */
+			sha1_init(&sha1Context);
+			cur = start;
+			while (cur < end) {
+				unsigned long chunk;
+
+				chunk = end - cur;
+				if (chunk > PAGE_SIZE)
+					chunk = PAGE_SIZE;
+				if (copy_from_user(buffer, (const void *) cur,
+						chunk) != 0) {
+					dprintk(
+						KERN_ERR "SCXLNXConnGetCurrent"
+						"ProcessHash: copy_from_user "
+						"failed!\n");
+					nResult = -EINVAL;
+					(void) do_munmap(mm, start,
+						dentry->d_inode->i_size);
+					dput(dentry);
+					goto vma_out;
+				}
+				sha1_update(&sha1Context, buffer, chunk);
+				cur += chunk;
+			}
+			sha1_final(&sha1Context, pHash);
+			nResult = 0;
+
+			(void) do_munmap(mm, start, dentry->d_inode->i_size);
+			dput(dentry);
+			break;
+		}
+	}
+vma_out:
+	up_read(&(mm->mmap_sem));
+
+	internal_kfree(buffer);
+
+	if (nResult == -ENOENT)
+		dprintk(
+			KERN_ERR "SCXLNXConnGetCurrentProcessHash: "
+				"No executable VMA found for process!\n");
+	return nResult;
+}
+
+
+/* Hash (SHA-1, 20 bytes into pBuffer) the filesystem path of the
+ * current application's executable.  If pData is NULL nothing else is
+ * added to the hash; otherwise nDataLen bytes of pData are hashed in
+ * after the path.
+ *
+ * Returns 0 on success, -ENOENT if no executable VMA exists, -ENOMEM
+ * on allocation failure, or the d_path() error code.
+ *
+ * Fix: the d_path() error check tested IS_ERR(path) -- "path" is the
+ * VMA's own f_path pointer and can never be an error pointer.  The
+ * error pointer is returned in "endpath" (as the PTR_ERR(endpath) on
+ * the next line already assumed). */
+int SCXLNXConnHashApplicationPathAndData(char *pBuffer, void *pData,
+	u32 nDataLen)
+{
+	int nResult = -ENOENT;
+	char *buffer = NULL;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+
+	buffer = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (buffer == NULL) {
+		nResult = -ENOMEM;
+		goto end;
+	}
+
+	mm = current->mm;
+
+	down_read(&(mm->mmap_sem));
+	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+		if ((vma->vm_flags & VM_EXECUTABLE) != 0
+				&& vma->vm_file != NULL) {
+			struct path *path;
+			char *endpath;
+			size_t pathlen;
+			struct sha1_ctx sha1Context;
+			u8 pHashData[SHA1_DIGEST_SIZE];
+
+			path = &vma->vm_file->f_path;
+
+			/* d_path() fills "buffer" from the end and returns
+			 * a pointer to the start of the path, or an error
+			 * pointer. */
+			endpath = d_path(path, buffer, PAGE_SIZE);
+			if (IS_ERR(endpath)) {
+				nResult = PTR_ERR(endpath);
+				up_read(&(mm->mmap_sem));
+				goto end;
+			}
+			pathlen = (buffer + PAGE_SIZE) - endpath;
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+			{
+				char *pChar;
+				dprintk(KERN_DEBUG "current process path = ");
+				for (pChar = endpath;
+				     pChar < buffer + PAGE_SIZE;
+				     pChar++)
+					dprintk("%c", *pChar);
+
+				dprintk(", uid=%d, euid=%d\n", current_uid(),
+					current_euid());
+			}
+#endif /*defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+			sha1_init(&sha1Context);
+			sha1_update(&sha1Context, endpath, pathlen);
+			if (pData != NULL) {
+				dprintk(KERN_INFO "SCXLNXConnHashApplication"
+					"PathAndData: Hashing additional "
+					"data\n");
+				sha1_update(&sha1Context, pData, nDataLen);
+			}
+			sha1_final(&sha1Context, pHashData);
+			memcpy(pBuffer, pHashData, sizeof(pHashData));
+
+			nResult = 0;
+
+			break;
+		}
+	}
+	up_read(&(mm->mmap_sem));
+
+ end:
+	if (buffer != NULL)
+		internal_kfree(buffer);
+
+	return nResult;
+}
+
+/* kmalloc wrapper: on success, bump the device-wide count of
+ * outstanding memory allocations (debug statistics). */
+void *internal_kmalloc(size_t nSize, int nPriority)
+{
+	struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+	void *pBlock = kmalloc(nSize, nPriority);
+
+	if (pBlock == NULL)
+		return NULL;
+
+	atomic_inc(&pDevice->sDeviceStats.stat_memories_allocated);
+	return pBlock;
+}
+
+/* Free memory obtained from internal_kmalloc() and decrement the
+ * allocation statistics counter; NULL is a no-op for both.
+ *
+ * Fix: do not "return kfree(...)" -- returning a void expression from
+ * a void function is a GNU extension, not ISO C. */
+void internal_kfree(void *pMemory)
+{
+	struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+	if (pMemory != NULL)
+		atomic_dec(
+			&pDevice->sDeviceStats.stat_memories_allocated);
+	kfree(pMemory);
+}
+
+/* Unmap memory obtained from internal_vmalloc() and decrement the
+ * allocation statistics counter.
+ * NOTE(review): the address is masked down to a 4 KiB boundary before
+ * vunmap(), which assumes callers may pass a pointer offset into the
+ * first page of the mapping (and PAGE_SIZE == 4096) -- confirm against
+ * the callers. */
+void internal_vunmap(void *pMemory)
+{
+	struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+	if (pMemory != NULL)
+		atomic_dec(
+			&pDevice->sDeviceStats.stat_memories_allocated);
+
+	vunmap((void *) (((unsigned int)pMemory) & 0xFFFFF000));
+}
+
+/* vmalloc wrapper: on success, bump the device-wide count of
+ * outstanding memory allocations (debug statistics). */
+void *internal_vmalloc(size_t nSize)
+{
+	struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+	void *pBlock = vmalloc(nSize);
+
+	if (pBlock == NULL)
+		return NULL;
+
+	atomic_inc(&pDevice->sDeviceStats.stat_memories_allocated);
+	return pBlock;
+}
+
+/* Free memory obtained from internal_vmalloc() and decrement the
+ * allocation statistics counter; NULL is a no-op for both.
+ *
+ * Fix: do not "return vfree(...)" -- returning a void expression from
+ * a void function is a GNU extension, not ISO C. */
+void internal_vfree(void *pMemory)
+{
+	struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+	if (pMemory != NULL)
+		atomic_dec(
+			&pDevice->sDeviceStats.stat_memories_allocated);
+	vfree(pMemory);
+}
+
+/* get_zeroed_page wrapper: on success (non-zero address), bump the
+ * device-wide count of outstanding page allocations. */
+unsigned long internal_get_zeroed_page(int nPriority)
+{
+	unsigned long nResult;
+	struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+	nResult = get_zeroed_page(nPriority);
+
+	if (nResult != 0)
+		atomic_inc(&pDevice->sDeviceStats.
+				stat_pages_allocated);
+
+	return nResult;
+}
+
+/* Free a page obtained from internal_get_zeroed_page() and decrement
+ * the page statistics counter; address 0 is treated as "no page".
+ *
+ * Fix: do not "return free_page(...)" -- returning a void expression
+ * from a void function is a GNU extension, not ISO C. */
+void internal_free_page(unsigned long pPage)
+{
+	struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+	if (pPage != 0)
+		atomic_dec(
+			&pDevice->sDeviceStats.stat_pages_allocated);
+	free_page(pPage);
+}
+
+/* get_user_pages wrapper: on success, add the number of pages
+ * actually pinned to the device-wide locked-pages statistics counter.
+ * Returns get_user_pages()'s result unchanged (pinned page count, or
+ * a negative error code). */
+int internal_get_user_pages(
+		struct task_struct *tsk,
+		struct mm_struct *mm,
+		unsigned long start,
+		int len,
+		int write,
+		int force,
+		struct page **pages,
+		struct vm_area_struct **vmas)
+{
+	int nResult;
+	struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+	nResult = get_user_pages(
+		tsk,
+		mm,
+		start,
+		len,
+		write,
+		force,
+		pages,
+		vmas);
+
+	if (nResult > 0)
+		atomic_add(nResult,
+			&pDevice->sDeviceStats.stat_pages_locked);
+
+	return nResult;
+}
+
+/* get_page wrapper: take a reference on the page and bump the
+ * device-wide locked-pages statistics counter. */
+void internal_get_page(struct page *page)
+{
+	struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+	get_page(page);
+	atomic_inc(&pDevice->sDeviceStats.stat_pages_locked);
+}
+
+/* page_cache_release wrapper: drop the page reference and decrement
+ * the device-wide locked-pages statistics counter. */
+void internal_page_cache_release(struct page *page)
+{
+	struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+	page_cache_release(page);
+	atomic_dec(&pDevice->sDeviceStats.stat_pages_locked);
+}
+
+
diff --git a/security/tf_driver/scxlnx_util.h b/security/tf_driver/scxlnx_util.h
new file mode 100644
index 000000000000..daff3a7d4b95
--- /dev/null
+++ b/security/tf_driver/scxlnx_util.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#ifndef __SCXLNX_UTIL_H__
+#define __SCXLNX_UTIL_H__
+
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/crypto.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <asm/byteorder.h>
+
+#include "scx_protocol.h"
+#include "scxlnx_defs.h"
+
+/*----------------------------------------------------------------------------
+ * Debug printing routines
+ *----------------------------------------------------------------------------*/
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+
+void addressCacheProperty(unsigned long va);
+
+/* Debug builds: dprintk is a plain printk (callers supply any KERN_* level) */
+#define dprintk printk
+
+void SCXLNXDumpL1SharedBuffer(struct SCHANNEL_C1S_BUFFER *pBuf);
+
+void SCXLNXDumpMessage(union SCX_COMMAND_MESSAGE *pMessage);
+
+void SCXLNXDumpAnswer(union SCX_ANSWER_MESSAGE *pAnswer);
+
+#ifdef CONFIG_BENCH_SECURE_CYCLE
+void setupCounters(void);
+void runBogoMIPS(void);
+int runCodeSpeed(unsigned int nLoop);
+int runDataSpeed(unsigned int nLoop, unsigned long nVA);
+#endif /* CONFIG_BENCH_SECURE_CYCLE */
+
+#else /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+/* Release builds: all debug helpers compile away to no-ops */
+#define dprintk(args...) do { ; } while (0)
+#define SCXLNXDumpL1SharedBuffer(pBuf) ((void) 0)
+#define SCXLNXDumpMessage(pMessage) ((void) 0)
+#define SCXLNXDumpAnswer(pAnswer) ((void) 0)
+
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+/* Size in bytes of a SHA-1 digest (used by the process-hash helpers below) */
+#define SHA1_DIGEST_SIZE 20
+
+/*----------------------------------------------------------------------------
+ * Process identification
+ *----------------------------------------------------------------------------*/
+
+int SCXLNXConnGetCurrentProcessHash(void *pHash);
+
+int SCXLNXConnHashApplicationPathAndData(char *pBuffer, void *pData,
+	u32 nDataLen);
+
+/*----------------------------------------------------------------------------
+ * Statistic computation
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Allocation/pinning wrappers that mirror the underlying kernel APIs while
+ * maintaining the per-device counters in SCXLNX_DEVICE.sDeviceStats
+ * (implemented in scxlnx_util.c).
+ */
+void *internal_kmalloc(size_t nSize, int nPriority);
+void internal_kfree(void *pMemory);
+void internal_vunmap(void *pMemory);
+void *internal_vmalloc(size_t nSize);
+void internal_vfree(void *pMemory);
+unsigned long internal_get_zeroed_page(int nPriority);
+void internal_free_page(unsigned long pPage);
+int internal_get_user_pages(
+	struct task_struct *tsk,
+	struct mm_struct *mm,
+	unsigned long start,
+	int len,
+	int write,
+	int force,
+	struct page **pages,
+	struct vm_area_struct **vmas);
+void internal_get_page(struct page *page);
+void internal_page_cache_release(struct page *page);
+#endif /* __SCXLNX_UTIL_H__ */
+