author	Chris Johnson <cwj@nvidia.com>	2014-03-24 16:54:40 -0700
committer	Simone Willett <swillett@nvidia.com>	2014-05-13 14:34:00 -0700
commit	b42a6d5d4475c0ef5339d7583abb7347ea6276e8 (patch)
tree	75c2d0828f468306ef8093db5be63b87a2c0bf3e /security
parent	17fa41423e194410d5270706733e77344aff3a42 (diff)
security: tlk_driver: ensure VPR SMC occurs on CPU0
The te_set_vpr_params routine is called both by normal user-mode
threads and by worker threads as part of a free/shrink of VPR. The
calls from worker threads will fail in sched_setaffinity() because
they have PF_NO_SETAFFINITY set, so previously no CPU switch occurred
and the SMC was issued on a CPU other than CPU0.

This change detects the worker-thread case and, if needed, issues a
work_on_cpu() instead for the VPR programming.

Bug 1473456

Change-Id: I76fa71c577e243b073cabb93928e688fdf7833ec
Signed-off-by: Chris Johnson <cwj@nvidia.com>
Reviewed-on: http://git-master/r/385924
(cherry picked from commit 30fed4ced5517c8c78b34d3a8db734263cf5d798)
Reviewed-on: http://git-master/r/406698
Reviewed-by: Arto Merilainen <amerilainen@nvidia.com>
Tested-by: Arto Merilainen <amerilainen@nvidia.com>
Reviewed-by: Thomas Cherry <tcherry@nvidia.com>
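For context, the dispatch pattern the message describes can be sketched as follows. This is an illustrative sketch only, not part of the patch: the helper names do_smc() and issue_smc_on_cpu0() are hypothetical placeholders, and the real implementation is in the diff below. It relies on the in-kernel work_on_cpu() helper, which runs the given function on the requested CPU and blocks until it returns, propagating its return value.

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
#include <asm/smp_plat.h>

/* Hypothetical callback for work_on_cpu(); must match long (*fn)(void *). */
static long do_smc(void *arg)
{
	/* issue the SMC here; via work_on_cpu(0, ...) this runs on CPU0 */
	return 0;
}

/* Hypothetical dispatcher illustrating the worker-thread handling. */
static long issue_smc_on_cpu0(void *arg)
{
	/*
	 * Worker threads have PF_NO_SETAFFINITY set, so sched_setaffinity()
	 * cannot migrate them to CPU0. Run the callback directly if we are
	 * already on CPU0, otherwise hand it to CPU0 via work_on_cpu().
	 */
	if (current->flags & PF_WQ_WORKER) {
		if (cpu_logical_map(smp_processor_id()) == 0)
			return do_smc(arg);
		return work_on_cpu(0, do_smc, arg);
	}

	/* Normal threads keep the existing path (affinity switch + SMC). */
	return do_smc(arg);
}

Because work_on_cpu() waits for the queued function to finish and returns its result, the caller can use the SMC return value directly, which is why the patch below assigns work_on_cpu()'s result straight to retval.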
Diffstat (limited to 'security')
-rw-r--r--  security/tlk_driver/ote_comms.c | 50
1 file changed, 48 insertions(+), 2 deletions(-)
diff --git a/security/tlk_driver/ote_comms.c b/security/tlk_driver/ote_comms.c
index 64045ac0154e..2facb386c7a7 100644
--- a/security/tlk_driver/ote_comms.c
+++ b/security/tlk_driver/ote_comms.c
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
+#include <asm/smp_plat.h>
#include "ote_protocol.h"
@@ -327,8 +328,37 @@ static void do_smc_compat(struct te_request_compat *request,
	tlk_generic_smc(request->type, smc_args, smc_params);
}

+struct tlk_smc_work_args {
+	uint32_t arg0;
+	uint32_t arg1;
+	uint32_t arg2;
+};
+
+static long tlk_generic_smc_on_cpu0(void *args)
+{
+	struct tlk_smc_work_args *work;
+	int cpu = cpu_logical_map(smp_processor_id());
+	uint32_t retval;
+
+	BUG_ON(cpu != 0);
+
+	work = (struct tlk_smc_work_args *)args;
+	retval = _tlk_generic_smc(work->arg0, work->arg1, work->arg2);
+	while (retval == 0xFFFFFFFD)
+		retval = _tlk_generic_smc((60 << 24), 0, 0);
+	return retval;
+}
+
/*
* VPR programming SMC
+ *
+ * This routine is called both from normal threads and worker threads.
+ * The worker threads are per-cpu and have PF_NO_SETAFFINITY set, so
+ * any calls to sched_setaffinity will fail.
+ *
+ * If it's a worker thread already running on CPU0, just invoke the
+ * SMC directly. If it's running on a CPU other than CPU0, use
+ * work_on_cpu() to schedule the SMC on CPU0.
*/
int te_set_vpr_params(void *vpr_base, size_t vpr_size)
{
@@ -337,8 +367,24 @@ int te_set_vpr_params(void *vpr_base, size_t vpr_size)
	/* Share the same lock used when request is sent from user side */
	mutex_lock(&smc_lock);
-	retval = tlk_generic_smc(TE_SMC_PROGRAM_VPR, (uintptr_t)vpr_base,
-			vpr_size);
+	if (current->flags & PF_WQ_WORKER) {
+		struct tlk_smc_work_args work_args;
+		int cpu = cpu_logical_map(smp_processor_id());
+
+		work_args.arg0 = TE_SMC_PROGRAM_VPR;
+		work_args.arg1 = (uint32_t)vpr_base;
+		work_args.arg2 = vpr_size;
+
+		/* depending on the CPU, execute directly or sched work */
+		if (cpu == 0)
+			retval = tlk_generic_smc_on_cpu0(&work_args);
+		else
+			retval = work_on_cpu(0,
+				tlk_generic_smc_on_cpu0, &work_args);
+	} else {
+		retval = tlk_generic_smc(TE_SMC_PROGRAM_VPR,
+				(uintptr_t)vpr_base, vpr_size);
+	}
	mutex_unlock(&smc_lock);