author    Bob Nelson <rrnelson@linux.vnet.ibm.com>    2007-07-20 21:39:52 +0200
committer Arnd Bergmann <arnd@klappe.arndb.de>        2007-07-20 21:42:20 +0200
commit    36aaccc1e96481e8310b1d13600096da0f24ff43
tree      2726a068ae0d4529ccde030136255abbd1b1e069
parent    2414059420311e5384de646eebfd529c184afd3c
[CELL] oprofile: enable SPU switch notification to detect currently active SPU tasks
From: Maynard Johnson <mpjohn@us.ibm.com>

This patch extends spu_switch_event_register so that the caller is also notified of currently active SPU tasks. It also exports spu_switch_event_register and spu_switch_event_unregister so that OProfile can access the notifications provided.

Signed-off-by: Maynard Johnson <mpjohn@us.ibm.com>
Signed-off-by: Carl Love <carll@us.ibm.com>
Signed-off-by: Bob Nelson <rrnelson@us.ibm.com>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/run.c')
-rw-r--r--    arch/powerpc/platforms/cell/spufs/run.c | 23
1 file changed, 17 insertions(+), 6 deletions(-)
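For context, below is a minimal consumer-side sketch of the interface this patch exports. The module name, callback name, and the interpretation of the notifier arguments (an SPU context object id as the value and a struct spu pointer as the data) are assumptions made for illustration; they are not part of this patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/spu.h>

/* Hypothetical callback: called on SPU context switches and, with this
 * patch, also once per already-active SPU task at registration time. */
static int sample_spu_switch_notify(struct notifier_block *self,
				    unsigned long object_id, void *data)
{
	struct spu *spu = data;	/* assumed to be the SPU carrying the task */

	printk(KERN_INFO "SPU %d now running object id %lu\n",
	       spu ? spu->number : -1, object_id);
	return NOTIFY_OK;
}

static struct notifier_block sample_spu_notifier = {
	.notifier_call = sample_spu_switch_notify,
};

static int __init sample_init(void)
{
	/* With this patch, registering also delivers notifications for SPU
	 * tasks that are already running, so a late-attaching profiler such
	 * as OProfile still sees them. */
	return spu_switch_event_register(&sample_spu_notifier);
}

static void __exit sample_exit(void)
{
	spu_switch_event_unregister(&sample_spu_notifier);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");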
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index c0238ea5b55a..0b50fa5cb39d 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -18,15 +18,17 @@ void spufs_stop_callback(struct spu *spu)
wake_up_all(&ctx->stop_wq);
}
-static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
+static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
{
struct spu *spu;
u64 pte_fault;
*stat = ctx->ops->status_read(ctx);
- if (ctx->state != SPU_STATE_RUNNABLE)
- return 1;
+
spu = ctx->spu;
+ if (ctx->state != SPU_STATE_RUNNABLE ||
+ test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
+ return 1;
pte_fault = spu->dsisr &
(MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
@@ -124,7 +126,7 @@ out:
return ret;
}
-static int spu_run_init(struct spu_context *ctx, u32 * npc)
+static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
@@ -158,8 +160,8 @@ static int spu_run_init(struct spu_context *ctx, u32 * npc)
return 0;
}
-static int spu_run_fini(struct spu_context *ctx, u32 * npc,
- u32 * status)
+static int spu_run_fini(struct spu_context *ctx, u32 *npc,
+ u32 *status)
{
int ret = 0;
@@ -298,6 +300,7 @@ static inline int spu_process_events(struct spu_context *ctx)
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
int ret;
+ struct spu *spu;
u32 status;
if (mutex_lock_interruptible(&ctx->run_mutex))
@@ -333,6 +336,14 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
if (unlikely(ret))
break;
+ spu = ctx->spu;
+ if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
+ &ctx->sched_flags))) {
+ if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
+ spu_switch_notify(spu, ctx);
+ continue;
+ }
+ }
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);