author	Christoph Hellwig <hch@lst.de>	2007-02-13 21:54:29 +0100
committer	Arnd Bergmann <arnd@klappe.arndb.de>	2007-02-13 21:55:43 +0100
commit	2eb1b12049844a8ebc670e0e4fc908bc3f8933d3 (patch)
tree	b02521b88557dddabcb9de649855151323881cc0	/arch/powerpc/platforms/cell/spufs/run.c
parent	72cb360839f88c02ccf38f1df214316e05886ff3 (diff)
[POWERPC] spu sched: static timeslicing for SCHED_RR contexts
For SCHED_RR tasks we can do some really trivial timeslicing. Basically we fire up a timer for every scheduler tick that searches for a higher or same priority thread that is on the runqueue, and if there is one, context switches to it. Because we can't lock spus from timer context we actually run this from a delayed workqueue instead of a timer.

A nice optimization would be to skip the actual priority bitmap search when there are fewer contexts than physical spus available. To implement this I need a so far unpublished patch from Andre, and it will be added after we have that patch in.

Note that right now we only do the time slicing for SCHED_RR tasks. The code would work for SCHED_OTHER tasks as well, but their prio value is derived from the one the PPU thread has at the time of spu_run, and using this for spu scheduling decisions would make the code very unfair. SCHED_OTHER support will be enabled once the spu scheduler knows how to calculate spu_context.prio (very soon).

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
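The tick worker itself lives in the spufs scheduler (sched.c) and is therefore not part of the run.c diff shown below. As a rough sketch of the mechanism described above, assuming a per-context sched_work delayed_work, a global spu_prio->bitmap priority bitmap and a spu_deactivate() helper on the scheduler side (none of these are visible on this page, so the names are illustrative), the delayed work might look something like this:

void spu_sched_tick(struct work_struct *work)
{
	struct spu_context *ctx =
		container_of(work, struct spu_context, sched_work.work);
	int rearm = 1;

	/* Taking the context lock is fine here because we run from a
	 * workqueue, i.e. process context, not from a timer. */
	spu_acquire(ctx);
	if (ctx->spu) {
		/* Best (numerically lowest) priority currently waiting
		 * on the spu runqueue. */
		int best = sched_find_first_bit(spu_prio->bitmap);

		/* A waiter of the same or higher priority exists: give
		 * up the physical spu and stop ticking. */
		if (best <= ctx->prio) {
			spu_deactivate(ctx);
			rearm = 0;
		}
	}
	spu_release(ctx);

	/* Still running: re-arm the tick for the next timeslice. */
	if (rearm)
		spu_start_tick(ctx);
}

The run.c changes below only arm and cancel this tick around the actual spu run; the preemption decision stays entirely on the scheduler side.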
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/run.c')
-rw-r--r--	arch/powerpc/platforms/cell/spufs/run.c	9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index a973e79e9fdc..353a8fa07ab8 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -164,8 +164,10 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
-	} else
+	} else {
+		spu_start_tick(ctx);
 		ctx->ops->npc_write(ctx, *npc);
+	}
 
 	ctx->ops->runcntl_write(ctx, runcntl);
 	return ret;
@@ -176,6 +178,7 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
 {
 	int ret = 0;
 
+	spu_stop_tick(ctx);
 	*status = ctx->ops->status_read(ctx);
 	*npc = ctx->ops->npc_read(ctx);
 	spu_release(ctx);
@@ -329,8 +332,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 		}
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
-			if (ret)
+			if (ret) {
+				spu_stop_tick(ctx);
 				goto out2;
+			}
 			continue;
 		}
 		ret = spu_process_events(ctx);
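The spu_start_tick() and spu_stop_tick() calls added above are provided by sched.c, which is outside this diffstat. A minimal sketch of the arm/cancel pair, assuming the context records its scheduling policy in ctx->policy and carries a sched_work delayed_work, and that spu_sched_wq and SPU_TIMESLICE are defined by the scheduler code (all of these names are assumptions here):

void spu_start_tick(struct spu_context *ctx)
{
	/* Only SCHED_RR contexts are timesliced for now, as explained
	 * in the commit message above. */
	if (ctx->policy == SCHED_RR)
		queue_delayed_work(spu_sched_wq, &ctx->sched_work,
				   SPU_TIMESLICE);
}

void spu_stop_tick(struct spu_context *ctx)
{
	if (ctx->policy == SCHED_RR)
		cancel_delayed_work(&ctx->sched_work);
}

Using a delayed workqueue rather than a timer matches the constraint stated in the commit message: spus cannot be locked from timer context, but a work item runs in process context where taking the context lock is allowed.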