Diffstat (limited to 'arch/powerpc/platforms/cell/spufs')
 arch/powerpc/platforms/cell/spufs/backing_ops.c              |   9
 arch/powerpc/platforms/cell/spufs/context.c                  |  54
 arch/powerpc/platforms/cell/spufs/coredump.c                 |   2
 arch/powerpc/platforms/cell/spufs/fault.c                    |  51
 arch/powerpc/platforms/cell/spufs/file.c                     | 233
 arch/powerpc/platforms/cell/spufs/gang.c                     |   6
 arch/powerpc/platforms/cell/spufs/inode.c                    | 144
 arch/powerpc/platforms/cell/spufs/run.c                      |  91
 arch/powerpc/platforms/cell/spufs/sched.c                    | 740
 arch/powerpc/platforms/cell/spufs/spu_restore.c              |   8
 arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped | 480
 arch/powerpc/platforms/cell/spufs/spu_save.c                 |   2
 arch/powerpc/platforms/cell/spufs/spufs.h                    | 108
 arch/powerpc/platforms/cell/spufs/switch.c                   |  90
 arch/powerpc/platforms/cell/spufs/syscalls.c                 |  34
 15 files changed, 1534 insertions(+), 518 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index d32db9ffc6eb..ec01214e51ee 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -162,7 +162,8 @@ static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
BUG_ON(avail != (4 - slot));
ctx->csa.spu_mailbox_data[slot] = data;
ctx->csa.spu_chnlcnt_RW[29] = ++slot;
- ctx->csa.prob.mb_stat_R = (((4 - slot) & 0xff) << 8);
+ ctx->csa.prob.mb_stat_R &= ~(0x00ff00);
+ ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8);
gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
ret = 4;
} else {
@@ -320,6 +321,12 @@ static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
/* FIXME: what are the side-effects of this? */
prob->dma_querymask_RW = mask;
prob->dma_querytype_RW = mode;
+ /* In the current implementation, the SPU context is always
+ * acquired in runnable state when new bits are added to the
+ * mask (tagwait), so it's sufficient just to mask
+ * dma_tagstatus_R with the 'mask' parameter here.
+ */
+ ctx->csa.prob.dma_tagstatus_R &= mask;
out:
spin_unlock(&ctx->csa.register_lock);
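
The write-box status update above is the key register fix in backing_ops.c: the old code assigned the whole mb_stat_R word, wiping out the other mailbox count fields that share the register, while the new code clears only bits 8-15 before or-ing in the free-slot count. A minimal standalone sketch of the read-modify-write pattern (the field layout, bits 8-15 = wbox slots free, is inferred from the 0x00ff00 mask in the hunk, not from a hardware manual):

#include <stdint.h>

/* Hedged sketch: update one byte-wide field of a packed status word
 * without disturbing its neighbours, mirroring the mb_stat_R fix. */
static inline uint32_t set_wbox_free(uint32_t mb_stat, unsigned int slots_free)
{
	mb_stat &= ~0x00ff00u;                /* clear the old count */
	mb_stat |= (slots_free & 0xff) << 8;  /* insert the new count */
	return mb_stat;
}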
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 7c51cb54bca1..9cb081c26e71 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -22,11 +22,16 @@
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/slab.h>
+#include <asm/atomic.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"
+
+atomic_t nr_spu_contexts = ATOMIC_INIT(0);
+
struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
struct spu_context *ctx;
@@ -51,12 +56,15 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
ctx->ops = &spu_backing_ops;
ctx->owner = get_task_mm(current);
INIT_LIST_HEAD(&ctx->rq);
+ INIT_LIST_HEAD(&ctx->aff_list);
if (gang)
spu_gang_add_ctx(gang, ctx);
- ctx->rt_priority = current->rt_priority;
- ctx->policy = current->policy;
- ctx->prio = current->prio;
- INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
+
+ __spu_update_sched_info(ctx);
+ spu_set_timeslice(ctx);
+ ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
+
+ atomic_inc(&nr_spu_contexts);
goto out;
out_free:
kfree(ctx);
@@ -75,7 +83,10 @@ void destroy_spu_context(struct kref *kref)
spu_fini_csa(&ctx->csa);
if (ctx->gang)
spu_gang_remove_ctx(ctx->gang, ctx);
+ if (ctx->prof_priv_kref)
+ kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
BUG_ON(!list_empty(&ctx->rq));
+ atomic_dec(&nr_spu_contexts);
kfree(ctx);
}
@@ -159,6 +170,39 @@ int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
void spu_acquire_saved(struct spu_context *ctx)
{
spu_acquire(ctx);
- if (ctx->state != SPU_STATE_SAVED)
+ if (ctx->state != SPU_STATE_SAVED) {
+ set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
spu_deactivate(ctx);
+ }
}
+
+/**
+ * spu_release_saved - unlock spu context and return it to the runqueue
+ * @ctx: context to unlock
+ */
+void spu_release_saved(struct spu_context *ctx)
+{
+ BUG_ON(ctx->state != SPU_STATE_SAVED);
+
+ if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags))
+ spu_activate(ctx, 0);
+
+ spu_release(ctx);
+}
+
+void spu_set_profile_private_kref(struct spu_context *ctx,
+ struct kref *prof_info_kref,
+ void ( * prof_info_release) (struct kref *kref))
+{
+ ctx->prof_priv_kref = prof_info_kref;
+ ctx->prof_priv_release = prof_info_release;
+}
+EXPORT_SYMBOL_GPL(spu_set_profile_private_kref);
+
+void *spu_get_profile_private_kref(struct spu_context *ctx)
+{
+ return ctx->prof_priv_kref;
+}
+EXPORT_SYMBOL_GPL(spu_get_profile_private_kref);
+
+
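The context.c changes introduce a saved-state bracket: spu_acquire_saved() now remembers (via SPU_SCHED_WAS_ACTIVE) when it had to deactivate a running context, and the new spu_release_saved() puts such a context back on the runqueue instead of leaving it parked. The rest of the patch converts every spu_acquire_saved() caller to the matching release. A hedged sketch of the intended pairing:

/* Hedged usage sketch: code that needs the context in SAVED state
 * (register dumps, coredump, attribute files) brackets its access so
 * a previously running context is resumed afterwards. */
static u32 read_saved_decr(struct spu_context *ctx)
{
	u32 val;

	spu_acquire_saved(ctx);             /* deactivates if needed */
	val = ctx->csa.lscsa->decr.slot[0];
	spu_release_saved(ctx);             /* reactivates if it was running */
	return val;
}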
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 5d9ad5a0307b..5e31799b1e3f 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -226,7 +226,7 @@ static void spufs_arch_write_notes(struct file *file)
spu_acquire_saved(ctx_info->ctx);
for (j = 0; j < spufs_coredump_num_notes; j++)
spufs_arch_write_note(ctx_info, j, file);
- spu_release(ctx_info->ctx);
+ spu_release_saved(ctx_info->ctx);
list_del(&ctx_info->list);
kfree(ctx_info);
}
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index 0f75c07e29d8..917eab4be486 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -33,7 +33,8 @@
* function. Fortunately, there are a few corner cases that we haven't
* had to handle so far.
*/
-static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, unsigned long dsisr)
+static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+ unsigned long dsisr, unsigned *flt)
{
struct vm_area_struct *vma;
unsigned long is_write;
@@ -73,22 +74,21 @@ good_area:
goto bad_area;
}
ret = 0;
- switch (handle_mm_fault(mm, vma, ea, is_write)) {
- case VM_FAULT_MINOR:
- current->min_flt++;
- break;
- case VM_FAULT_MAJOR:
- current->maj_flt++;
- break;
- case VM_FAULT_SIGBUS:
- ret = -EFAULT;
- goto bad_area;
- case VM_FAULT_OOM:
- ret = -ENOMEM;
- goto bad_area;
- default:
+ *flt = handle_mm_fault(mm, vma, ea, is_write);
+ if (unlikely(*flt & VM_FAULT_ERROR)) {
+ if (*flt & VM_FAULT_OOM) {
+ ret = -ENOMEM;
+ goto bad_area;
+ } else if (*flt & VM_FAULT_SIGBUS) {
+ ret = -EFAULT;
+ goto bad_area;
+ }
BUG();
}
+ if (*flt & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
up_read(&mm->mmap_sem);
return ret;
@@ -153,6 +153,7 @@ int spufs_handle_class1(struct spu_context *ctx)
{
u64 ea, dsisr, access;
unsigned long flags;
+ unsigned flt = 0;
int ret;
/*
@@ -178,9 +179,15 @@ int spufs_handle_class1(struct spu_context *ctx)
if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
return 0;
+ spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);
+
pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
dsisr, ctx->state);
+ ctx->stats.hash_flt++;
+ if (ctx->state == SPU_STATE_RUNNABLE)
+ ctx->spu->stats.hash_flt++;
+
/* we must not hold the lock when entering spu_handle_mm_fault */
spu_release(ctx);
@@ -192,7 +199,7 @@ int spufs_handle_class1(struct spu_context *ctx)
/* hashing failed, so try the actual fault handler */
if (ret)
- ret = spu_handle_mm_fault(current->mm, ea, dsisr);
+ ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
spu_acquire(ctx);
/*
@@ -201,11 +208,23 @@ int spufs_handle_class1(struct spu_context *ctx)
* In case of unhandled error report the problem to user space.
*/
if (!ret) {
+ if (flt & VM_FAULT_MAJOR)
+ ctx->stats.maj_flt++;
+ else
+ ctx->stats.min_flt++;
+ if (ctx->state == SPU_STATE_RUNNABLE) {
+ if (flt & VM_FAULT_MAJOR)
+ ctx->spu->stats.maj_flt++;
+ else
+ ctx->spu->stats.min_flt++;
+ }
+
if (ctx->spu)
ctx->ops->restart_dma(ctx);
} else
spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
+ spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
return ret;
}
EXPORT_SYMBOL_GPL(spufs_handle_class1);
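
Two things happen in fault.c: handle_mm_fault() now returns a bitmask rather than the old VM_FAULT_MINOR/MAJOR enum, so the switch becomes flag tests, and the returned flags feed the new per-context and per-spu fault counters. A condensed sketch of the bitmask dispatch, using only flag names visible in the hunk:

/* Hedged sketch of the new bitmask-style fault classification. */
static int classify_fault(unsigned int flt)
{
	if (flt & VM_FAULT_ERROR)
		return (flt & VM_FAULT_OOM) ? -ENOMEM : -EFAULT;
	return (flt & VM_FAULT_MAJOR) ? 1 : 0;	/* 1 = major, 0 = minor */
}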
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index b1e7e2f8a2e9..7de4e919687b 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -28,6 +28,7 @@
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
+#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/semaphore.h>
@@ -39,6 +40,7 @@
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
+
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
@@ -216,12 +218,12 @@ unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr,
#endif /* CONFIG_SPU_FS_64K_LS */
static const struct file_operations spufs_mem_fops = {
- .open = spufs_mem_open,
- .release = spufs_mem_release,
- .read = spufs_mem_read,
- .write = spufs_mem_write,
- .llseek = generic_file_llseek,
- .mmap = spufs_mem_mmap,
+ .open = spufs_mem_open,
+ .release = spufs_mem_release,
+ .read = spufs_mem_read,
+ .write = spufs_mem_write,
+ .llseek = generic_file_llseek,
+ .mmap = spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
.get_unmapped_area = spufs_get_unmapped_area,
#endif
@@ -368,7 +370,7 @@ spufs_regs_read(struct file *file, char __user *buffer,
spu_acquire_saved(ctx);
ret = __spufs_regs_read(ctx, buffer, size, pos);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
@@ -390,7 +392,7 @@ spufs_regs_write(struct file *file, const char __user *buffer,
ret = copy_from_user(lscsa->gprs + *pos - size,
buffer, size) ? -EFAULT : size;
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
@@ -419,7 +421,7 @@ spufs_fpcr_read(struct file *file, char __user * buffer,
spu_acquire_saved(ctx);
ret = __spufs_fpcr_read(ctx, buffer, size, pos);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
@@ -441,7 +443,7 @@ spufs_fpcr_write(struct file *file, const char __user * buffer,
ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
buffer, size) ? -EFAULT : size;
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
@@ -866,7 +868,7 @@ static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
spu_acquire_saved(ctx);
ret = __spufs_signal1_read(ctx, buf, len, pos);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
@@ -932,6 +934,13 @@ static const struct file_operations spufs_signal1_fops = {
.mmap = spufs_signal1_mmap,
};
+static const struct file_operations spufs_signal1_nosched_fops = {
+ .open = spufs_signal1_open,
+ .release = spufs_signal1_release,
+ .write = spufs_signal1_write,
+ .mmap = spufs_signal1_mmap,
+};
+
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
struct spufs_inode_info *i = SPUFS_I(inode);
@@ -990,7 +999,7 @@ static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
spu_acquire_saved(ctx);
ret = __spufs_signal2_read(ctx, buf, len, pos);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
@@ -1060,6 +1069,13 @@ static const struct file_operations spufs_signal2_fops = {
.mmap = spufs_signal2_mmap,
};
+static const struct file_operations spufs_signal2_nosched_fops = {
+ .open = spufs_signal2_open,
+ .release = spufs_signal2_release,
+ .write = spufs_signal2_write,
+ .mmap = spufs_signal2_mmap,
+};
+
static void spufs_signal1_type_set(void *data, u64 val)
{
struct spu_context *ctx = data;
@@ -1497,14 +1513,15 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
if (status)
ret = status;
}
- spu_release(ctx);
if (ret)
- goto out;
+ goto out_unlock;
ctx->tagwait |= 1 << cmd.tag;
ret = size;
+out_unlock:
+ spu_release(ctx);
out:
return ret;
}
@@ -1515,14 +1532,14 @@ static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
u32 free_elements, tagstatus;
unsigned int mask;
+ poll_wait(file, &ctx->mfc_wq, wait);
+
spu_acquire(ctx);
ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
free_elements = ctx->ops->get_mfc_free_elements(ctx);
tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
spu_release(ctx);
- poll_wait(file, &ctx->mfc_wq, wait);
-
mask = 0;
if (free_elements & 0xffff)
mask |= POLLOUT | POLLWRNORM;
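
Moving poll_wait() ahead of the status reads fixes a lost-wakeup race: the waiter must be registered before the condition is sampled, otherwise an event arriving between spu_release() and poll_wait() would neither appear in the returned mask nor wake the poller. A minimal sketch of the canonical ordering (dev_wq and condition_ready() are hypothetical stand-ins):

/* Hedged sketch of the canonical poll ordering applied by this hunk. */
static unsigned int dev_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = 0;

	poll_wait(file, &dev_wq, wait);	/* 1: register the waiter first */
	if (condition_ready())		/* 2: only then sample the state */
		mask |= POLLIN | POLLRDNORM;
	return mask;
}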
@@ -1609,7 +1626,7 @@ static void spufs_decr_set(void *data, u64 val)
struct spu_lscsa *lscsa = ctx->csa.lscsa;
spu_acquire_saved(ctx);
lscsa->decr.slot[0] = (u32) val;
- spu_release(ctx);
+ spu_release_saved(ctx);
}
static u64 __spufs_decr_get(void *data)
@@ -1625,7 +1642,7 @@ static u64 spufs_decr_get(void *data)
u64 ret;
spu_acquire_saved(ctx);
ret = __spufs_decr_get(data);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
@@ -1634,17 +1651,21 @@ DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
static void spufs_decr_status_set(void *data, u64 val)
{
struct spu_context *ctx = data;
- struct spu_lscsa *lscsa = ctx->csa.lscsa;
spu_acquire_saved(ctx);
- lscsa->decr_status.slot[0] = (u32) val;
- spu_release(ctx);
+ if (val)
+ ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
+ else
+ ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
+ spu_release_saved(ctx);
}
static u64 __spufs_decr_status_get(void *data)
{
struct spu_context *ctx = data;
- struct spu_lscsa *lscsa = ctx->csa.lscsa;
- return lscsa->decr_status.slot[0];
+ if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
+ return SPU_DECR_STATUS_RUNNING;
+ else
+ return 0;
}
static u64 spufs_decr_status_get(void *data)
@@ -1653,7 +1674,7 @@ static u64 spufs_decr_status_get(void *data)
u64 ret;
spu_acquire_saved(ctx);
ret = __spufs_decr_status_get(data);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
@@ -1665,7 +1686,7 @@ static void spufs_event_mask_set(void *data, u64 val)
struct spu_lscsa *lscsa = ctx->csa.lscsa;
spu_acquire_saved(ctx);
lscsa->event_mask.slot[0] = (u32) val;
- spu_release(ctx);
+ spu_release_saved(ctx);
}
static u64 __spufs_event_mask_get(void *data)
@@ -1681,7 +1702,7 @@ static u64 spufs_event_mask_get(void *data)
u64 ret;
spu_acquire_saved(ctx);
ret = __spufs_event_mask_get(data);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
@@ -1705,7 +1726,7 @@ static u64 spufs_event_status_get(void *data)
spu_acquire_saved(ctx);
ret = __spufs_event_status_get(data);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
@@ -1717,7 +1738,7 @@ static void spufs_srr0_set(void *data, u64 val)
struct spu_lscsa *lscsa = ctx->csa.lscsa;
spu_acquire_saved(ctx);
lscsa->srr0.slot[0] = (u32) val;
- spu_release(ctx);
+ spu_release_saved(ctx);
}
static u64 spufs_srr0_get(void *data)
@@ -1727,7 +1748,7 @@ static u64 spufs_srr0_get(void *data)
u64 ret;
spu_acquire_saved(ctx);
ret = lscsa->srr0.slot[0];
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
@@ -1783,7 +1804,7 @@ static u64 spufs_lslr_get(void *data)
spu_acquire_saved(ctx);
ret = __spufs_lslr_get(data);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
@@ -1797,6 +1818,29 @@ static int spufs_info_open(struct inode *inode, struct file *file)
return 0;
}
+static int spufs_caps_show(struct seq_file *s, void *private)
+{
+ struct spu_context *ctx = s->private;
+
+ if (!(ctx->flags & SPU_CREATE_NOSCHED))
+ seq_puts(s, "sched\n");
+ if (!(ctx->flags & SPU_CREATE_ISOLATE))
+ seq_puts(s, "step\n");
+ return 0;
+}
+
+static int spufs_caps_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
+}
+
+static const struct file_operations spufs_caps_fops = {
+ .open = spufs_caps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
{
@@ -1824,7 +1868,7 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
spin_lock(&ctx->csa.register_lock);
ret = __spufs_mbox_info_read(ctx, buf, len, pos);
spin_unlock(&ctx->csa.register_lock);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
@@ -1862,7 +1906,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
spin_lock(&ctx->csa.register_lock);
ret = __spufs_ibox_info_read(ctx, buf, len, pos);
spin_unlock(&ctx->csa.register_lock);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
@@ -1903,7 +1947,7 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
spin_lock(&ctx->csa.register_lock);
ret = __spufs_wbox_info_read(ctx, buf, len, pos);
spin_unlock(&ctx->csa.register_lock);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
@@ -1953,7 +1997,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
spin_lock(&ctx->csa.register_lock);
ret = __spufs_dma_info_read(ctx, buf, len, pos);
spin_unlock(&ctx->csa.register_lock);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
@@ -2004,7 +2048,7 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
spin_lock(&ctx->csa.register_lock);
ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
spin_unlock(&ctx->csa.register_lock);
- spu_release(ctx);
+ spu_release_saved(ctx);
return ret;
}
@@ -2014,7 +2058,117 @@ static const struct file_operations spufs_proxydma_info_fops = {
.read = spufs_proxydma_info_read,
};
+static int spufs_show_tid(struct seq_file *s, void *private)
+{
+ struct spu_context *ctx = s->private;
+
+ seq_printf(s, "%d\n", ctx->tid);
+ return 0;
+}
+
+static int spufs_tid_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
+}
+
+static const struct file_operations spufs_tid_fops = {
+ .open = spufs_tid_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const char *ctx_state_names[] = {
+ "user", "system", "iowait", "loaded"
+};
+
+static unsigned long long spufs_acct_time(struct spu_context *ctx,
+ enum spu_utilization_state state)
+{
+ struct timespec ts;
+ unsigned long long time = ctx->stats.times[state];
+
+ /*
+ * In general, utilization statistics are updated by the controlling
+ * thread as the spu context moves through various well-defined
+ * state transitions, but if the context is lazily loaded its
+ * utilization statistics are not updated as the controlling thread
+ * is not tightly coupled with the execution of the spu context. We
+ * calculate and apply the time delta from the last recorded state
+ * of the spu context.
+ */
+ if (ctx->spu && ctx->stats.util_state == state) {
+ ktime_get_ts(&ts);
+ time += timespec_to_ns(&ts) - ctx->stats.tstamp;
+ }
+
+ return time / NSEC_PER_MSEC;
+}
+
+static unsigned long long spufs_slb_flts(struct spu_context *ctx)
+{
+ unsigned long long slb_flts = ctx->stats.slb_flt;
+
+ if (ctx->state == SPU_STATE_RUNNABLE) {
+ slb_flts += (ctx->spu->stats.slb_flt -
+ ctx->stats.slb_flt_base);
+ }
+
+ return slb_flts;
+}
+
+static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
+{
+ unsigned long long class2_intrs = ctx->stats.class2_intr;
+
+ if (ctx->state == SPU_STATE_RUNNABLE) {
+ class2_intrs += (ctx->spu->stats.class2_intr -
+ ctx->stats.class2_intr_base);
+ }
+
+ return class2_intrs;
+}
+
+
+static int spufs_show_stat(struct seq_file *s, void *private)
+{
+ struct spu_context *ctx = s->private;
+
+ spu_acquire(ctx);
+ seq_printf(s, "%s %llu %llu %llu %llu "
+ "%llu %llu %llu %llu %llu %llu %llu %llu\n",
+ ctx_state_names[ctx->stats.util_state],
+ spufs_acct_time(ctx, SPU_UTIL_USER),
+ spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
+ spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
+ spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
+ ctx->stats.vol_ctx_switch,
+ ctx->stats.invol_ctx_switch,
+ spufs_slb_flts(ctx),
+ ctx->stats.hash_flt,
+ ctx->stats.min_flt,
+ ctx->stats.maj_flt,
+ spufs_class2_intrs(ctx),
+ ctx->stats.libassist);
+ spu_release(ctx);
+ return 0;
+}
+
+static int spufs_stat_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
+}
+
+static const struct file_operations spufs_stat_fops = {
+ .open = spufs_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
struct tree_descr spufs_dir_contents[] = {
+ { "capabilities", &spufs_caps_fops, 0444, },
{ "mem", &spufs_mem_fops, 0666, },
{ "regs", &spufs_regs_fops, 0666, },
{ "mbox", &spufs_mbox_fops, 0444, },
@@ -2046,10 +2200,13 @@ struct tree_descr spufs_dir_contents[] = {
{ "wbox_info", &spufs_wbox_info_fops, 0444, },
{ "dma_info", &spufs_dma_info_fops, 0444, },
{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
+ { "tid", &spufs_tid_fops, 0444, },
+ { "stat", &spufs_stat_fops, 0444, },
{},
};
struct tree_descr spufs_dir_nosched_contents[] = {
+ { "capabilities", &spufs_caps_fops, 0444, },
{ "mem", &spufs_mem_fops, 0666, },
{ "mbox", &spufs_mbox_fops, 0444, },
{ "ibox", &spufs_ibox_fops, 0444, },
@@ -2057,8 +2214,8 @@ struct tree_descr spufs_dir_nosched_contents[] = {
{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
- { "signal1", &spufs_signal1_fops, 0666, },
- { "signal2", &spufs_signal2_fops, 0666, },
+ { "signal1", &spufs_signal1_nosched_fops, 0222, },
+ { "signal2", &spufs_signal2_nosched_fops, 0222, },
{ "signal1_type", &spufs_signal1_type, 0666, },
{ "signal2_type", &spufs_signal2_type, 0666, },
{ "mss", &spufs_mss_fops, 0666, },
@@ -2068,6 +2225,8 @@ struct tree_descr spufs_dir_nosched_contents[] = {
{ "psmap", &spufs_psmap_fops, 0666, },
{ "phys-id", &spufs_id_ops, 0666, },
{ "object-id", &spufs_object_id_ops, 0666, },
+ { "tid", &spufs_tid_fops, 0444, },
+ { "stat", &spufs_stat_fops, 0444, },
{},
};
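
The new per-context "stat" file emits a single line: the current utilization state followed by twelve counters (user/system/iowait/loaded times in milliseconds, voluntary and involuntary context switches, slb, hash, minor and major faults, class-2 interrupts, and libassist stops), exactly as laid out by the seq_printf() in spufs_show_stat() above. A hedged userspace reader; path and field meanings are taken from this patch:

#include <stdio.h>

/* Hedged sketch: parse one line of <ctx-dir>/stat as produced by
 * spufs_show_stat(). 13 fields: state name plus 12 counters. */
static int read_spu_stat(const char *path)
{
	char state[16];
	unsigned long long v[12];
	FILE *f = fopen(path, "r");
	int n;

	if (!f)
		return -1;
	n = fscanf(f, "%15s %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
		   state, &v[0], &v[1], &v[2], &v[3], &v[4], &v[5],
		   &v[6], &v[7], &v[8], &v[9], &v[10], &v[11]);
	fclose(f);
	if (n != 13)
		return -1;
	printf("state=%s user=%llums libassist=%llu\n", state, v[0], v[11]);
	return 0;
}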
diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c
index 212ea78f9051..71a443253021 100644
--- a/arch/powerpc/platforms/cell/spufs/gang.c
+++ b/arch/powerpc/platforms/cell/spufs/gang.c
@@ -35,7 +35,9 @@ struct spu_gang *alloc_spu_gang(void)
kref_init(&gang->kref);
mutex_init(&gang->mutex);
+ mutex_init(&gang->aff_mutex);
INIT_LIST_HEAD(&gang->list);
+ INIT_LIST_HEAD(&gang->aff_list_head);
out:
return gang;
@@ -73,6 +75,10 @@ void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx)
{
mutex_lock(&gang->mutex);
WARN_ON(ctx->gang != gang);
+ if (!list_empty(&ctx->aff_list)) {
+ list_del_init(&ctx->aff_list);
+ gang->aff_flags &= ~AFF_OFFSETS_SET;
+ }
list_del_init(&ctx->gang_list);
gang->contexts--;
mutex_unlock(&gang->mutex);
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 9807206e0219..b3d0dd118dd0 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -232,10 +232,6 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
return dcache_dir_close(inode, file);
}
-const struct inode_operations spufs_dir_inode_operations = {
- .lookup = simple_lookup,
-};
-
const struct file_operations spufs_context_fops = {
.open = dcache_dir_open,
.release = spufs_dir_close,
@@ -269,7 +265,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
goto out_iput;
ctx->flags = flags;
- inode->i_op = &spufs_dir_inode_operations;
+ inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
if (flags & SPU_CREATE_NOSCHED)
ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
@@ -320,11 +316,107 @@ out:
return ret;
}
-static int spufs_create_context(struct inode *inode,
- struct dentry *dentry,
- struct vfsmount *mnt, int flags, int mode)
+static struct spu_context *
+spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
+ struct file *filp)
+{
+ struct spu_context *tmp, *neighbor;
+ int count, node;
+ int aff_supp;
+
+ aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next,
+ struct spu, cbe_list))->aff_list);
+
+ if (!aff_supp)
+ return ERR_PTR(-EINVAL);
+
+ if (flags & SPU_CREATE_GANG)
+ return ERR_PTR(-EINVAL);
+
+ if (flags & SPU_CREATE_AFFINITY_MEM &&
+ gang->aff_ref_ctx &&
+ gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM)
+ return ERR_PTR(-EEXIST);
+
+ if (gang->aff_flags & AFF_MERGED)
+ return ERR_PTR(-EBUSY);
+
+ neighbor = NULL;
+ if (flags & SPU_CREATE_AFFINITY_SPU) {
+ if (!filp || filp->f_op != &spufs_context_fops)
+ return ERR_PTR(-EINVAL);
+
+ neighbor = get_spu_context(
+ SPUFS_I(filp->f_dentry->d_inode)->i_ctx);
+
+ if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
+ !list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
+ !list_entry(neighbor->aff_list.next, struct spu_context,
+ aff_list)->aff_head)
+ return ERR_PTR(-EEXIST);
+
+ if (gang != neighbor->gang)
+ return ERR_PTR(-EINVAL);
+
+ count = 1;
+ list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
+ count++;
+ if (list_empty(&neighbor->aff_list))
+ count++;
+
+ for (node = 0; node < MAX_NUMNODES; node++) {
+ if ((cbe_spu_info[node].n_spus - atomic_read(
+ &cbe_spu_info[node].reserved_spus)) >= count)
+ break;
+ }
+
+ if (node == MAX_NUMNODES)
+ return ERR_PTR(-EEXIST);
+ }
+
+ return neighbor;
+}
+
+static void
+spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
+ struct spu_context *neighbor)
+{
+ if (flags & SPU_CREATE_AFFINITY_MEM)
+ ctx->gang->aff_ref_ctx = ctx;
+
+ if (flags & SPU_CREATE_AFFINITY_SPU) {
+ if (list_empty(&neighbor->aff_list)) {
+ list_add_tail(&neighbor->aff_list,
+ &ctx->gang->aff_list_head);
+ neighbor->aff_head = 1;
+ }
+
+ if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head)
+ || list_entry(neighbor->aff_list.next, struct spu_context,
+ aff_list)->aff_head) {
+ list_add(&ctx->aff_list, &neighbor->aff_list);
+ } else {
+ list_add_tail(&ctx->aff_list, &neighbor->aff_list);
+ if (neighbor->aff_head) {
+ neighbor->aff_head = 0;
+ ctx->aff_head = 1;
+ }
+ }
+
+ if (!ctx->gang->aff_ref_ctx)
+ ctx->gang->aff_ref_ctx = ctx;
+ }
+}
+
+static int
+spufs_create_context(struct inode *inode, struct dentry *dentry,
+ struct vfsmount *mnt, int flags, int mode,
+ struct file *aff_filp)
{
int ret;
+ int affinity;
+ struct spu_gang *gang;
+ struct spu_context *neighbor;
ret = -EPERM;
if ((flags & SPU_CREATE_NOSCHED) &&
@@ -340,9 +432,29 @@ static int spufs_create_context(struct inode *inode,
if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
goto out_unlock;
+ gang = NULL;
+ neighbor = NULL;
+ affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
+ if (affinity) {
+ gang = SPUFS_I(inode)->i_gang;
+ ret = -EINVAL;
+ if (!gang)
+ goto out_unlock;
+ mutex_lock(&gang->aff_mutex);
+ neighbor = spufs_assert_affinity(flags, gang, aff_filp);
+ if (IS_ERR(neighbor)) {
+ ret = PTR_ERR(neighbor);
+ goto out_aff_unlock;
+ }
+ }
+
ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
if (ret)
- goto out_unlock;
+ goto out_aff_unlock;
+
+ if (affinity)
+ spufs_set_affinity(flags, SPUFS_I(dentry->d_inode)->i_ctx,
+ neighbor);
/*
* get references for dget and mntget, will be released
@@ -356,6 +468,9 @@ static int spufs_create_context(struct inode *inode,
goto out;
}
+out_aff_unlock:
+ if (affinity)
+ mutex_unlock(&gang->aff_mutex);
out_unlock:
mutex_unlock(&inode->i_mutex);
out:
@@ -386,7 +501,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
if (!gang)
goto out_iput;
- inode->i_op = &spufs_dir_inode_operations;
+ inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
d_instantiate(dentry, inode);
@@ -454,7 +569,8 @@ out:
static struct file_system_type spufs_type;
-long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode)
+long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
+ struct file *filp)
{
struct dentry *dentry;
int ret;
@@ -491,7 +607,7 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode)
dentry, nd->mnt, mode);
else
return spufs_create_context(nd->dentry->d_inode,
- dentry, nd->mnt, flags, mode);
+ dentry, nd->mnt, flags, mode, filp);
out_dput:
dput(dentry);
@@ -593,7 +709,7 @@ spufs_create_root(struct super_block *sb, void *data)
if (!inode)
goto out;
- inode->i_op = &spufs_dir_inode_operations;
+ inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
SPUFS_I(inode)->i_ctx = NULL;
@@ -658,7 +774,7 @@ static int __init spufs_init(void)
ret = -ENOMEM;
spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
sizeof(struct spufs_inode_info), 0,
- SLAB_HWCACHE_ALIGN, spufs_init_once, NULL);
+ SLAB_HWCACHE_ALIGN, spufs_init_once);
if (!spufs_inode_cache)
goto out;
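
Context creation now honours the two affinity flags: spufs_assert_affinity() validates the request (the hardware must expose an aff_list, the gang must not already be merged or carry a second memory-affine context, an SPU neighbour must belong to the same gang, and some NUMA node must have enough unreserved SPUs), while spufs_set_affinity() links the new context into the gang's aff_list next to its neighbour. From userspace this is driven through spu_create(); a hedged sketch, assuming an extended four-argument form that passes the neighbour context's fd when SPU_CREATE_AFFINITY_SPU is set (the exact library spelling may differ):

/* Hedged userspace sketch: two contexts in one gang with SPU-to-SPU
 * affinity. The 4-argument spu_create() variant is an assumption
 * drawn from this patch, not a documented stable ABI. */
static int make_affine_pair(void)
{
	int gang = spu_create("/spu/mygang", SPU_CREATE_GANG, 0755);
	int a    = spu_create("/spu/mygang/a", 0, 0755);
	int b    = spu_create("/spu/mygang/b",
			      SPU_CREATE_AFFINITY_SPU, 0755, a);

	return (gang < 0 || a < 0 || b < 0) ? -1 : b;
}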
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 57626600b1a4..958f10e90fdd 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -18,18 +18,21 @@ void spufs_stop_callback(struct spu *spu)
wake_up_all(&ctx->stop_wq);
}
-static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
+static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
{
struct spu *spu;
u64 pte_fault;
*stat = ctx->ops->status_read(ctx);
- if (ctx->state != SPU_STATE_RUNNABLE)
- return 1;
+
spu = ctx->spu;
+ if (ctx->state != SPU_STATE_RUNNABLE ||
+ test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
+ return 1;
pte_fault = spu->dsisr &
(MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
- return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
+ return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
+ 1 : 0;
}
static int spu_setup_isolated(struct spu_context *ctx)
@@ -123,8 +126,10 @@ out:
return ret;
}
-static int spu_run_init(struct spu_context *ctx, u32 * npc)
+static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
+ spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
+
if (ctx->flags & SPU_CREATE_ISOLATE) {
unsigned long runcntl;
@@ -142,22 +147,28 @@ static int spu_run_init(struct spu_context *ctx, u32 * npc)
runcntl = SPU_RUNCNTL_RUNNABLE;
ctx->ops->runcntl_write(ctx, runcntl);
} else {
- spu_start_tick(ctx);
+ unsigned long mode = SPU_PRIVCNTL_MODE_NORMAL;
ctx->ops->npc_write(ctx, *npc);
+ if (test_thread_flag(TIF_SINGLESTEP))
+ mode = SPU_PRIVCNTL_MODE_SINGLE_STEP;
+ out_be64(&ctx->spu->priv2->spu_privcntl_RW, mode);
ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
}
+ spuctx_switch_state(ctx, SPU_UTIL_USER);
+
return 0;
}
-static int spu_run_fini(struct spu_context *ctx, u32 * npc,
- u32 * status)
+static int spu_run_fini(struct spu_context *ctx, u32 *npc,
+ u32 *status)
{
int ret = 0;
- spu_stop_tick(ctx);
*status = ctx->ops->status_read(ctx);
*npc = ctx->ops->npc_read(ctx);
+
+ spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
spu_release(ctx);
if (signal_pending(current))
@@ -182,11 +193,7 @@ static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
if (ret)
return ret;
- ret = spu_run_init(ctx, npc);
- if (ret) {
- spu_release(ctx);
- return ret;
- }
+ spuctx_switch_state(ctx, SPU_UTIL_USER);
return 0;
}
@@ -286,10 +293,10 @@ static inline int spu_process_events(struct spu_context *ctx)
return ret;
}
-long spufs_run_spu(struct file *file, struct spu_context *ctx,
- u32 *npc, u32 *event)
+long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
int ret;
+ struct spu *spu;
u32 status;
if (mutex_lock_interruptible(&ctx->run_mutex))
@@ -298,9 +305,26 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
ctx->ops->master_start(ctx);
ctx->event_return = 0;
- ret = spu_acquire_runnable(ctx, 0);
- if (ret)
- return ret;
+ spu_acquire(ctx);
+ if (ctx->state == SPU_STATE_SAVED) {
+ __spu_update_sched_info(ctx);
+ spu_set_timeslice(ctx);
+
+ ret = spu_activate(ctx, 0);
+ if (ret) {
+ spu_release(ctx);
+ goto out;
+ }
+ } else {
+ /*
+ * We have to update the scheduling priority under active_mutex
+ * to protect against find_victim().
+ *
+ * No need to update the timeslice ASAP, it will get updated
+ * once the current one has expired.
+ */
+ spu_update_sched_info(ctx);
+ }
ret = spu_run_init(ctx, npc);
if (ret) {
@@ -312,6 +336,17 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
if (unlikely(ret))
break;
+ spu = ctx->spu;
+ if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
+ &ctx->sched_flags))) {
+ if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
+ spu_switch_notify(spu, ctx);
+ continue;
+ }
+ }
+
+ spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
+
if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
(status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
ret = spu_process_callback(ctx);
@@ -325,16 +360,21 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
ret = spu_reacquire_runnable(ctx, npc, &status);
- if (ret) {
- spu_stop_tick(ctx);
+ if (ret)
goto out2;
- }
continue;
}
ret = spu_process_events(ctx);
} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
- SPU_STATUS_STOPPED_BY_HALT)));
+ SPU_STATUS_STOPPED_BY_HALT |
+ SPU_STATUS_SINGLE_STEP)));
+
+ if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
+ (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100) &&
+ (ctx->state == SPU_STATE_RUNNABLE))
+ ctx->stats.libassist++;
+
ctx->ops->master_stop(ctx);
ret = spu_run_fini(ctx, npc, &status);
@@ -344,10 +384,15 @@ out2:
if ((ret == 0) ||
((ret == -ERESTARTSYS) &&
((status & SPU_STATUS_STOPPED_BY_HALT) ||
+ (status & SPU_STATUS_SINGLE_STEP) ||
((status & SPU_STATUS_STOPPED_BY_STOP) &&
(status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
ret = status;
+ /* Note: we don't need to force_sig SIGTRAP on single-step
+ * since we have TIF_SINGLESTEP set, thus the kernel will do
+ * it upon return from the syscall anyway.
+ */
if ((status & SPU_STATUS_STOPPED_BY_STOP)
&& (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
force_sig(SIGTRAP, current);
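
The run loop now also terminates on SPU_STATUS_SINGLE_STEP, and the libassist counter is incremented whenever the SPU stopped with a stop-and-signal code in the 0x21xx class (the masked compare above matches 0x2100-0x21ff; 0x2104 specifically is the PPE-assisted callback serviced by spu_process_callback(), and 0x3fff is the breakpoint that raises SIGTRAP). A small decoder under those assumptions:

/* Hedged sketch: extract and classify a stop-and-signal code, using
 * the shift/mask conventions visible in this file's hunks. */
static int stop_code(u32 status)
{
	if (!(status & SPU_STATUS_STOPPED_BY_STOP))
		return -1;
	return (status >> SPU_STOP_STATUS_SHIFT) & 0x3fff;
}
/* (code & 0x3f00) == 0x2100 -> library-assisted stop (stats.libassist)
 *  code == 0x2104           -> PPE-assisted library callback
 *  code == 0x3fff           -> breakpoint, delivered as SIGTRAP      */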
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 3b831e07f1ed..5bebe7fbe056 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -35,6 +35,10 @@
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/pid_namespace.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
@@ -43,97 +47,175 @@
#include <asm/spu_priv1.h>
#include "spufs.h"
-#define SPU_TIMESLICE (HZ)
-
struct spu_prio_array {
DECLARE_BITMAP(bitmap, MAX_PRIO);
struct list_head runq[MAX_PRIO];
spinlock_t runq_lock;
- struct list_head active_list[MAX_NUMNODES];
- struct mutex active_mutex[MAX_NUMNODES];
+ int nr_waiting;
};
+static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
-static struct workqueue_struct *spu_sched_wq;
+static struct task_struct *spusched_task;
+static struct timer_list spusched_timer;
-static inline int node_allowed(int node)
-{
- cpumask_t mask;
+/*
+ * Priority of a normal, non-rt, non-niced process (aka nice level 0).
+ */
+#define NORMAL_PRIO 120
- if (!nr_cpus_node(node))
- return 0;
- mask = node_to_cpumask(node);
- if (!cpus_intersects(mask, current->cpus_allowed))
- return 0;
- return 1;
+/*
+ * Frequency of the spu scheduler tick. By default we do one SPU scheduler
+ * tick for every 10 CPU scheduler ticks.
+ */
+#define SPUSCHED_TICK (10)
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
+ * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ */
+#define MIN_SPU_TIMESLICE max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
+#define DEF_SPU_TIMESLICE (100 * HZ / (1000 * SPUSCHED_TICK))
+
+#define MAX_USER_PRIO (MAX_PRIO - MAX_RT_PRIO)
+#define SCALE_PRIO(x, prio) \
+ max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
+
+/*
+ * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
+ * [800ms ... 100ms ... 5ms]
+ *
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
+ */
+void spu_set_timeslice(struct spu_context *ctx)
+{
+ if (ctx->prio < NORMAL_PRIO)
+ ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
+ else
+ ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
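
Ignoring jiffies truncation and the one-tick clamp, the SCALE_PRIO arithmetic reproduces the advertised range (with MAX_PRIO = 140 and MAX_RT_PRIO = 100, MAX_USER_PRIO is 40 and the divisor is 20):

	prio 100 (nice -20): 4 * 100ms * (140 - 100) / 20 = 800 ms
	prio 120 (nice   0):     100ms * (140 - 120) / 20 = 100 ms
	prio 139 (nice +19):     100ms * (140 - 139) / 20 =   5 ms

so the highest-priority contexts get eight times the default slice, while the lowest are clamped to at least MIN_SPU_TIMESLICE (one scheduler tick).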
-void spu_start_tick(struct spu_context *ctx)
+/*
+ * Update scheduling information from the owning thread.
+ */
+void __spu_update_sched_info(struct spu_context *ctx)
{
- if (ctx->policy == SCHED_RR) {
- /*
- * Make sure the exiting bit is cleared.
- */
- clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
- mb();
- queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
- }
+ /*
+ * 32-bit assignments are atomic on powerpc, and we don't care about
+ * memory ordering here because retrieving the controlling thread is
+ * by definition racy.
+ */
+ ctx->tid = current->pid;
+
+ /*
+ * We do our own priority calculations, so we normally want
+ * ->static_prio to start with. Unfortunately this field
+ * contains junk for threads with a realtime scheduling
+ * policy so we have to look at ->prio in this case.
+ */
+ if (rt_prio(current->prio))
+ ctx->prio = current->prio;
+ else
+ ctx->prio = current->static_prio;
+ ctx->policy = current->policy;
+
+ /*
+ * A lot of places that don't hold list_mutex poke into
+ * cpus_allowed, including grab_runnable_context which
+ * already holds the runq_lock. So abuse runq_lock
+ * to protect this field as well.
+ */
+ spin_lock(&spu_prio->runq_lock);
+ ctx->cpus_allowed = current->cpus_allowed;
+ spin_unlock(&spu_prio->runq_lock);
}
-void spu_stop_tick(struct spu_context *ctx)
+void spu_update_sched_info(struct spu_context *ctx)
{
- if (ctx->policy == SCHED_RR) {
- /*
- * While the work can be rearming normally setting this flag
- * makes sure it does not rearm itself anymore.
- */
- set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
- mb();
- cancel_delayed_work(&ctx->sched_work);
- }
+ int node = ctx->spu->node;
+
+ mutex_lock(&cbe_spu_info[node].list_mutex);
+ __spu_update_sched_info(ctx);
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
}
-/**
- * spu_add_to_active_list - add spu to active list
- * @spu: spu to add to the active list
- */
-static void spu_add_to_active_list(struct spu *spu)
+static int __node_allowed(struct spu_context *ctx, int node)
{
- mutex_lock(&spu_prio->active_mutex[spu->node]);
- list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
- mutex_unlock(&spu_prio->active_mutex[spu->node]);
+ if (nr_cpus_node(node)) {
+ cpumask_t mask = node_to_cpumask(node);
+
+ if (cpus_intersects(mask, ctx->cpus_allowed))
+ return 1;
+ }
+
+ return 0;
}
-/**
- * spu_remove_from_active_list - remove spu from active list
- * @spu: spu to remove from the active list
- */
-static void spu_remove_from_active_list(struct spu *spu)
+static int node_allowed(struct spu_context *ctx, int node)
{
- int node = spu->node;
+ int rval;
- mutex_lock(&spu_prio->active_mutex[node]);
- list_del_init(&spu->list);
- mutex_unlock(&spu_prio->active_mutex[node]);
+ spin_lock(&spu_prio->runq_lock);
+ rval = __node_allowed(ctx, node);
+ spin_unlock(&spu_prio->runq_lock);
+
+ return rval;
}
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
-static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
+void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
blocking_notifier_call_chain(&spu_switch_notifier,
ctx ? ctx->object_id : 0, spu);
}
+static void notify_spus_active(void)
+{
+ int node;
+
+ /*
+ * Wake up the active spu_contexts.
+ *
+ * When the awakened processes see their "notify_active" flag is set,
+ * they will call spu_switch_notify();
+ */
+ for_each_online_node(node) {
+ struct spu *spu;
+
+ mutex_lock(&cbe_spu_info[node].list_mutex);
+ list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
+ if (spu->alloc_state != SPU_FREE) {
+ struct spu_context *ctx = spu->ctx;
+ set_bit(SPU_SCHED_NOTIFY_ACTIVE,
+ &ctx->sched_flags);
+ mb();
+ wake_up_all(&ctx->stop_wq);
+ }
+ }
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
+ }
+}
+
int spu_switch_event_register(struct notifier_block * n)
{
- return blocking_notifier_chain_register(&spu_switch_notifier, n);
+ int ret;
+ ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
+ if (!ret)
+ notify_spus_active();
+ return ret;
}
+EXPORT_SYMBOL_GPL(spu_switch_event_register);
int spu_switch_event_unregister(struct notifier_block * n)
{
return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
+EXPORT_SYMBOL_GPL(spu_switch_event_unregister);
/**
* spu_bind_context - bind spu context to physical spu
@@ -144,11 +226,22 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
spu->number, spu->node);
+ spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
+
+ if (ctx->flags & SPU_CREATE_NOSCHED)
+ atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
+ if (!list_empty(&ctx->aff_list))
+ atomic_inc(&ctx->gang->aff_sched_count);
+
+ ctx->stats.slb_flt_base = spu->stats.slb_flt;
+ ctx->stats.class2_intr_base = spu->stats.class2_intr;
+
spu->ctx = ctx;
spu->flags = 0;
ctx->spu = spu;
ctx->ops = &spu_hw_ops;
spu->pid = current->pid;
+ spu->tgid = current->tgid;
spu_associate_mm(spu, ctx->owner);
spu->ibox_callback = spufs_ibox_callback;
spu->wbox_callback = spufs_wbox_callback;
@@ -161,8 +254,155 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
spu->timestamp = jiffies;
spu_cpu_affinity_set(spu, raw_smp_processor_id());
spu_switch_notify(spu, ctx);
- spu_add_to_active_list(spu);
ctx->state = SPU_STATE_RUNNABLE;
+
+ spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
+}
+
+/*
+ * Must be used with the list_mutex held.
+ */
+static inline int sched_spu(struct spu *spu)
+{
+ BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));
+
+ return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
+}
+
+static void aff_merge_remaining_ctxs(struct spu_gang *gang)
+{
+ struct spu_context *ctx;
+
+ list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
+ if (list_empty(&ctx->aff_list))
+ list_add(&ctx->aff_list, &gang->aff_list_head);
+ }
+ gang->aff_flags |= AFF_MERGED;
+}
+
+static void aff_set_offsets(struct spu_gang *gang)
+{
+ struct spu_context *ctx;
+ int offset;
+
+ offset = -1;
+ list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
+ aff_list) {
+ if (&ctx->aff_list == &gang->aff_list_head)
+ break;
+ ctx->aff_offset = offset--;
+ }
+
+ offset = 0;
+ list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
+ if (&ctx->aff_list == &gang->aff_list_head)
+ break;
+ ctx->aff_offset = offset++;
+ }
+
+ gang->aff_flags |= AFF_OFFSETS_SET;
+}
+
+static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
+ int group_size, int lowest_offset)
+{
+ struct spu *spu;
+ int node, n;
+
+ /*
+ * TODO: A better algorithm could be used to find a good spu to be
+ * used as reference location for the ctxs chain.
+ */
+ node = cpu_to_node(raw_smp_processor_id());
+ for (n = 0; n < MAX_NUMNODES; n++, node++) {
+ node = (node < MAX_NUMNODES) ? node : 0;
+ if (!node_allowed(ctx, node))
+ continue;
+ mutex_lock(&cbe_spu_info[node].list_mutex);
+ list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
+ if ((!mem_aff || spu->has_mem_affinity) &&
+ sched_spu(spu)) {
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
+ return spu;
+ }
+ }
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
+ }
+ return NULL;
+}
+
+static void aff_set_ref_point_location(struct spu_gang *gang)
+{
+ int mem_aff, gs, lowest_offset;
+ struct spu_context *ctx;
+ struct spu *tmp;
+
+ mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
+ lowest_offset = 0;
+ gs = 0;
+
+ list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
+ gs++;
+
+ list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
+ aff_list) {
+ if (&ctx->aff_list == &gang->aff_list_head)
+ break;
+ lowest_offset = ctx->aff_offset;
+ }
+
+ gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
+ lowest_offset);
+}
+
+static struct spu *ctx_location(struct spu *ref, int offset, int node)
+{
+ struct spu *spu;
+
+ spu = NULL;
+ if (offset >= 0) {
+ list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
+ BUG_ON(spu->node != node);
+ if (offset == 0)
+ break;
+ if (sched_spu(spu))
+ offset--;
+ }
+ } else {
+ list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
+ BUG_ON(spu->node != node);
+ if (offset == 0)
+ break;
+ if (sched_spu(spu))
+ offset++;
+ }
+ }
+
+ return spu;
+}
+
+/*
+ * has_affinity is called each time a context is about to be scheduled.
+ * It returns true once the gang's affinity reference spu has been set
+ * up (merging stray contexts and computing offsets on first use).
+ */
+static int has_affinity(struct spu_context *ctx)
+{
+ struct spu_gang *gang = ctx->gang;
+
+ if (list_empty(&ctx->aff_list))
+ return 0;
+
+ mutex_lock(&gang->aff_mutex);
+ if (!gang->aff_ref_spu) {
+ if (!(gang->aff_flags & AFF_MERGED))
+ aff_merge_remaining_ctxs(gang);
+ if (!(gang->aff_flags & AFF_OFFSETS_SET))
+ aff_set_offsets(gang);
+ aff_set_ref_point_location(gang);
+ }
+ mutex_unlock(&gang->aff_mutex);
+
+ return gang->aff_ref_spu != NULL;
}
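
On the first scheduling of an affinity gang, has_affinity() lazily merges stray contexts into the list, assigns each context a signed aff_offset relative to the reference context (negative before it, positive after), and picks a reference SPU; spu_get_idle() then calls ctx_location() to walk the physical aff_list by that many schedulable SPUs. A worked layout for a hypothetical gang of four with C2 as the reference context:

	gang aff_list :   C0    C1    C2(ref)   C3
	aff_offset    :   -2    -1       0      +1

so C0 lands two schedulable SPUs before the reference SPU and C3 one after it, keeping affine contexts on physically adjacent SPUs.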
/**
@@ -174,8 +414,13 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
spu->pid, spu->number, spu->node);
+ spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
- spu_remove_from_active_list(spu);
+ if (spu->ctx->flags & SPU_CREATE_NOSCHED)
+ atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
+ if (!list_empty(&ctx->aff_list))
+ if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
+ ctx->gang->aff_ref_spu = NULL;
spu_switch_notify(spu, NULL);
spu_unmap_mappings(ctx);
spu_save(&ctx->csa, spu);
@@ -188,10 +433,19 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
spu->dma_callback = NULL;
spu_associate_mm(spu, NULL);
spu->pid = 0;
+ spu->tgid = 0;
ctx->ops = &spu_backing_ops;
- ctx->spu = NULL;
spu->flags = 0;
spu->ctx = NULL;
+
+ ctx->stats.slb_flt +=
+ (spu->stats.slb_flt - ctx->stats.slb_flt_base);
+ ctx->stats.class2_intr +=
+ (spu->stats.class2_intr - ctx->stats.class2_intr_base);
+
+ /* This maps the underlying spu state to idle */
+ spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
+ ctx->spu = NULL;
}
/**
@@ -200,20 +454,39 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
*/
static void __spu_add_to_rq(struct spu_context *ctx)
{
- int prio = ctx->prio;
-
- list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
- set_bit(prio, spu_prio->bitmap);
+ /*
+ * Unfortunately this code path can be called from multiple threads
+ * on behalf of a single context due to the way the problem state
+ * mmap support works.
+ *
+ * Fortunately we need to wake up all these threads at the same time
+ * and can simply skip the runqueue addition for all but the first
+ * thread getting into this codepath.
+ *
+ * It's still quite hacky, and long-term we should proxy all other
+ * threads through the owner thread so that spu_run is in control
+ * of all the scheduling activity for a given context.
+ */
+ if (list_empty(&ctx->rq)) {
+ list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
+ set_bit(ctx->prio, spu_prio->bitmap);
+ if (!spu_prio->nr_waiting++)
+ __mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+ }
}
static void __spu_del_from_rq(struct spu_context *ctx)
{
int prio = ctx->prio;
- if (!list_empty(&ctx->rq))
+ if (!list_empty(&ctx->rq)) {
+ if (!--spu_prio->nr_waiting)
+ del_timer(&spusched_timer);
list_del_init(&ctx->rq);
- if (list_empty(&spu_prio->runq[prio]))
- clear_bit(prio, spu_prio->bitmap);
+
+ if (list_empty(&spu_prio->runq[prio]))
+ clear_bit(prio, spu_prio->bitmap);
+ }
}
static void spu_prio_wait(struct spu_context *ctx)
@@ -238,18 +511,41 @@ static void spu_prio_wait(struct spu_context *ctx)
static struct spu *spu_get_idle(struct spu_context *ctx)
{
- struct spu *spu = NULL;
- int node = cpu_to_node(raw_smp_processor_id());
- int n;
+ struct spu *spu;
+ int node, n;
+
+ if (has_affinity(ctx)) {
+ node = ctx->gang->aff_ref_spu->node;
+ mutex_lock(&cbe_spu_info[node].list_mutex);
+ spu = ctx_location(ctx->gang->aff_ref_spu, ctx->aff_offset, node);
+ if (spu && spu->alloc_state == SPU_FREE)
+ goto found;
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
+ return NULL;
+ }
+
+ node = cpu_to_node(raw_smp_processor_id());
for (n = 0; n < MAX_NUMNODES; n++, node++) {
node = (node < MAX_NUMNODES) ? node : 0;
- if (!node_allowed(node))
+ if (!node_allowed(ctx, node))
continue;
- spu = spu_alloc_node(node);
- if (spu)
- break;
+
+ mutex_lock(&cbe_spu_info[node].list_mutex);
+ list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
+ if (spu->alloc_state == SPU_FREE)
+ goto found;
+ }
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
}
+
+ return NULL;
+
+ found:
+ spu->alloc_state = SPU_USED;
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
+ pr_debug("Got SPU %d %d\n", spu->number, spu->node);
+ spu_init_channels(spu);
return spu;
}
@@ -276,18 +572,18 @@ static struct spu *find_victim(struct spu_context *ctx)
node = cpu_to_node(raw_smp_processor_id());
for (n = 0; n < MAX_NUMNODES; n++, node++) {
node = (node < MAX_NUMNODES) ? node : 0;
- if (!node_allowed(node))
+ if (!node_allowed(ctx, node))
continue;
- mutex_lock(&spu_prio->active_mutex[node]);
- list_for_each_entry(spu, &spu_prio->active_list[node], list) {
+ mutex_lock(&cbe_spu_info[node].list_mutex);
+ list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
struct spu_context *tmp = spu->ctx;
- if (tmp->rt_priority < ctx->rt_priority &&
- (!victim || tmp->rt_priority < victim->rt_priority))
+ if (tmp && tmp->prio > ctx->prio &&
+ (!victim || tmp->prio > victim->prio))
victim = spu->ctx;
}
- mutex_unlock(&spu_prio->active_mutex[node]);
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
if (victim) {
/*
@@ -312,7 +608,14 @@ static struct spu *find_victim(struct spu_context *ctx)
victim = NULL;
goto restart;
}
+
+ mutex_lock(&cbe_spu_info[node].list_mutex);
+ cbe_spu_info[node].nr_active--;
spu_unbind_context(spu, victim);
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
+
+ victim->stats.invol_ctx_switch++;
+ spu->stats.invol_ctx_switch++;
mutex_unlock(&victim->state_mutex);
/*
* We need to break out of the wait loop in spu_run
@@ -338,22 +641,32 @@ static struct spu *find_victim(struct spu_context *ctx)
*/
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
-
- if (ctx->spu)
- return 0;
-
do {
struct spu *spu;
+ /*
+ * If there are multiple threads waiting for a single context
+ * only one actually binds the context while the others will
+ * only be able to acquire the state_mutex once the context
+ * already is in runnable state.
+ */
+ if (ctx->spu)
+ return 0;
+
spu = spu_get_idle(ctx);
/*
* If this is a realtime thread we try to get it running by
* preempting a lower priority thread.
*/
- if (!spu && ctx->rt_priority)
+ if (!spu && rt_prio(ctx->prio))
spu = find_victim(ctx);
if (spu) {
+ int node = spu->node;
+
+ mutex_lock(&cbe_spu_info[node].list_mutex);
spu_bind_context(spu, ctx);
+ cbe_spu_info[node].nr_active++;
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
return 0;
}
@@ -369,23 +682,28 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
* Remove the highest priority context on the runqueue and return it
* to the caller. Returns %NULL if no runnable context was found.
*/
-static struct spu_context *grab_runnable_context(int prio)
+static struct spu_context *grab_runnable_context(int prio, int node)
{
- struct spu_context *ctx = NULL;
+ struct spu_context *ctx;
int best;
spin_lock(&spu_prio->runq_lock);
- best = sched_find_first_bit(spu_prio->bitmap);
- if (best < prio) {
+ best = find_first_bit(spu_prio->bitmap, prio);
+ while (best < prio) {
struct list_head *rq = &spu_prio->runq[best];
- BUG_ON(list_empty(rq));
-
- ctx = list_entry(rq->next, struct spu_context, rq);
- __spu_del_from_rq(ctx);
+ list_for_each_entry(ctx, rq, rq) {
+ /* XXX(hch): check for affinity here as well */
+ if (__node_allowed(ctx, node)) {
+ __spu_del_from_rq(ctx);
+ goto found;
+ }
+ }
+ best++;
}
+ ctx = NULL;
+ found:
spin_unlock(&spu_prio->runq_lock);
-
return ctx;
}
@@ -395,10 +713,19 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
struct spu_context *new = NULL;
if (spu) {
- new = grab_runnable_context(max_prio);
+ new = grab_runnable_context(max_prio, spu->node);
if (new || force) {
+ int node = spu->node;
+
+ mutex_lock(&cbe_spu_info[node].list_mutex);
spu_unbind_context(spu, ctx);
- spu_free(spu);
+ spu->alloc_state = SPU_FREE;
+ cbe_spu_info[node].nr_active--;
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
+
+ ctx->stats.vol_ctx_switch++;
+ spu->stats.vol_ctx_switch++;
+
if (new)
wake_up(&new->stop_wq);
}
@@ -421,7 +748,7 @@ void spu_deactivate(struct spu_context *ctx)
}
/**
- * spu_yield - yield a physical spu if others are waiting
+ * spu_yield - yield a physical spu if others are waiting
* @ctx: spu context to yield
*
* Check if there is a higher priority context waiting and if yes
@@ -437,78 +764,213 @@ void spu_yield(struct spu_context *ctx)
}
}
-void spu_sched_tick(struct work_struct *work)
+static noinline void spusched_tick(struct spu_context *ctx)
{
- struct spu_context *ctx =
- container_of(work, struct spu_context, sched_work.work);
- int preempted;
+ if (ctx->flags & SPU_CREATE_NOSCHED)
+ return;
+ if (ctx->policy == SCHED_FIFO)
+ return;
- /*
- * If this context is being stopped avoid rescheduling from the
- * scheduler tick because we would block on the state_mutex.
- * The caller will yield the spu later on anyway.
- */
- if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
+ if (--ctx->time_slice)
return;
- mutex_lock(&ctx->state_mutex);
- preempted = __spu_deactivate(ctx, 0, ctx->prio + 1);
- mutex_unlock(&ctx->state_mutex);
+ /*
+ * Unfortunately list_mutex ranks outside of state_mutex, so
+ * we have to trylock here. If we fail give the context another
+ * tick and try again.
+ */
+ if (mutex_trylock(&ctx->state_mutex)) {
+ struct spu *spu = ctx->spu;
+ struct spu_context *new;
- if (preempted) {
- /*
- * We need to break out of the wait loop in spu_run manually
- * to ensure this context gets put on the runqueue again
- * ASAP.
- */
- wake_up(&ctx->stop_wq);
+ new = grab_runnable_context(ctx->prio + 1, spu->node);
+ if (new) {
+ spu_unbind_context(spu, ctx);
+ ctx->stats.invol_ctx_switch++;
+ spu->stats.invol_ctx_switch++;
+ spu->alloc_state = SPU_FREE;
+ cbe_spu_info[spu->node].nr_active--;
+ wake_up(&new->stop_wq);
+ /*
+ * We need to break out of the wait loop in
+ * spu_run manually to ensure this context
+ * gets put on the runqueue again ASAP.
+ */
+ wake_up(&ctx->stop_wq);
+ }
+ spu_set_timeslice(ctx);
+ mutex_unlock(&ctx->state_mutex);
} else {
- spu_start_tick(ctx);
+ ctx->time_slice++;
}
}
-int __init spu_sched_init(void)
+/**
+ * count_active_contexts - count nr of active tasks
+ *
+ * Return the number of tasks currently running or waiting to run.
+ *
+ * Note that we don't take runq_lock / list_mutex here. Reading
+ * a single 32-bit value is atomic on powerpc, and we don't care
+ * about memory ordering issues here.
+ */
+static unsigned long count_active_contexts(void)
{
- int i;
+ int nr_active = 0, node;
- spu_sched_wq = create_singlethread_workqueue("spusched");
- if (!spu_sched_wq)
- return 1;
+ for (node = 0; node < MAX_NUMNODES; node++)
+ nr_active += cbe_spu_info[node].nr_active;
+ nr_active += spu_prio->nr_waiting;
- spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
- if (!spu_prio) {
- printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
- __FUNCTION__);
- destroy_workqueue(spu_sched_wq);
- return 1;
+ return nr_active;
+}
+
+/**
+ * spu_calc_load - given tick count, update the avenrun load estimates.
+ * @tick: tick count
+ *
+ * No locking against reading these values from userspace, as for
+ * the CPU loadavg code.
+ */
+static void spu_calc_load(unsigned long ticks)
+{
+ unsigned long active_tasks; /* fixed-point */
+ static int count = LOAD_FREQ;
+
+ count -= ticks;
+
+ if (unlikely(count < 0)) {
+ active_tasks = count_active_contexts() * FIXED_1;
+ do {
+ CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
+ CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
+ CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
+ count += LOAD_FREQ;
+ } while (count < 0);
+ }
+}
+
+static void spusched_wake(unsigned long data)
+{
+ mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+ wake_up_process(spusched_task);
+ spu_calc_load(SPUSCHED_TICK);
+}
+
+static int spusched_thread(void *unused)
+{
+ struct spu *spu;
+ int node;
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ for (node = 0; node < MAX_NUMNODES; node++) {
+ mutex_lock(&cbe_spu_info[node].list_mutex);
+ list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
+ if (spu->ctx)
+ spusched_tick(spu->ctx);
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
+ }
}
+
+ return 0;
+}
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
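As a worked example of the fixed-point conversion these macros perform, here is
a small userspace sketch; it assumes the kernel's FSHIFT of 11 (so FIXED_1 ==
2048 represents a load of 1.0), which comes from sched.h rather than from this
patch:

	#include <stdio.h>

	#define FSHIFT		11
	#define FIXED_1		(1UL << FSHIFT)		/* 2048 == 1.0 */
	#define LOAD_INT(x)	((x) >> FSHIFT)
	#define LOAD_FRAC(x)	LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

	int main(void)
	{
		/* a load of 0.5 plus the FIXED_1/200 rounding term that
		 * show_spu_loadavg() adds before printing */
		unsigned long a = FIXED_1 / 2 + FIXED_1 / 200;

		printf("%lu.%02lu\n", LOAD_INT(a), LOAD_FRAC(a)); /* 0.50 */
		return 0;
	}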
+
+static int show_spu_loadavg(struct seq_file *s, void *private)
+{
+ int a, b, c;
+
+ a = spu_avenrun[0] + (FIXED_1/200);
+ b = spu_avenrun[1] + (FIXED_1/200);
+ c = spu_avenrun[2] + (FIXED_1/200);
+
+ /*
+ * Note that last_pid doesn't really make much sense for the
+	 * SPU loadavg (it even seems very odd on the CPU side...),
+ * but we include it here to have a 100% compatible interface.
+ */
+ seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
+ LOAD_INT(a), LOAD_FRAC(a),
+ LOAD_INT(b), LOAD_FRAC(b),
+ LOAD_INT(c), LOAD_FRAC(c),
+ count_active_contexts(),
+ atomic_read(&nr_spu_contexts),
+ current->nsproxy->pid_ns->last_pid);
+ return 0;
+}
+
+static int spu_loadavg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_spu_loadavg, NULL);
+}
+
+static const struct file_operations spu_loadavg_fops = {
+ .open = spu_loadavg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
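Given the seq_printf() format above, reading the new file produces one line in
the same shape as /proc/loadavg; the values below are invented for
illustration:

	$ cat /proc/spu_loadavg
	0.08 0.02 0.01 2/17 1093

i.e. the 1-, 5- and 15-minute SPU load averages, running-or-waiting versus
total SPU contexts, and last_pid.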
+
+int __init spu_sched_init(void)
+{
+ struct proc_dir_entry *entry;
+ int err = -ENOMEM, i;
+
+ spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
+ if (!spu_prio)
+ goto out;
+
for (i = 0; i < MAX_PRIO; i++) {
INIT_LIST_HEAD(&spu_prio->runq[i]);
__clear_bit(i, spu_prio->bitmap);
}
- __set_bit(MAX_PRIO, spu_prio->bitmap);
- for (i = 0; i < MAX_NUMNODES; i++) {
- mutex_init(&spu_prio->active_mutex[i]);
- INIT_LIST_HEAD(&spu_prio->active_list[i]);
- }
spin_lock_init(&spu_prio->runq_lock);
+
+ setup_timer(&spusched_timer, spusched_wake, 0);
+
+ spusched_task = kthread_run(spusched_thread, NULL, "spusched");
+ if (IS_ERR(spusched_task)) {
+ err = PTR_ERR(spusched_task);
+ goto out_free_spu_prio;
+ }
+
+ entry = create_proc_entry("spu_loadavg", 0, NULL);
+ if (!entry)
+ goto out_stop_kthread;
+ entry->proc_fops = &spu_loadavg_fops;
+
+ pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
+ SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
return 0;
+
+ out_stop_kthread:
+ kthread_stop(spusched_task);
+ out_free_spu_prio:
+ kfree(spu_prio);
+ out:
+ return err;
}
-void __exit spu_sched_exit(void)
+void spu_sched_exit(void)
{
- struct spu *spu, *tmp;
+ struct spu *spu;
int node;
+ remove_proc_entry("spu_loadavg", NULL);
+
+ del_timer_sync(&spusched_timer);
+ kthread_stop(spusched_task);
+
for (node = 0; node < MAX_NUMNODES; node++) {
- mutex_lock(&spu_prio->active_mutex[node]);
- list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
- list) {
- list_del_init(&spu->list);
- spu_free(spu);
- }
- mutex_unlock(&spu_prio->active_mutex[node]);
+ mutex_lock(&cbe_spu_info[node].list_mutex);
+ list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
+ if (spu->alloc_state != SPU_FREE)
+ spu->alloc_state = SPU_FREE;
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
}
kfree(spu_prio);
- destroy_workqueue(spu_sched_wq);
}
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c
index 0bf723dcd677..21a9c952d88b 100644
--- a/arch/powerpc/platforms/cell/spufs/spu_restore.c
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c
@@ -84,13 +84,13 @@ static inline void restore_decr(void)
unsigned int decr_running;
unsigned int decr;
- /* Restore, Step 6:
+	/* Restore, Step 6 (moved):
* If the LSCSA "decrementer running" flag is set
* then write the SPU_WrDec channel with the
* decrementer value from LSCSA.
*/
offset = LSCSA_QW_OFFSET(decr_status);
- decr_running = regs_spill[offset].slot[0];
+ decr_running = regs_spill[offset].slot[0] & SPU_DECR_STATUS_RUNNING;
if (decr_running) {
offset = LSCSA_QW_OFFSET(decr);
decr = regs_spill[offset].slot[0];
@@ -296,7 +296,7 @@ static inline void restore_complete(void)
* This code deviates from the documented sequence in the
* following aspects:
*
- * 1. The EA for LSCSA is passed from PPE in the
+ * 1. The EA for LSCSA is passed from PPE in the
* signal notification channels.
* 2. The register spill area is pulled by SPU
* into LS, rather than pushed by PPE.
@@ -318,10 +318,10 @@ int main()
build_dma_list(lscsa_ea); /* Step 3. */
restore_upper_240kb(lscsa_ea); /* Step 4. */
/* Step 5: done by 'exit'. */
- restore_decr(); /* Step 6. */
enqueue_putllc(lscsa_ea); /* Step 7. */
set_tag_update(); /* Step 8. */
read_tag_status(); /* Step 9. */
+ restore_decr(); /* moved Step 6. */
read_llar_status(); /* Step 10. */
write_ppu_mb(); /* Step 11. */
write_ppuint_mb(); /* Step 12. */
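Moving restore_decr() after the tag-status read starts the decrementer as late
as possible in the restore sequence, so it cannot tick while the register-spill
DMA is still outstanding. The new masking also assumes decr_status now carries
flag bits rather than a plain boolean; a presumed layout for the
SPU_DECR_STATUS_* constants used here and in switch.c:

	/* presumed flag bits in lscsa->decr_status.slot[0] */
	#define SPU_DECR_STATUS_RUNNING	0x1	/* decrementer was running */
	#define SPU_DECR_STATUS_WRAPPED	0x2	/* underflowed while saved */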
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
index 15183d209b58..f383b027e8bf 100644
--- a/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
@@ -10,7 +10,7 @@ static unsigned int spu_restore_code[] __attribute__((__aligned__(128))) = {
0x24fd8081,
0x1cd80081,
0x33001180,
-0x42030003,
+0x42034003,
0x33800284,
0x1c010204,
0x40200000,
@@ -24,22 +24,22 @@ static unsigned int spu_restore_code[] __attribute__((__aligned__(128))) = {
0x23fffd84,
0x1c100183,
0x217ffa85,
-0x3080a000,
-0x3080a201,
-0x3080a402,
-0x3080a603,
-0x3080a804,
-0x3080aa05,
-0x3080ac06,
-0x3080ae07,
-0x3080b008,
-0x3080b209,
-0x3080b40a,
-0x3080b60b,
-0x3080b80c,
-0x3080ba0d,
-0x3080bc0e,
-0x3080be0f,
+0x3080b000,
+0x3080b201,
+0x3080b402,
+0x3080b603,
+0x3080b804,
+0x3080ba05,
+0x3080bc06,
+0x3080be07,
+0x3080c008,
+0x3080c209,
+0x3080c40a,
+0x3080c60b,
+0x3080c80c,
+0x3080ca0d,
+0x3080cc0e,
+0x3080ce0f,
0x00003ffc,
0x00000000,
0x00000000,
@@ -48,19 +48,18 @@ static unsigned int spu_restore_code[] __attribute__((__aligned__(128))) = {
0x3ec00083,
0xb0a14103,
0x01a00204,
-0x3ec10082,
-0x4202800e,
-0x04000703,
-0xb0a14202,
-0x21a00803,
-0x3fbf028d,
-0x3f20068d,
-0x3fbe0682,
+0x3ec10083,
+0x4202c002,
+0xb0a14203,
+0x21a00802,
+0x3fbf028a,
+0x3f20050a,
+0x3fbe0502,
0x3fe30102,
0x21a00882,
-0x3f82028f,
-0x3fe3078f,
-0x3fbf0784,
+0x3f82028b,
+0x3fe3058b,
+0x3fbf0584,
0x3f200204,
0x3fbe0204,
0x3fe30204,
@@ -75,252 +74,285 @@ static unsigned int spu_restore_code[] __attribute__((__aligned__(128))) = {
0x21a00083,
0x40800082,
0x21a00b02,
-0x10002818,
-0x42a00002,
-0x32800007,
-0x4207000c,
-0x18008208,
-0x40a0000b,
-0x4080020a,
-0x40800709,
-0x00200000,
-0x42070002,
-0x3ac30384,
+0x10002612,
+0x42a00003,
+0x42074006,
+0x1800c204,
+0x40a00008,
+0x40800789,
+0x1c010305,
+0x34000302,
0x1cffc489,
-0x00200000,
-0x18008383,
-0x38830382,
-0x4cffc486,
-0x3ac28185,
-0xb0408584,
-0x28830382,
-0x1c020387,
-0x38828182,
-0xb0408405,
-0x1802c408,
-0x28828182,
-0x217ff886,
-0x04000583,
-0x21a00803,
-0x3fbe0682,
-0x3fe30102,
-0x04000106,
-0x21a00886,
-0x04000603,
-0x21a00903,
-0x40803c02,
-0x21a00982,
-0x40800003,
-0x04000184,
-0x21a00a04,
+0x3ec00303,
+0x3ec00287,
+0xb0408403,
+0x24000302,
+0x34000282,
+0x1c020306,
+0xb0408207,
+0x18020204,
+0x24000282,
+0x217ffa09,
+0x04000402,
+0x21a00802,
+0x3fbe0504,
+0x3fe30204,
+0x21a00884,
+0x42074002,
+0x21a00902,
+0x40803c03,
+0x21a00983,
+0x04000485,
+0x21a00a05,
0x40802202,
0x21a00a82,
-0x42028005,
-0x34208702,
-0x21002282,
-0x21a00804,
-0x21a00886,
-0x3fbf0782,
+0x21a00805,
+0x21a00884,
+0x3fbf0582,
0x3f200102,
0x3fbe0102,
0x3fe30102,
0x21a00902,
0x40804003,
0x21a00983,
-0x21a00a04,
+0x21a00a05,
0x40805a02,
0x21a00a82,
0x40800083,
0x21a00b83,
0x01a00c02,
-0x01a00d83,
-0x3420c282,
+0x30809c03,
+0x34000182,
+0x14004102,
+0x21002082,
+0x01a00d82,
+0x3080a003,
+0x34000182,
0x21a00e02,
-0x34210283,
-0x21a00f03,
-0x34200284,
-0x77400200,
-0x3421c282,
+0x3080a203,
+0x34000182,
+0x21a00f02,
+0x3080a403,
+0x34000182,
+0x77400100,
+0x3080a603,
+0x34000182,
0x21a00702,
-0x34218283,
-0x21a00083,
-0x34214282,
+0x3080a803,
+0x34000182,
+0x21a00082,
+0x3080aa03,
+0x34000182,
0x21a00b02,
-0x4200480c,
-0x00200000,
-0x1c010286,
-0x34220284,
-0x34220302,
-0x0f608203,
-0x5c024204,
-0x3b81810b,
-0x42013c02,
-0x00200000,
-0x18008185,
-0x38808183,
-0x3b814182,
-0x21004e84,
+0x4020007f,
+0x3080ae02,
+0x42004805,
+0x3080ac04,
+0x34000103,
+0x34000202,
+0x1cffc183,
+0x3b810106,
+0x0f608184,
+0x42013802,
+0x5c020183,
+0x38810102,
+0x3b810102,
+0x21000e83,
0x4020007f,
0x35000100,
-0x000004e0,
-0x000002a0,
-0x000002e8,
-0x00000428,
+0x00000470,
+0x000002f8,
+0x00000430,
0x00000360,
-0x000002e8,
-0x000004a0,
-0x00000468,
+0x000002f8,
0x000003c8,
+0x000004a8,
+0x00000298,
0x00000360,
+0x00200000,
0x409ffe02,
0x30801203,
-0x40800204,
-0x3ec40085,
-0x10009c09,
-0x3ac10606,
-0xb060c105,
-0x4020007f,
-0x4020007f,
+0x40800208,
+0x3ec40084,
+0x40800407,
+0x3ac20289,
+0xb060c104,
+0x3ac1c284,
0x20801203,
-0x38810602,
-0xb0408586,
-0x28810602,
-0x32004180,
-0x34204702,
+0x38820282,
+0x41004003,
+0xb0408189,
+0x28820282,
+0x3881c282,
+0xb0408304,
+0x2881c282,
+0x00400000,
+0x40800003,
+0x35000000,
+0x30809e03,
+0x34000182,
0x21a00382,
0x4020007f,
-0x327fdc80,
+0x327fde00,
0x409ffe02,
0x30801203,
-0x40800204,
-0x3ec40087,
-0x40800405,
-0x00200000,
-0x40800606,
-0x3ac10608,
-0x3ac14609,
-0x3ac1860a,
-0xb060c107,
+0x40800206,
+0x3ec40084,
+0x40800407,
+0x40800608,
+0x3ac1828a,
+0x3ac20289,
+0xb060c104,
+0x3ac1c284,
0x20801203,
+0x38818282,
0x41004003,
-0x38810602,
-0x4020007f,
-0xb0408188,
-0x4020007f,
-0x28810602,
-0x41201002,
-0x38814603,
-0x10009c09,
-0xb060c109,
-0x4020007f,
-0x28814603,
+0xb040818a,
+0x10005b0b,
+0x41201003,
+0x28818282,
+0x3881c282,
+0xb0408184,
0x41193f83,
-0x38818602,
0x60ffc003,
-0xb040818a,
-0x28818602,
-0x32003080,
+0x2881c282,
+0x38820282,
+0xb0408189,
+0x28820282,
+0x327fef80,
0x409ffe02,
0x30801203,
-0x40800204,
-0x3ec40087,
-0x41201008,
-0x10009c14,
-0x40800405,
-0x3ac10609,
-0x40800606,
-0x3ac1460a,
-0xb060c107,
-0x3ac1860b,
+0x40800207,
+0x3ec40086,
+0x4120100b,
+0x10005b14,
+0x40800404,
+0x3ac1c289,
+0x40800608,
+0xb060c106,
+0x3ac10286,
+0x3ac2028a,
0x20801203,
-0x38810602,
-0xb0408409,
-0x28810602,
-0x38814603,
-0xb060c40a,
-0x4020007f,
-0x28814603,
+0x3881c282,
0x41193f83,
-0x38818602,
0x60ffc003,
-0xb040818b,
-0x28818602,
-0x32002380,
-0x409ffe02,
-0x30801204,
-0x40800205,
-0x3ec40083,
-0x40800406,
-0x3ac14607,
-0x3ac18608,
-0xb0810103,
-0x41004002,
-0x20801204,
-0x4020007f,
-0x38814603,
-0x10009c0b,
-0xb060c107,
-0x4020007f,
-0x4020007f,
-0x28814603,
-0x38818602,
-0x4020007f,
+0xb0408589,
+0x2881c282,
+0x38810282,
+0xb0408586,
+0x28810282,
+0x38820282,
+0xb040818a,
+0x28820282,
0x4020007f,
-0xb0408588,
-0x28818602,
+0x327fe280,
+0x409ffe02,
+0x30801203,
+0x40800207,
+0x3ec40084,
+0x40800408,
+0x10005b14,
+0x40800609,
+0x3ac1c28a,
+0x3ac2028b,
+0xb060c104,
+0x3ac24284,
+0x20801203,
+0x41201003,
+0x3881c282,
+0xb040830a,
+0x2881c282,
+0x38820282,
+0xb040818b,
+0x41193f83,
+0x60ffc003,
+0x28820282,
+0x38824282,
+0xb0408184,
+0x28824282,
0x4020007f,
-0x32001780,
+0x327fd580,
0x409ffe02,
-0x1000640e,
-0x40800204,
+0x1000658e,
+0x40800206,
0x30801203,
-0x40800405,
-0x3ec40087,
-0x40800606,
-0x3ac10608,
-0x3ac14609,
-0x3ac1860a,
-0xb060c107,
+0x40800407,
+0x3ec40084,
+0x40800608,
+0x3ac1828a,
+0x3ac20289,
+0xb060c104,
+0x3ac1c284,
0x20801203,
0x413d8003,
-0x38810602,
+0x38818282,
0x4020007f,
-0x327fd780,
-0x409ffe02,
-0x10007f0c,
-0x40800205,
-0x30801204,
-0x40800406,
-0x3ec40083,
-0x3ac14607,
-0x3ac18608,
-0xb0810103,
-0x413d8002,
-0x20801204,
-0x38814603,
+0x327fd800,
+0x409ffe03,
+0x30801202,
+0x40800207,
+0x3ec40084,
+0x10005b09,
+0x3ac1c288,
+0xb0408184,
0x4020007f,
-0x327feb80,
+0x4020007f,
+0x20801202,
+0x3881c282,
+0xb0408308,
+0x2881c282,
+0x327fc680,
0x409ffe02,
+0x1000588b,
+0x40800208,
0x30801203,
-0x40800204,
-0x3ec40087,
-0x40800405,
-0x1000650a,
-0x40800606,
-0x3ac10608,
-0x3ac14609,
-0x3ac1860a,
-0xb060c107,
+0x40800407,
+0x3ec40084,
+0x3ac20289,
+0xb060c104,
+0x3ac1c284,
0x20801203,
-0x38810602,
-0xb0408588,
-0x4020007f,
-0x327fc980,
-0x00400000,
-0x40800003,
-0x4020007f,
-0x35000000,
+0x413d8003,
+0x38820282,
+0x327fbd80,
+0x00200000,
+0x00000da0,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000d90,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000db0,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000dc0,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000d80,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000df0,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000de0,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000dd0,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000e04,
+0x00000000,
+0x00000000,
0x00000000,
+0x00000e00,
0x00000000,
0x00000000,
0x00000000,
diff --git a/arch/powerpc/platforms/cell/spufs/spu_save.c b/arch/powerpc/platforms/cell/spufs/spu_save.c
index 196033b8a579..ae95cc1701e9 100644
--- a/arch/powerpc/platforms/cell/spufs/spu_save.c
+++ b/arch/powerpc/platforms/cell/spufs/spu_save.c
@@ -44,7 +44,7 @@ static inline void save_event_mask(void)
* Read the SPU_RdEventMsk channel and save to the LSCSA.
*/
offset = LSCSA_QW_OFFSET(event_mask);
- regs_spill[offset].slot[0] = spu_readch(SPU_RdEventStatMask);
+ regs_spill[offset].slot[0] = spu_readch(SPU_RdEventMask);
}
static inline void save_tag_mask(void)
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 47617e8014a5..2bfdeb8ea8bd 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -26,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
+#include <linux/cpumask.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
@@ -41,7 +42,8 @@ struct spu_gang;
/* ctx->sched_flags */
enum {
- SPU_SCHED_EXITING = 0,
+ SPU_SCHED_NOTIFY_ACTIVE,
+ SPU_SCHED_WAS_ACTIVE, /* was active upon spu_acquire_saved() */
};
struct spu_context {
@@ -80,14 +82,41 @@ struct spu_context {
struct list_head gang_list;
struct spu_gang *gang;
+ struct kref *prof_priv_kref;
+	void (*prof_priv_release)(struct kref *kref);
+
+ /* owner thread */
+ pid_t tid;
/* scheduler fields */
- struct list_head rq;
- struct delayed_work sched_work;
+ struct list_head rq;
+ unsigned int time_slice;
unsigned long sched_flags;
- unsigned long rt_priority;
+ cpumask_t cpus_allowed;
int policy;
int prio;
+
+ /* statistics */
+ struct {
+ /* updates protected by ctx->state_mutex */
+ enum spu_utilization_state util_state;
+ unsigned long long tstamp; /* time of last state switch */
+ unsigned long long times[SPU_UTIL_MAX];
+ unsigned long long vol_ctx_switch;
+ unsigned long long invol_ctx_switch;
+ unsigned long long min_flt;
+ unsigned long long maj_flt;
+ unsigned long long hash_flt;
+ unsigned long long slb_flt;
+ unsigned long long slb_flt_base; /* # at last ctx switch */
+ unsigned long long class2_intr;
+ unsigned long long class2_intr_base; /* # at last ctx switch */
+ unsigned long long libassist;
+ } stats;
+
+ struct list_head aff_list;
+ int aff_head;
+ int aff_offset;
};
struct spu_gang {
@@ -95,8 +124,19 @@ struct spu_gang {
struct mutex mutex;
struct kref kref;
int contexts;
+
+ struct spu_context *aff_ref_ctx;
+ struct list_head aff_list_head;
+ struct mutex aff_mutex;
+ int aff_flags;
+ struct spu *aff_ref_spu;
+ atomic_t aff_sched_count;
};
+/* Flag bits for spu_gang aff_flags */
+#define AFF_OFFSETS_SET 1
+#define AFF_MERGED 2
+
struct mfc_dma_command {
int32_t pad; /* reserved */
uint32_t lsa; /* local storage address */
@@ -160,10 +200,9 @@ extern struct tree_descr spufs_dir_contents[];
extern struct tree_descr spufs_dir_nosched_contents[];
/* system call implementation */
-long spufs_run_spu(struct file *file,
- struct spu_context *ctx, u32 *npc, u32 *status);
-long spufs_create(struct nameidata *nd,
- unsigned int flags, mode_t mode);
+long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
+long spufs_create(struct nameidata *nd, unsigned int flags,
+ mode_t mode, struct file *filp);
extern const struct file_operations spufs_context_fops;
/* gang management */
@@ -176,7 +215,11 @@ void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
/* fault handling */
int spufs_handle_class1(struct spu_context *ctx);
+/* affinity */
+struct spu *affinity_check(struct spu_context *ctx);
+
/* context management */
+extern atomic_t nr_spu_contexts;
static inline void spu_acquire(struct spu_context *ctx)
{
mutex_lock(&ctx->state_mutex);
@@ -196,21 +239,23 @@ void spu_unmap_mappings(struct spu_context *ctx);
void spu_forget(struct spu_context *ctx);
int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
void spu_acquire_saved(struct spu_context *ctx);
+void spu_release_saved(struct spu_context *ctx);
int spu_activate(struct spu_context *ctx, unsigned long flags);
void spu_deactivate(struct spu_context *ctx);
void spu_yield(struct spu_context *ctx);
-void spu_start_tick(struct spu_context *ctx);
-void spu_stop_tick(struct spu_context *ctx);
-void spu_sched_tick(struct work_struct *work);
+void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
+void spu_set_timeslice(struct spu_context *ctx);
+void spu_update_sched_info(struct spu_context *ctx);
+void __spu_update_sched_info(struct spu_context *ctx);
int __init spu_sched_init(void);
-void __exit spu_sched_exit(void);
+void spu_sched_exit(void);
extern char *isolated_loader;
/*
* spufs_wait
- * Same as wait_event_interruptible(), except that here
+ * Same as wait_event_interruptible(), except that here
* we need to call spu_release(ctx) before sleeping, and
* then spu_acquire(ctx) when awoken.
*/
@@ -256,4 +301,41 @@ struct spufs_coredump_reader {
extern struct spufs_coredump_reader spufs_coredump_read[];
extern int spufs_coredump_num_notes;
+/*
+ * This function is a little bit too large for an inline, but
+ * as fault.c is built into the kernel while the rest of spufs
+ * can be a module, we cannot move it out of line.
+ */
+static inline void spuctx_switch_state(struct spu_context *ctx,
+ enum spu_utilization_state new_state)
+{
+ unsigned long long curtime;
+ signed long long delta;
+ struct timespec ts;
+ struct spu *spu;
+ enum spu_utilization_state old_state;
+
+ ktime_get_ts(&ts);
+ curtime = timespec_to_ns(&ts);
+ delta = curtime - ctx->stats.tstamp;
+
+ WARN_ON(!mutex_is_locked(&ctx->state_mutex));
+ WARN_ON(delta < 0);
+
+ spu = ctx->spu;
+ old_state = ctx->stats.util_state;
+ ctx->stats.util_state = new_state;
+ ctx->stats.tstamp = curtime;
+
+ /*
+ * Update the physical SPU utilization statistics.
+ */
+ if (spu) {
+ ctx->stats.times[old_state] += delta;
+ spu->stats.times[old_state] += delta;
+ spu->stats.util_state = new_state;
+ spu->stats.tstamp = curtime;
+ }
+}
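A hypothetical call site to show the intended usage; the SPU_UTIL_* states
come from enum spu_utilization_state, and the locking requirement is the one
the WARN_ON above checks:

	mutex_lock(&ctx->state_mutex);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
	/* ... context switch or fault handling work ... */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	mutex_unlock(&ctx->state_mutex);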
+
#endif
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 71a0b41adb8c..27ffdae98e5a 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -70,7 +70,7 @@
}
#endif /* debug */
-#define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))
+#define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))
static inline void acquire_spu_lock(struct spu *spu)
{
@@ -180,7 +180,7 @@ static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
case MFC_CNTL_SUSPEND_COMPLETE:
if (csa) {
csa->priv2.mfc_control_RW =
- in_be64(&priv2->mfc_control_RW) |
+ MFC_CNTL_SUSPEND_MASK |
MFC_CNTL_SUSPEND_DMA_QUEUE;
}
break;
@@ -190,9 +190,7 @@ static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
MFC_CNTL_SUSPEND_COMPLETE);
if (csa) {
- csa->priv2.mfc_control_RW =
- in_be64(&priv2->mfc_control_RW) &
- ~MFC_CNTL_SUSPEND_DMA_QUEUE;
+ csa->priv2.mfc_control_RW = 0;
}
break;
}
@@ -251,16 +249,8 @@ static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
* Read MFC_CNTL[Ds]. Update saved copy of
* CSA.MFC_CNTL[Ds].
*/
- if (in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING) {
- csa->priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
- csa->suspend_time = get_cycles();
- out_be64(&priv2->spu_chnlcntptr_RW, 7ULL);
- eieio();
- csa->spu_chnldata_RW[7] = in_be64(&priv2->spu_chnldata_RW);
- eieio();
- } else {
- csa->priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
- }
+ csa->priv2.mfc_control_RW |=
+ in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING;
}
static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
@@ -271,7 +261,8 @@ static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
* Write MFC_CNTL[Dh] set to a '1' to halt
* the decrementer.
*/
- out_be64(&priv2->mfc_control_RW, MFC_CNTL_DECREMENTER_HALTED);
+ out_be64(&priv2->mfc_control_RW,
+ MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
eieio();
}
@@ -387,6 +378,19 @@ static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}
+static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Save the Prxy_TagStatus register in the CSA.
+ *
+	 * It is unnecessary to restore dma_tagstatus_R; however,
+ * dma_tagstatus_R in the CSA is accessed via backing_ops, so
+ * we must save it.
+ */
+ csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
+}
+
static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -602,7 +606,7 @@ static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
- u64 idx, ch_indices[7] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
+ u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
int i;
/* Save, Step 42:
@@ -613,7 +617,7 @@ static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);
/* Save the following CH: [0,3,4,24,25,27] */
- for (i = 0; i < 7; i++) {
+ for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
@@ -970,13 +974,13 @@ static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
*/
}
-static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
+static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
+ struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 7:
- * Restore, Step 47.
- * Write MFC_Cntl[Dh,Sc]='1','1' to suspend
+ * Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
* the queue and halt the decrementer.
*/
out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
@@ -1077,7 +1081,7 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
- u64 ch_indices[7] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
+ u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
u64 idx;
int i;
@@ -1089,7 +1093,7 @@ static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
out_be64(&priv2->spu_chnldata_RW, 0UL);
/* Reset the following CH: [0,3,4,24,25,27] */
- for (i = 0; i < 7; i++) {
+ for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
@@ -1276,7 +1280,15 @@ static inline void setup_decr(struct spu_state *csa, struct spu *spu)
cycles_t resume_time = get_cycles();
cycles_t delta_time = resume_time - csa->suspend_time;
+ csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
+ if (csa->lscsa->decr.slot[0] < delta_time) {
+ csa->lscsa->decr_status.slot[0] |=
+ SPU_DECR_STATUS_WRAPPED;
+ }
+
csa->lscsa->decr.slot[0] -= delta_time;
+ } else {
+ csa->lscsa->decr_status.slot[0] = 0;
}
}
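A worked example of the wrap test above, with invented numbers: if the context
was saved with decr.slot[0] == 100 and delta_time == 250 cycles have elapsed,
then 100 < 250 sets SPU_DECR_STATUS_WRAPPED, the unsigned subtraction leaves a
wrapped-around decrementer value, and restore_decr_wrapped() later converts
the flag into the pending timer-event bit (0x20) in the saved event status.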
@@ -1385,6 +1397,18 @@ static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
+static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Restore, Step 47.
+ * Write MFC_Cntl[Sc,Sm]='1','0' to suspend
+ * the queue.
+ */
+ out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
+ eieio();
+}
+
static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 49:
@@ -1535,10 +1559,10 @@ static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
* "wrapped" flag is set, OR in a '1' to
* CSA.SPU_Event_Status[Tm].
*/
- if (csa->lscsa->decr_status.slot[0] == 1) {
+ if (csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED) {
csa->spu_chnldata_RW[0] |= 0x20;
}
- if ((csa->lscsa->decr_status.slot[0] == 1) &&
+ if ((csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED) &&
(csa->spu_chnlcnt_RW[0] == 0 &&
((csa->spu_chnldata_RW[2] & 0x20) == 0x0) &&
((csa->spu_chnldata_RW[0] & 0x20) != 0x1))) {
@@ -1549,18 +1573,13 @@ static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
- u64 idx, ch_indices[7] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
+ u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
int i;
/* Restore, Step 59:
+ * Restore the following CH: [0,3,4,24,25,27]
*/
-
- /* Restore CH 1 without count */
- out_be64(&priv2->spu_chnlcntptr_RW, 1);
- out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[1]);
-
- /* Restore the following CH: [0,3,4,24,25,27] */
- for (i = 0; i < 7; i++) {
+ for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
@@ -1812,6 +1831,7 @@ static void save_csa(struct spu_state *prev, struct spu *spu)
save_mfc_queues(prev, spu); /* Step 19. */
save_ppu_querymask(prev, spu); /* Step 20. */
save_ppu_querytype(prev, spu); /* Step 21. */
+ save_ppu_tagstatus(prev, spu); /* NEW. */
save_mfc_csr_tsq(prev, spu); /* Step 22. */
save_mfc_csr_cmd(prev, spu); /* Step 23. */
save_mfc_csr_ato(prev, spu); /* Step 24. */
@@ -1918,7 +1938,7 @@ static void harvest(struct spu_state *prev, struct spu *spu)
set_switch_pending(prev, spu); /* Step 5. */
stop_spu_isolate(spu); /* NEW. */
remove_other_spu_access(prev, spu); /* Step 6. */
- suspend_mfc(prev, spu); /* Step 7. */
+ suspend_mfc_and_halt_decr(prev, spu); /* Step 7. */
wait_suspend_mfc_complete(prev, spu); /* Step 8. */
if (!suspend_spe(prev, spu)) /* Step 9. */
clear_spu_status(prev, spu); /* Step 10. */
@@ -1930,7 +1950,7 @@ static void harvest(struct spu_state *prev, struct spu *spu)
reset_spu_privcntl(prev, spu); /* Step 16. */
reset_spu_lslr(prev, spu); /* Step 17. */
setup_mfc_sr1(prev, spu); /* Step 18. */
- spu_invalidate_slbs(spu); /* Step 19. */
+ spu_invalidate_slbs(spu); /* Step 19. */
reset_ch_part1(prev, spu); /* Step 20. */
reset_ch_part2(prev, spu); /* Step 21. */
enable_interrupts(prev, spu); /* Step 22. */
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index 8e37bdf4dfda..43f0fb88abbc 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -47,7 +47,7 @@ static long do_spu_run(struct file *filp,
goto out;
i = SPUFS_I(filp->f_path.dentry->d_inode);
- ret = spufs_run_spu(filp, i->i_ctx, &npc, &status);
+ ret = spufs_run_spu(i->i_ctx, &npc, &status);
if (put_user(npc, unpc))
ret = -EFAULT;
@@ -76,8 +76,8 @@ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
}
#endif
-asmlinkage long sys_spu_create(const char __user *pathname,
- unsigned int flags, mode_t mode)
+asmlinkage long do_spu_create(const char __user *pathname, unsigned int flags,
+ mode_t mode, struct file *neighbor)
{
char *tmp;
int ret;
@@ -90,7 +90,7 @@ asmlinkage long sys_spu_create(const char __user *pathname,
ret = path_lookup(tmp, LOOKUP_PARENT|
LOOKUP_OPEN|LOOKUP_CREATE, &nd);
if (!ret) {
- ret = spufs_create(&nd, flags, mode);
+ ret = spufs_create(&nd, flags, mode, neighbor);
path_release(&nd);
}
putname(tmp);
@@ -99,8 +99,32 @@ asmlinkage long sys_spu_create(const char __user *pathname,
return ret;
}
+#ifndef MODULE
+asmlinkage long sys_spu_create(const char __user *pathname, unsigned int flags,
+ mode_t mode, int neighbor_fd)
+{
+ int fput_needed;
+ struct file *neighbor;
+ long ret;
+
+ if (flags & SPU_CREATE_AFFINITY_SPU) {
+ ret = -EBADF;
+ neighbor = fget_light(neighbor_fd, &fput_needed);
+ if (neighbor) {
+ ret = do_spu_create(pathname, flags, mode, neighbor);
+ fput_light(neighbor, fput_needed);
+ }
+	} else {
+ ret = do_spu_create(pathname, flags, mode, NULL);
+ }
+
+ return ret;
+}
+#endif
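From userspace the new fourth argument might be exercised along these lines;
a hypothetical sketch using the raw syscall, since an updated library wrapper
is outside this patch:

	/* create a reference context, then one placed adjacent to it */
	int ref = syscall(__NR_spu_create, "/spu/ref", 0, 0755, 0);
	int aff = syscall(__NR_spu_create, "/spu/aff",
			  SPU_CREATE_AFFINITY_SPU, 0755, ref);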
+
struct spufs_calls spufs_calls = {
- .create_thread = sys_spu_create,
+ .create_thread = do_spu_create,
.spu_run = do_spu_run,
.owner = THIS_MODULE,
};