Diffstat (limited to 'drivers/video/tegra/host/nvhost_syncpt.c')
 -rw-r--r--  drivers/video/tegra/host/nvhost_syncpt.c | 161 ++++++++++++++-----
 1 file changed, 113 insertions(+), 48 deletions(-)
diff --git a/drivers/video/tegra/host/nvhost_syncpt.c b/drivers/video/tegra/host/nvhost_syncpt.c
index 13ad0fc3a3cf..9fa7d0652c1f 100644
--- a/drivers/video/tegra/host/nvhost_syncpt.c
+++ b/drivers/video/tegra/host/nvhost_syncpt.c
@@ -21,10 +21,12 @@
#include <linux/nvhost_ioctl.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <trace/events/nvhost.h>
#include "nvhost_syncpt.h"
+#include "nvhost_acm.h"
#include "dev.h"
+#include "chip_support.h"
-#define MAX_STUCK_CHECK_COUNT 15
#define MAX_SYNCPT_LENGTH 5
/* Name of sysfs node for min and max value */
static const char *min_name = "min";
@@ -36,12 +38,12 @@ static const char *max_name = "max";
void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
{
u32 i;
- BUG_ON(!(syncpt_op(sp).reset && syncpt_op(sp).reset_wait_base));
+ BUG_ON(!(syncpt_op().reset && syncpt_op().reset_wait_base));
- for (i = 0; i < sp->nb_pts; i++)
- syncpt_op(sp).reset(sp, i);
- for (i = 0; i < sp->nb_bases; i++)
- syncpt_op(sp).reset_wait_base(sp, i);
+ for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++)
+ syncpt_op().reset(sp, i);
+ for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++)
+ syncpt_op().reset_wait_base(sp, i);
wmb();
}
@@ -51,17 +53,17 @@ void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
void nvhost_syncpt_save(struct nvhost_syncpt *sp)
{
u32 i;
- BUG_ON(!(syncpt_op(sp).update_min && syncpt_op(sp).read_wait_base));
+ BUG_ON(!(syncpt_op().update_min && syncpt_op().read_wait_base));
- for (i = 0; i < sp->nb_pts; i++) {
- if (client_managed(i))
- syncpt_op(sp).update_min(sp, i);
+ for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
+ if (nvhost_syncpt_client_managed(sp, i))
+ syncpt_op().update_min(sp, i);
else
BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
}
- for (i = 0; i < sp->nb_bases; i++)
- syncpt_op(sp).read_wait_base(sp, i);
+ for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++)
+ syncpt_op().read_wait_base(sp, i);
}
/**
@@ -69,9 +71,14 @@ void nvhost_syncpt_save(struct nvhost_syncpt *sp)
*/
u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
- BUG_ON(!syncpt_op(sp).update_min);
+ u32 val;
+
+ BUG_ON(!syncpt_op().update_min);
+
+ val = syncpt_op().update_min(sp, id);
+ trace_nvhost_syncpt_update_min(id, val);
- return syncpt_op(sp).update_min(sp, id);
+ return val;
}
/**
@@ -80,9 +87,9 @@ u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
u32 val;
- BUG_ON(!syncpt_op(sp).update_min);
+ BUG_ON(!syncpt_op().update_min);
nvhost_module_busy(syncpt_to_dev(sp)->dev);
- val = syncpt_op(sp).update_min(sp, id);
+ val = syncpt_op().update_min(sp, id);
nvhost_module_idle(syncpt_to_dev(sp)->dev);
return val;
}
@@ -93,9 +100,9 @@ u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
u32 val;
- BUG_ON(!syncpt_op(sp).read_wait_base);
+ BUG_ON(!syncpt_op().read_wait_base);
nvhost_module_busy(syncpt_to_dev(sp)->dev);
- syncpt_op(sp).read_wait_base(sp, id);
+ syncpt_op().read_wait_base(sp, id);
val = sp->base_val[id];
nvhost_module_idle(syncpt_to_dev(sp)->dev);
return val;
@@ -107,8 +114,8 @@ u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
*/
void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
- BUG_ON(!syncpt_op(sp).cpu_incr);
- syncpt_op(sp).cpu_incr(sp, id);
+ BUG_ON(!syncpt_op().cpu_incr);
+ syncpt_op().cpu_incr(sp, id);
}
/**
@@ -116,7 +123,7 @@ void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
*/
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
- if (client_managed(id))
+ if (nvhost_syncpt_client_managed(sp, id))
nvhost_syncpt_incr_max(sp, id, 1);
nvhost_module_busy(syncpt_to_dev(sp)->dev);
nvhost_syncpt_cpu_incr(sp, id);
@@ -124,6 +131,19 @@ void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
}
/**
+ * Updates the sync point from hardware, and returns true if the syncpoint
+ * is expired, false if we may need to wait
+ */
+static bool syncpt_update_min_is_expired(
+ struct nvhost_syncpt *sp,
+ u32 id,
+ u32 thresh)
+{
+ syncpt_op().update_min(sp, id);
+ return nvhost_syncpt_is_expired(sp, id, thresh);
+}
+
+/**
* Main entrypoint for syncpoint value waits.
*/
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
@@ -149,7 +169,7 @@ int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
nvhost_module_busy(syncpt_to_dev(sp)->dev);
/* try to read from register */
- val = syncpt_op(sp).update_min(sp, id);
+ val = syncpt_op().update_min(sp, id);
if (nvhost_syncpt_is_expired(sp, id, thresh)) {
if (value)
*value = val;
@@ -184,9 +204,9 @@ int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
while (timeout) {
u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
int remain = wait_event_interruptible_timeout(wq,
- nvhost_syncpt_is_expired(sp, id, thresh),
+ syncpt_update_min_is_expired(sp, id, thresh),
check);
- if (remain > 0) {
+ if (remain > 0 || nvhost_syncpt_is_expired(sp, id, thresh)) {
if (value)
*value = nvhost_syncpt_read_min(sp, id);
err = 0;
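
The `remain > 0 || nvhost_syncpt_is_expired(sp, id, thresh)` change above covers the race where the syncpoint reaches the threshold at the same moment the timed wait runs out: in this kernel, wait_event_interruptible_timeout() can return 0 even though the condition just became true, so the condition is checked once more before treating the period as a timeout. A minimal user-space analogue of that recheck-after-timeout pattern (hypothetical names, pthreads standing in for kernel wait queues, not driver code):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned int syncpt_min;	/* stand-in for the syncpoint value */

static bool is_expired(unsigned int thresh)
{
	/* same wrap-safe style of comparison as nvhost_syncpt_is_expired() */
	return (int)(syncpt_min - thresh) >= 0;
}

/* Returns 0 once thresh is reached, -EAGAIN if the deadline passes first. */
static int wait_thresh(unsigned int thresh, const struct timespec *deadline)
{
	int rc = 0;

	pthread_mutex_lock(&lock);
	while (!is_expired(thresh) && rc != ETIMEDOUT)
		rc = pthread_cond_timedwait(&cond, &lock, deadline);
	/* recheck: a wakeup may have raced with the timeout */
	rc = is_expired(thresh) ? 0 : -EAGAIN;
	pthread_mutex_unlock(&lock);
	return rc;
}
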
@@ -198,20 +218,19 @@ int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
}
if (timeout != NVHOST_NO_TIMEOUT)
timeout -= check;
- if (timeout) {
+ if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
dev_warn(&syncpt_to_dev(sp)->dev->dev,
"%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
- current->comm, id, syncpt_op(sp).name(sp, id),
+ current->comm, id, syncpt_op().name(sp, id),
thresh, timeout);
- syncpt_op(sp).debug(sp);
- if (check_count > MAX_STUCK_CHECK_COUNT) {
+ syncpt_op().debug(sp);
+ if (check_count == MAX_STUCK_CHECK_COUNT) {
if (low_timeout) {
dev_warn(&syncpt_to_dev(sp)->dev->dev,
"is timeout %d too low?\n",
low_timeout);
}
nvhost_debug_dump(syncpt_to_dev(sp));
- BUG();
}
check_count++;
}
@@ -279,7 +298,7 @@ bool nvhost_syncpt_is_expired(
* If future value is zero, we have a client managed sync point. In that
* case we do a direct comparison.
*/
- if (!client_managed(id))
+ if (!nvhost_syncpt_client_managed(sp, id))
return future_val - thresh >= current_val - thresh;
else
return (s32)(current_val - thresh) >= 0;
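
Both comparisons above rely on unsigned wrap-around arithmetic rather than absolute ordering: a host-managed syncpoint is unexpired only while thresh lies in the window (current_val, future_val] modulo 2^32, while a client-managed one only needs the signed distance from thresh to current_val to be non-negative. A small standalone illustration with hypothetical values near the 32-bit wrap point (not taken from the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t current_val = 0xfffffff0u;	/* last value read from HW   */
	uint32_t thresh      = 0x00000010u;	/* wait threshold, past wrap */
	uint32_t future_val  = 0x00000020u;	/* max value already queued  */

	/* Host-managed: thresh is inside (current_val, future_val] mod 2^32,
	 * so the wait is not yet satisfied even though thresh < current_val
	 * numerically. */
	int expired = (future_val - thresh >= current_val - thresh);
	printf("host-managed expired: %d\n", expired);	/* prints 0 */

	/* Client-managed: expired once the signed distance from thresh to
	 * current_val becomes non-negative. */
	current_val = 0x00000018u;		/* HW value after the wrap   */
	expired = ((int32_t)(current_val - thresh) >= 0);
	printf("client-managed expired: %d\n", expired);	/* prints 1 */

	return 0;
}
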
@@ -287,7 +306,7 @@ bool nvhost_syncpt_is_expired(
void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
{
- syncpt_op(sp).debug(sp);
+ syncpt_op().debug(sp);
}
int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx)
@@ -296,7 +315,7 @@ int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx)
u32 reg;
nvhost_module_busy(host->dev);
- reg = syncpt_op(sp).mutex_try_lock(sp, idx);
+ reg = syncpt_op().mutex_try_lock(sp, idx);
if (reg) {
nvhost_module_idle(host->dev);
return -EBUSY;
@@ -307,20 +326,15 @@ int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx)
void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx)
{
- syncpt_op(sp).mutex_unlock(sp, idx);
+ syncpt_op().mutex_unlock(sp, idx);
nvhost_module_idle(syncpt_to_dev(sp)->dev);
atomic_dec(&sp->lock_counts[idx]);
}
-/* check for old WAITs to be removed (avoiding a wrap) */
-int nvhost_syncpt_wait_check(struct nvhost_syncpt *sp,
- struct nvmap_client *nvmap,
- u32 waitchk_mask,
- struct nvhost_waitchk *wait,
- int num_waitchk)
+/* remove a wait pointed to by patch_addr */
+int nvhost_syncpt_patch_wait(struct nvhost_syncpt *sp, void *patch_addr)
{
- return syncpt_op(sp).wait_check(sp, nvmap,
- waitchk_mask, wait, num_waitchk);
+ return syncpt_op().patch_wait(sp, patch_addr);
}
/* Displays the current value of the sync point via sysfs */
@@ -354,10 +368,15 @@ int nvhost_syncpt_init(struct nvhost_device *dev,
int err = 0;
/* Allocate structs for min, max and base values */
- sp->min_val = kzalloc(sizeof(atomic_t) * sp->nb_pts, GFP_KERNEL);
- sp->max_val = kzalloc(sizeof(atomic_t) * sp->nb_pts, GFP_KERNEL);
- sp->base_val = kzalloc(sizeof(u32) * sp->nb_bases, GFP_KERNEL);
- sp->lock_counts = kzalloc(sizeof(atomic_t) * sp->nb_mlocks, GFP_KERNEL);
+ sp->min_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
+ GFP_KERNEL);
+ sp->max_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
+ GFP_KERNEL);
+ sp->base_val = kzalloc(sizeof(u32) * nvhost_syncpt_nb_bases(sp),
+ GFP_KERNEL);
+ sp->lock_counts =
+ kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_mlocks(sp),
+ GFP_KERNEL);
if (!(sp->min_val && sp->max_val && sp->base_val && sp->lock_counts)) {
/* frees happen in the deinit */
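
As a side note on the allocation pattern (not part of this change): the same four arrays could be allocated with kcalloc(), which zeroes memory like kzalloc() and additionally checks the count * size multiplication for overflow. A minimal sketch, assuming the surrounding error handling stays exactly as in the hunk above:

	sp->min_val = kcalloc(nvhost_syncpt_nb_pts(sp), sizeof(atomic_t),
			GFP_KERNEL);
	sp->max_val = kcalloc(nvhost_syncpt_nb_pts(sp), sizeof(atomic_t),
			GFP_KERNEL);
	sp->base_val = kcalloc(nvhost_syncpt_nb_bases(sp), sizeof(u32),
			GFP_KERNEL);
	sp->lock_counts = kcalloc(nvhost_syncpt_nb_mlocks(sp), sizeof(atomic_t),
			GFP_KERNEL);
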
@@ -372,15 +391,15 @@ int nvhost_syncpt_init(struct nvhost_device *dev,
}
/* Allocate two attributes for each sync point: min and max */
- sp->syncpt_attrs = kzalloc(sizeof(*sp->syncpt_attrs) * sp->nb_pts * 2,
- GFP_KERNEL);
+ sp->syncpt_attrs = kzalloc(sizeof(*sp->syncpt_attrs)
+ * nvhost_syncpt_nb_pts(sp) * 2, GFP_KERNEL);
if (!sp->syncpt_attrs) {
err = -ENOMEM;
goto fail;
}
/* Fill in the attributes */
- for (i = 0; i < sp->nb_pts; i++) {
+ for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
char name[MAX_SYNCPT_LENGTH];
struct kobject *kobj;
struct nvhost_syncpt_attr *min = &sp->syncpt_attrs[i*2];
@@ -441,3 +460,49 @@ void nvhost_syncpt_deinit(struct nvhost_syncpt *sp)
kfree(sp->syncpt_attrs);
sp->syncpt_attrs = NULL;
}
+
+int nvhost_syncpt_client_managed(struct nvhost_syncpt *sp, u32 id)
+{
+ return BIT(id) & syncpt_to_dev(sp)->info.client_managed;
+}
+
+int nvhost_syncpt_nb_pts(struct nvhost_syncpt *sp)
+{
+ return syncpt_to_dev(sp)->info.nb_pts;
+}
+
+int nvhost_syncpt_nb_bases(struct nvhost_syncpt *sp)
+{
+ return syncpt_to_dev(sp)->info.nb_bases;
+}
+
+int nvhost_syncpt_nb_mlocks(struct nvhost_syncpt *sp)
+{
+ return syncpt_to_dev(sp)->info.nb_mlocks;
+}
+
+/* public sync point API */
+u32 nvhost_syncpt_incr_max_ext(struct nvhost_device *dev, u32 id, u32 incrs)
+{
+ struct nvhost_syncpt *sp = &(nvhost_get_host(dev)->syncpt);
+ return nvhost_syncpt_incr_max(sp, id, incrs);
+}
+
+void nvhost_syncpt_cpu_incr_ext(struct nvhost_device *dev, u32 id)
+{
+ struct nvhost_syncpt *sp = &(nvhost_get_host(dev)->syncpt);
+ nvhost_syncpt_cpu_incr(sp, id);
+}
+
+u32 nvhost_syncpt_read_ext(struct nvhost_device *dev, u32 id)
+{
+ struct nvhost_syncpt *sp = &(nvhost_get_host(dev)->syncpt);
+ return nvhost_syncpt_read(sp, id);
+}
+
+int nvhost_syncpt_wait_timeout_ext(struct nvhost_device *dev, u32 id, u32 thresh,
+ u32 timeout, u32 *value)
+{
+ struct nvhost_syncpt *sp = &(nvhost_get_host(dev)->syncpt);
+ return nvhost_syncpt_wait_timeout(sp, id, thresh, timeout, value);
+}
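
The new *_ext entry points added at the end give other kernel drivers a way to manage syncpoints through a struct nvhost_device handle alone. A hedged usage sketch of a hypothetical client (only the four *_ext prototypes come from this diff; the header location, the helper name and the power-management context are assumptions):

/* Hypothetical client code, illustrative only. */
#include <linux/jiffies.h>	/* msecs_to_jiffies() */
#include <linux/nvhost.h>	/* assumed home of the *_ext prototypes */

static int example_syncpt_roundtrip(struct nvhost_device *dev, u32 id)
{
	u32 val;
	/* reserve one increment; the returned value is the new threshold */
	u32 thresh = nvhost_syncpt_incr_max_ext(dev, id, 1);

	/* perform the increment from the CPU */
	nvhost_syncpt_cpu_incr_ext(dev, id);

	/* wait up to 100 ms (timeout is in jiffies) for the threshold */
	return nvhost_syncpt_wait_timeout_ext(dev, id, thresh,
					msecs_to_jiffies(100), &val);
}
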