author	Alex Van Brunt <avanbrunt@nvidia.com>	2014-05-16 12:37:06 -0700
committer	Mandar Padmawar <mpadmawar@nvidia.com>	2014-05-23 01:57:53 -0700
commit	1937ef6f9ce114ad8a9cac6a06e3dce8a1affae2 (patch)
tree	32e8ca16ec87c1b5148ca53bd55e233a18c19ab6 /kernel
parent	82f35296c448f4701e4cfce7b37114a9915ea2a2 (diff)
mutex: save power with better cpu_relax
Use cpu_relaxed_read() and cpu_read_relax() so that more architectures
can "relax" at lower power instead of busy-spinning.

Bug 1440421

Change-Id: I48e36d7b3c953fe43ebb23ea814de7738c91e394
Signed-off-by: Alex Van Brunt <avanbrunt@nvidia.com>
Reviewed-on: http://git-master/r/412712
Reviewed-by: Sumit Singh <sumsingh@nvidia.com>
GVS: Gerrit_Virtual_Submit
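The cpu_relaxed_read()/cpu_read_relax() helpers come from elsewhere in this
tree and are not shown in this diff. As a minimal sketch, generic fallbacks
for an architecture with no power-saving wait primitive could plausibly look
like the following (the macro names match the patch, but these fallback
definitions are an assumption, not code from this tree):

/*
 * Hypothetical generic fallbacks: degrade to the old busy-spin
 * behaviour when the architecture provides nothing better.
 */
#ifndef cpu_relaxed_read
#define cpu_relaxed_read(p)		ACCESS_ONCE(*(p))	/* plain volatile load */
#define cpu_relaxed_read_long(p)	ACCESS_ONCE(*(p))	/* same, long-sized */
#define cpu_read_relax()		arch_mutex_cpu_relax()	/* old spin-wait hint */
#endif

With fallbacks like these, the converted wait loops behave exactly as before
on architectures that opt out, so the patch is a no-op there.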
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/mutex.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ad53a664f113..ee2254231661 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -137,8 +137,8 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
ACCESS_ONCE(prev->next) = node;
smp_wmb();
/* Wait until the lock holder passes the lock down */
- while (!ACCESS_ONCE(node->locked))
- arch_mutex_cpu_relax();
+ while (!cpu_relaxed_read(&(node->locked)))
+ cpu_read_relax();
}
static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
@@ -152,8 +152,8 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
if (cmpxchg(lock, node, NULL) == node)
return;
/* Wait until the next pointer is set */
- while (!(next = ACCESS_ONCE(node->next)))
- arch_mutex_cpu_relax();
+ while (!(next = cpu_relaxed_read_long(&(node->next))))
+ cpu_read_relax();
}
ACCESS_ONCE(next->locked) = 1;
smp_wmb();
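Both hunks follow the same pattern: poll the flag with cpu_relaxed_read*()
and, between polls, call cpu_read_relax() so the CPU can wait at lower power
rather than hammering the cache line. On ARM64 this plausibly maps to the
exclusive monitor plus WFE; the sketch below is an assumption about that
mapping (hypothetical helper names), not code from this patch:

/*
 * Assumed ARM64 mapping (illustrative only): ldxr arms the exclusive
 * monitor for the address, so a later wfe sleeps until another CPU
 * writes that location (clearing the monitor) or sends an event.
 */
static inline unsigned long relaxed_read_arm64(unsigned long *addr)
{
	unsigned long val;

	asm volatile("ldxr %0, %1" : "=r" (val) : "Q" (*addr));
	return val;
}

static inline void read_relax_arm64(void)
{
	asm volatile("wfe" ::: "memory");
}

Waiting in WFE burns far less power than a spin-hint loop, and the very
store that hands over the lock is what wakes the waiter, so wakeup latency
stays close to plain polling.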