-rw-r--r--  include/linux/compiler.h    | 17
-rw-r--r--  ipc/sem.c                   | 14
-rw-r--r--  kernel/locking/qspinlock.c  |  2
3 files changed, 15 insertions(+), 18 deletions(-)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 2bcaedc0f032..59a7004fc7dd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -305,6 +305,17 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
})
/**
+ * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
+ *
+ * A control dependency provides LOAD->STORE ordering; the additional RMB
+ * provides LOAD->LOAD ordering. Together they provide LOAD->{LOAD,STORE}
+ * ordering, i.e. (load)-ACQUIRE.
+ *
+ * Architectures that do not speculate loads may define this as barrier().
+ */
+#define smp_acquire__after_ctrl_dep() smp_rmb()
+
+/**
* smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
* @ptr: pointer to the variable to wait on
* @cond: boolean expression to wait for
@@ -314,10 +325,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
*
* Because C lacks lambda expressions, we load the value of *ptr into a
* pre-named variable @VAL for use in @cond.
- *
- * The control dependency provides a LOAD->STORE order, the additional RMB
- * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
- * aka. ACQUIRE.
*/
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({ \
@@ -329,7 +336,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
break; \
cpu_relax(); \
} \
- smp_rmb(); /* ctrl + rmb := acquire */ \
+ smp_acquire__after_ctrl_dep(); \
VAL; \
})
#endif
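
For context, here is a minimal usage sketch. It is not part of the patch: the publish()/wait_and_read() functions, the flag and data variables, and the value 42 are all illustrative. The waiter's loop exit is only a control dependency on the flag load, so later stores are ordered but later loads are not, until smp_acquire__after_ctrl_dep() upgrades the ordering to ACQUIRE.

#include <linux/compiler.h>	/* READ_ONCE(), smp_acquire__after_ctrl_dep() */
#include <asm/barrier.h>	/* smp_store_release() */
#include <asm/processor.h>	/* cpu_relax() */

static int data;
static int flag;

/* Publisher: make data visible before flag, via the RELEASE store. */
static void publish(void)
{
	data = 42;
	smp_store_release(&flag, 1);
}

/* Waiter: spin until flag is set, then upgrade the control dependency
 * to ACQUIRE so the load of data cannot be speculated ahead of the loop. */
static int wait_and_read(void)
{
	while (!READ_ONCE(flag))
		cpu_relax();
	smp_acquire__after_ctrl_dep();
	return data;	/* observes 42 once flag was seen set */
}

The waiter's loop plus barrier is essentially what the generic smp_cond_load_acquire(&flag, VAL) fallback above expands to.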
diff --git a/ipc/sem.c b/ipc/sem.c
index b3757ea0694b..84dff3df11a4 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -260,16 +260,6 @@ static void sem_rcu_free(struct rcu_head *head)
}
/*
- * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
- * are only control barriers.
- * The code must pair with spin_unlock(&sem->lock) or
- * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
- *
- * smp_rmb() is sufficient, as writes cannot pass the control barrier.
- */
-#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
-
-/*
* Wait until all currently ongoing simple ops have completed.
* Caller must own sem_perm.lock.
* New simple ops cannot start, because simple ops first check
@@ -292,7 +282,7 @@ static void sem_wait_array(struct sem_array *sma)
sem = sma->sem_base + i;
spin_unlock_wait(&sem->lock);
}
- ipc_smp_acquire__after_spin_is_unlocked();
+ smp_acquire__after_ctrl_dep();
}
/*
@@ -350,7 +340,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
* complex_count++;
* spin_unlock(sem_perm.lock);
*/
- ipc_smp_acquire__after_spin_is_unlocked();
+ smp_acquire__after_ctrl_dep();
/*
* Now repeat the test of complex_count:
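
To make the ipc/sem.c call sites concrete, a sketch under invented names (drain_and_read(), locks, n, state; this is not the actual sem_wait_array()/sem_lock() code): each spin_unlock_wait() gives only a control dependency, so a later load of state published by the lock holders needs the explicit upgrade.

#include <linux/spinlock.h>
#include <linux/compiler.h>	/* READ_ONCE(), smp_acquire__after_ctrl_dep() */

/* Wait for every current holder of a per-item lock to drop it, then
 * read the state those holders published before their spin_unlock(). */
static int drain_and_read(spinlock_t *locks, int n, int *state)
{
	int i;

	for (i = 0; i < n; i++)
		spin_unlock_wait(&locks[i]);	/* control dependency only */
	smp_acquire__after_ctrl_dep();		/* also order the load below */
	return READ_ONCE(*state);
}

The resulting ACQUIRE pairs with the RELEASE in each holder's spin_unlock(), which is the pairing the removed ipc_smp_acquire__after_spin_is_unlocked() comment described.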
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 1b8dda90ebfa..730655533440 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -379,7 +379,7 @@ void queued_spin_unlock_wait(struct qspinlock *lock)
cpu_relax();
done:
- smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+ smp_acquire__after_ctrl_dep();
}
EXPORT_SYMBOL(queued_spin_unlock_wait);
#endif
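
Likewise for the qspinlock site, a hypothetical caller (peek_after_holder() and shared are invented names, and a qspinlock-based architecture is assumed): queued_spin_unlock_wait() now returns with ACQUIRE semantics, pairing with the previous holder's RELEASE on unlock.

#include <linux/spinlock.h>
#include <linux/compiler.h>	/* READ_ONCE() */

/* Once the current holder (if any) has unlocked, the ACQUIRE provided
 * by smp_acquire__after_ctrl_dep() inside queued_spin_unlock_wait()
 * guarantees this CPU observes the writes made under the lock. */
static int peek_after_holder(struct qspinlock *lock, int *shared)
{
	queued_spin_unlock_wait(lock);
	return READ_ONCE(*shared);
}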