Diffstat (limited to 'include/linux/percpu-refcount.h')
-rw-r--r--  include/linux/percpu-refcount.h  59
1 file changed, 46 insertions(+), 13 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index d5c89e0dd0e6..b4337646388b 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -128,12 +128,16 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
static inline bool __ref_is_percpu(struct percpu_ref *ref,
unsigned long __percpu **percpu_countp)
{
- unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
-
/* paired with smp_store_release() in percpu_ref_reinit() */
- smp_read_barrier_depends();
+ unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
- if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
+ /*
+ * Theoretically, the following could test just ATOMIC; however,
+ * then we'd have to mask off DEAD separately as DEAD may be
+ * visible without ATOMIC if we race with percpu_ref_kill(). DEAD
+ * implies ATOMIC anyway. Test them together.
+ */
+ if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
return false;
*percpu_countp = (unsigned long __percpu *)percpu_ptr;
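
The combined test above relies on the flag bits stored in the low bits of percpu_count_ptr. For reference, they are defined earlier in this header roughly as follows (a sketch from memory, not a verbatim quote; see the header itself for the authoritative values):

enum {
        __PERCPU_REF_ATOMIC     = 1LU << 0,     /* operating in atomic mode */
        __PERCPU_REF_DEAD       = 1LU << 1,     /* (being) killed */
        __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

        __PERCPU_REF_FLAG_BITS  = 2,
};

Testing the combined __PERCPU_REF_ATOMIC_DEAD mask handles a racing percpu_ref_kill(), where DEAD may become visible before ATOMIC, without having to mask DEAD off separately.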
@@ -141,28 +145,42 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
}
/**
- * percpu_ref_get - increment a percpu refcount
+ * percpu_ref_get_many - increment a percpu refcount
* @ref: percpu_ref to get
+ * @nr: number of references to get
*
- * Analagous to atomic_long_inc().
+ * Analogous to atomic_long_add().
*
* This function is safe to call as long as @ref is between init and exit.
*/
-static inline void percpu_ref_get(struct percpu_ref *ref)
+static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long __percpu *percpu_count;
rcu_read_lock_sched();
if (__ref_is_percpu(ref, &percpu_count))
- this_cpu_inc(*percpu_count);
+ this_cpu_add(*percpu_count, nr);
else
- atomic_long_inc(&ref->count);
+ atomic_long_add(nr, &ref->count);
rcu_read_unlock_sched();
}
/**
+ * percpu_ref_get - increment a percpu refcount
+ * @ref: percpu_ref to get
+ *
+ * Analogous to atomic_long_inc().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_get(struct percpu_ref *ref)
+{
+ percpu_ref_get_many(ref, 1);
+}
+
+/**
* percpu_ref_tryget - try to increment a percpu refcount
* @ref: percpu_ref to try-get
*
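
To make the new bulk helper concrete, here is a hedged usage sketch, assuming a hypothetical caller that charges one reference per request in a batch; queue_grab_refs() and nr_reqs are made-up names for illustration only:

#include <linux/percpu-refcount.h>

/*
 * Hypothetical helper: take @nr_reqs references in one shot instead of
 * calling percpu_ref_get() in a loop. In percpu mode this is a single
 * this_cpu_add(); once the ref has switched to atomic mode it is a
 * single atomic_long_add().
 */
static void queue_grab_refs(struct percpu_ref *ref, unsigned long nr_reqs)
{
        percpu_ref_get_many(ref, nr_reqs);
}

As with percpu_ref_get(), the caller must keep @ref between init and exit for the duration of the call.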
@@ -225,29 +243,44 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
}
/**
- * percpu_ref_put - decrement a percpu refcount
+ * percpu_ref_put_many - decrement a percpu refcount
* @ref: percpu_ref to put
+ * @nr: number of references to put
*
* Decrement the refcount, and if 0, call the release function (which was passed
* to percpu_ref_init())
*
* This function is safe to call as long as @ref is between init and exit.
*/
-static inline void percpu_ref_put(struct percpu_ref *ref)
+static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long __percpu *percpu_count;
rcu_read_lock_sched();
if (__ref_is_percpu(ref, &percpu_count))
- this_cpu_dec(*percpu_count);
- else if (unlikely(atomic_long_dec_and_test(&ref->count)))
+ this_cpu_sub(*percpu_count, nr);
+ else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
ref->release(ref);
rcu_read_unlock_sched();
}
/**
+ * percpu_ref_put - decrement a percpu refcount
+ * @ref: percpu_ref to put
+ *
+ * Decrement the refcount, and if 0, call the release function (which was passed
+ * to percpu_ref_init())
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_put(struct percpu_ref *ref)
+{
+ percpu_ref_put_many(ref, 1);
+}
+
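
And the matching release side, again as a hedged sketch; queue_drop_refs() mirrors the hypothetical helper above and must be passed the same count that was taken:

/*
 * Hypothetical counterpart to queue_grab_refs(): drop @nr_reqs
 * references in one operation. If this brings the count to zero in
 * atomic mode, the release callback passed to percpu_ref_init() runs.
 */
static void queue_drop_refs(struct percpu_ref *ref, unsigned long nr_reqs)
{
        percpu_ref_put_many(ref, nr_reqs);
}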
+/**
* percpu_ref_is_zero - test whether a percpu refcount reached zero
* @ref: percpu_ref to test
*