author		NeilBrown <neilb@suse.com>	2018-06-18 12:52:50 +1000
committer	David S. Miller <davem@davemloft.net>	2018-06-22 13:43:28 +0900
commit		c0690016a73fe6bd456887bbbe6e10c7f0096554 (patch)
tree		7b835ab9e83decb0f9be1f616daa5374dd47559d /lib/rhashtable.c
parent		0ad66449aa3cbaedbdeaf55bffce74084bb7e9f9 (diff)
rhashtable: clean up dereference of ->future_tbl.
Using rht_dereference_bucket() to dereference ->future_tbl looks like a type error, and could be confusing. Using rht_dereference_rcu() to test a pointer for NULL adds an unnecessary barrier - rcu_access_pointer() is preferred for NULL tests when no lock is held.

This patch uses three different ways to access ->future_tbl:
- if we know the mutex is held, use rht_dereference();
- if we don't hold the mutex and are only testing for NULL, use rcu_access_pointer();
- otherwise (using RCU protection for a true dereference), use rht_dereference_rcu().

Note that this includes a simplification of the call to rhashtable_last_table() - we no longer do an extra dereference before the call.

Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
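As a minimal sketch of those three rules (rht_dereference(), rcu_access_pointer() and rht_dereference_rcu() are the real rhashtable/RCU accessors; the function itself and its mutex_held flag are hypothetical, for illustration only):

/* Illustration only -- not part of this patch.  Shows which accessor
 * matches which locking context when reading ->future_tbl.
 */
static struct bucket_table *next_table_sketch(struct rhashtable *ht,
					      struct bucket_table *tbl,
					      bool mutex_held)
{
	if (mutex_held)
		/* ht->mutex held: plain rht_dereference() is sufficient. */
		return rht_dereference(tbl->future_tbl, ht);

	/* No mutex, only a NULL test: rcu_access_pointer() avoids the
	 * ordering a full dereference would impose, because its result
	 * is never dereferenced.
	 */
	if (!rcu_access_pointer(tbl->future_tbl))
		return NULL;

	/* Inside an RCU read-side section and about to follow the
	 * pointer: rht_dereference_rcu() checks and orders the access.
	 */
	return rht_dereference_rcu(tbl->future_tbl, ht);
}

rcu_access_pointer() is legitimate without rcu_read_lock() held precisely because the returned value may only be compared (e.g. against NULL), never dereferenced.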
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--	lib/rhashtable.c	9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 52ec83212856..0e04947b7e0c 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -226,8 +226,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-	struct bucket_table *new_tbl = rhashtable_last_table(ht,
-						rht_dereference_rcu(old_tbl->future_tbl, ht));
+	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
 	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
 	int err = -EAGAIN;
 	struct rhash_head *head, *next, *entry;
@@ -467,7 +466,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
 
 fail:
 	/* Do not fail the insert if someone else did a rehash. */
-	if (likely(rcu_dereference_raw(tbl->future_tbl)))
+	if (likely(rcu_access_pointer(tbl->future_tbl)))
 		return 0;
 
 	/* Schedule async rehash to retry allocation in process context. */
@@ -540,7 +539,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
 		return ERR_CAST(data);
 
-	new_tbl = rcu_dereference(tbl->future_tbl);
+	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 	if (new_tbl)
 		return new_tbl;
 
@@ -599,7 +598,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 			break;
 
 		spin_unlock_bh(lock);
-		tbl = rcu_dereference(tbl->future_tbl);
+		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 	}
 
 	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
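For reference, the simplification in the first hunk works because rhashtable_last_table() already walks the entire ->future_tbl chain itself. Its body at this point in lib/rhashtable.c looked roughly like the following (quoted from memory, so treat it as a sketch rather than the exact source):

/* Walk ->future_tbl until the newest table is found. */
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

Since the loop performs the ->future_tbl dereference on every step anyway, passing old_tbl directly reaches the same final table, and the extra rht_dereference_rcu() the caller used to do before the call was redundant.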