author     Damien Le Moal <damien.lemoal@wdc.com>    2018-10-17 18:05:07 +0900
committer  Mike Snitzer <snitzer@redhat.com>         2018-10-18 15:15:23 -0400
commit     33c2865f8d011a2ca9f67124ddab9dc89382e9f1 (patch)
tree       5680e9ef136dd26d8d313380bbd87f758128d349 /drivers/md
parent     d857ad75edf3c0066fcd920746f9dc75382b3324 (diff)
dm zoned: fix metadata block ref counting
Since the ref field of struct dmz_mblock is always used with the spinlock of struct dmz_metadata locked, there is no need to use an atomic_t type. Change the type of the ref field to an unsigned integer.

Fixes: 3b1a94c88b79 ("dm zoned: drive-managed zoned block device target")
Cc: stable@vger.kernel.org
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
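The reasoning is that every access to mblk->ref already happens with zmd->mblk_lock held, so a plain integer increment or decrement is fully serialized and atomic_t adds no extra protection. A minimal sketch of the pattern follows; block_cache, cached_block, block_get and block_put are hypothetical illustrative names, not the driver's actual API, and the structures are heavily simplified.

/* Illustrative sketch only -- simplified stand-ins for dmz_metadata
 * and dmz_mblock; the real structures carry much more state.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct cached_block {
	unsigned int	ref;	/* was atomic_t; now guarded by cache->lock */
};

struct block_cache {
	spinlock_t	lock;	/* serializes all manipulation of ref */
};

static void block_get(struct block_cache *cache, struct cached_block *blk)
{
	spin_lock(&cache->lock);
	blk->ref++;		/* plain increment is safe under the lock */
	spin_unlock(&cache->lock);
}

/* Returns true when the last reference is dropped, mirroring the
 * atomic_dec_and_test() semantics that the patch open-codes.
 */
static bool block_put(struct block_cache *cache, struct cached_block *blk)
{
	bool last;

	spin_lock(&cache->lock);
	blk->ref--;
	last = (blk->ref == 0);
	spin_unlock(&cache->lock);
	return last;
}

The same reasoning covers the read sites changed below: dmz_get_mblock() and dmz_flush_metadata() also inspect mblk->ref only while holding zmd->mblk_lock, so atomic_read() is not needed either.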
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-zoned-metadata.c | 20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 969954915566..67b71f6e3bda 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -99,7 +99,7 @@ struct dmz_mblock {
struct rb_node node;
struct list_head link;
sector_t no;
- atomic_t ref;
+ unsigned int ref;
unsigned long state;
struct page *page;
void *data;
@@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
RB_CLEAR_NODE(&mblk->node);
INIT_LIST_HEAD(&mblk->link);
- atomic_set(&mblk->ref, 0);
+ mblk->ref = 0;
mblk->state = 0;
mblk->no = mblk_no;
mblk->data = page_address(mblk->page);
@@ -397,7 +397,7 @@ static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
return NULL;
spin_lock(&zmd->mblk_lock);
- atomic_inc(&mblk->ref);
+ mblk->ref++;
set_bit(DMZ_META_READING, &mblk->state);
dmz_insert_mblock(zmd, mblk);
spin_unlock(&zmd->mblk_lock);
@@ -484,7 +484,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd,
spin_lock(&zmd->mblk_lock);
- if (atomic_dec_and_test(&mblk->ref)) {
+ mblk->ref--;
+ if (mblk->ref == 0) {
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
rb_erase(&mblk->node, &zmd->mblk_rbtree);
dmz_free_mblock(zmd, mblk);
@@ -511,7 +512,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
mblk = dmz_lookup_mblock(zmd, mblk_no);
if (mblk) {
/* Cache hit: remove block from LRU list */
- if (atomic_inc_return(&mblk->ref) == 1 &&
+ mblk->ref++;
+ if (mblk->ref == 1 &&
!test_bit(DMZ_META_DIRTY, &mblk->state))
list_del_init(&mblk->link);
}
@@ -753,7 +755,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
spin_lock(&zmd->mblk_lock);
clear_bit(DMZ_META_DIRTY, &mblk->state);
- if (atomic_read(&mblk->ref) == 0)
+ if (mblk->ref == 0)
list_add_tail(&mblk->link, &zmd->mblk_lru_list);
spin_unlock(&zmd->mblk_lock);
}
@@ -2308,7 +2310,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
mblk = list_first_entry(&zmd->mblk_dirty_list,
struct dmz_mblock, link);
dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
- (u64)mblk->no, atomic_read(&mblk->ref));
+ (u64)mblk->no, mblk->ref);
list_del_init(&mblk->link);
rb_erase(&mblk->node, &zmd->mblk_rbtree);
dmz_free_mblock(zmd, mblk);
@@ -2326,8 +2328,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
root = &zmd->mblk_rbtree;
rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
- (u64)mblk->no, atomic_read(&mblk->ref));
- atomic_set(&mblk->ref, 0);
+ (u64)mblk->no, mblk->ref);
+ mblk->ref = 0;
dmz_free_mblock(zmd, mblk);
}