path: root/fs/xfs/xfs_iget.c
author     Dave Chinner <dchinner@redhat.com>    2010-12-16 17:08:41 +1100
committer  Dave Chinner <david@fromorbit.com>    2010-12-16 17:08:41 +1100
commit     1a427ab0c1b205d1bda8da0b77ea9d295ac23c57 (patch)
tree       dc0fffd22282d0da29da43b3ebdeed7c3f5fac1d /fs/xfs/xfs_iget.c
parent     1a3e8f3da09c7082d25b512a0ffe569391e4c09a (diff)
xfs: convert pag_ici_lock to a spin lock
Now that we are using RCU protection for the inode cache lookups, the lock is only needed on the modification side. Hence it is not necessary for the lock to be a rwlock as there are no read-side holders anymore. Convert it to a spin lock to reflect its exclusive nature.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Alex Elder <aelder@sgi.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
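For context, the locking scheme the commit message describes looks roughly like the sketch below: lookups walk the per-AG inode radix tree under rcu_read_lock() alone, while insertions and removals still serialise on pag_ici_lock, so the lock has no read-side holders and a plain spinlock suffices. This is a simplified, hypothetical illustration rather than the actual XFS code; the struct name xfs_perag_sketch, the sketch_* helpers, and the sketch_agino_t typedef are invented for the example, and the real lookup path does additional validation under ip->i_flags_lock before the inode pointer leaves the RCU read-side critical section.

#include <linux/spinlock.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct xfs_inode;               /* opaque here; real definition lives in xfs_inode.h */
typedef u32 sketch_agino_t;     /* stand-in for xfs_agino_t */

/* Abbreviated stand-in for the relevant fields of struct xfs_perag. */
struct xfs_perag_sketch {
	spinlock_t		pag_ici_lock;	/* rwlock_t before this patch */
	struct radix_tree_root	pag_ici_root;	/* in-core inode cache */
};

/*
 * Read side: RCU only, pag_ici_lock is never taken.  The real code
 * revalidates the inode under ip->i_flags_lock before using it.
 */
static bool
sketch_ici_cached(struct xfs_perag_sketch *pag, sketch_agino_t agino)
{
	bool found;

	rcu_read_lock();
	found = radix_tree_lookup(&pag->pag_ici_root, agino) != NULL;
	rcu_read_unlock();
	return found;
}

/* Modification side: exclusive access, hence a plain spin lock. */
static int
sketch_ici_insert(struct xfs_perag_sketch *pag, sketch_agino_t agino,
		  struct xfs_inode *ip)
{
	int error;

	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	spin_unlock(&pag->pag_ici_lock);
	return error;
}

Because only the insert/remove paths ever take pag_ici_lock, converting it from rwlock_t to spinlock_t documents that exclusivity and drops the reader/writer accounting overhead, which is exactly what the hunks below do.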
Diffstat (limited to 'fs/xfs/xfs_iget.c')
-rw-r--r--  fs/xfs/xfs_iget.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 04ed09b907b8..3ecad00e8409 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -260,7 +260,7 @@ xfs_iget_cache_hit(
goto out_error;
}
- write_lock(&pag->pag_ici_lock);
+ spin_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
ip->i_flags |= XFS_INEW;
@@ -273,7 +273,7 @@ xfs_iget_cache_hit(
&xfs_iolock_active, "xfs_iolock_active");
spin_unlock(&ip->i_flags_lock);
- write_unlock(&pag->pag_ici_lock);
+ spin_unlock(&pag->pag_ici_lock);
} else {
/* If the VFS inode is being torn down, pause and try again. */
if (!igrab(inode)) {
@@ -351,7 +351,7 @@ xfs_iget_cache_miss(
BUG();
}
- write_lock(&pag->pag_ici_lock);
+ spin_lock(&pag->pag_ici_lock);
/* insert the new inode */
error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
@@ -366,14 +366,14 @@ xfs_iget_cache_miss(
ip->i_udquot = ip->i_gdquot = NULL;
xfs_iflags_set(ip, XFS_INEW);
- write_unlock(&pag->pag_ici_lock);
+ spin_unlock(&pag->pag_ici_lock);
radix_tree_preload_end();
*ipp = ip;
return 0;
out_preload_end:
- write_unlock(&pag->pag_ici_lock);
+ spin_unlock(&pag->pag_ici_lock);
radix_tree_preload_end();
if (lock_flags)
xfs_iunlock(ip, lock_flags);