author     Chris Mason <chris.mason@oracle.com>  2009-02-04 09:24:25 -0500
committer  Chris Mason <chris.mason@oracle.com>  2009-02-04 09:24:25 -0500
commit     c487685d7c18a8481900755aa5c56a7a74193101
tree       f2ff6c059e24d7fdf2e1fff498bbef0bb90bfbfc /fs/btrfs
parent     3935127c50c84106d654ef14962cff28c660bc62
Btrfs: hash_lock is no longer needed
Before metadata is written to disk, it is updated to reflect that writeout has begun: the BTRFS_HEADER_FLAG_WRITTEN flag is set in the block header. Once this update is done, the block must be cow'd before it can be modified again. This update was originally synchronized by a per-fs spinlock. Today the buffers for the metadata blocks are locked before writeout begins, and everyone that tests the flag holds the buffer lock as well, so the per-fs spinlock (called hash_lock for no good reason) is no longer required.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
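The synchronization argument can be sketched outside of btrfs. The snippet below is a hypothetical illustration, not kernel code: meta_block, mark_written() and cow_is_unnecessary() are made-up names, a pthread mutex stands in for the extent buffer lock taken by btrfs_tree_lock(), and a boolean stands in for BTRFS_HEADER_FLAG_WRITTEN. It only shows why a second, per-fs spinlock is redundant once both the writeout path (which sets the flag) and the COW check (which tests it) already hold the same per-block lock.

/*
 * Hypothetical sketch, not btrfs code: a per-block lock protects the
 * "written" flag, so no additional per-fs spinlock is required.
 */
#include <pthread.h>
#include <stdbool.h>

struct meta_block {
	pthread_mutex_t lock;   /* stands in for the extent buffer lock    */
	bool written;           /* stands in for BTRFS_HEADER_FLAG_WRITTEN */
};

/* Writeout path: the block is locked before writeback starts, and the
 * flag is set while that lock is held. */
static void mark_written(struct meta_block *b)
{
	pthread_mutex_lock(&b->lock);
	b->written = true;
	pthread_mutex_unlock(&b->lock);
}

/* COW check: the flag is tested under the same per-block lock, so it
 * cannot race with the writeout path above. */
static bool cow_is_unnecessary(struct meta_block *b)
{
	bool clean;

	pthread_mutex_lock(&b->lock);
	clean = !b->written;
	pthread_mutex_unlock(&b->lock);
	return clean;
}

In the patch itself the per-block lock is the extent buffer lock; the hunks below simply drop the now-redundant hash_lock around the flag accesses.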
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/ctree.c    7
-rw-r--r--  fs/btrfs/ctree.h    1
-rw-r--r--  fs/btrfs/disk-io.c  4
3 files changed, 1 insertion(+), 11 deletions(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 2603ee539b7a..3b6e35aafc9e 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -388,16 +388,14 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
 		WARN_ON(1);
 	}
 
-	spin_lock(&root->fs_info->hash_lock);
 	if (btrfs_header_generation(buf) == trans->transid &&
 	    btrfs_header_owner(buf) == root->root_key.objectid &&
 	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
 		*cow_ret = buf;
-		spin_unlock(&root->fs_info->hash_lock);
 		WARN_ON(prealloc_dest);
 		return 0;
 	}
-	spin_unlock(&root->fs_info->hash_lock);
+
 	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
 	ret = __btrfs_cow_block(trans, root, buf, parent,
 				parent_slot, cow_ret, search_start, 0,
@@ -1376,14 +1374,11 @@ again:
 			int wret;
 
 			/* is a cow on this block not required */
-			spin_lock(&root->fs_info->hash_lock);
 			if (btrfs_header_generation(b) == trans->transid &&
 			    btrfs_header_owner(b) == root->root_key.objectid &&
 			    !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) {
-				spin_unlock(&root->fs_info->hash_lock);
 				goto cow_done;
 			}
-			spin_unlock(&root->fs_info->hash_lock);
 
 			/* ok, we have to cow, is our old prealloc the right
 			 * size?
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index de103a8a815e..f2b8d26b0472 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -703,7 +703,6 @@ struct btrfs_fs_info {
 	struct super_block *sb;
 	struct inode *btree_inode;
 	struct backing_dev_info bdi;
-	spinlock_t hash_lock;
 	struct mutex trans_mutex;
 	struct mutex tree_log_mutex;
 	struct mutex transaction_kthread_mutex;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9c3810047976..549271607c17 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1503,7 +1503,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	INIT_LIST_HEAD(&fs_info->dead_roots);
 	INIT_LIST_HEAD(&fs_info->hashers);
 	INIT_LIST_HEAD(&fs_info->delalloc_inodes);
-	spin_lock_init(&fs_info->hash_lock);
 	spin_lock_init(&fs_info->delalloc_lock);
 	spin_lock_init(&fs_info->new_trans_lock);
 	spin_lock_init(&fs_info->ref_cache_lock);
@@ -2361,7 +2360,6 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
 int btree_lock_page_hook(struct page *page)
 {
 	struct inode *inode = page->mapping->host;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_buffer *eb;
 	unsigned long len;
@@ -2376,9 +2374,7 @@ int btree_lock_page_hook(struct page *page)
 		goto out;
 
 	btrfs_tree_lock(eb);
-	spin_lock(&root->fs_info->hash_lock);
 	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
-	spin_unlock(&root->fs_info->hash_lock);
 	btrfs_tree_unlock(eb);
 	free_extent_buffer(eb);
 out: