From 49958fd7dbb83cd4d65179d025940e01fe1fbacd Mon Sep 17 00:00:00 2001
From: Josef Bacik
Date: Tue, 2 Feb 2010 21:48:28 +0000
Subject: Btrfs: change the ordered tree to use a spinlock instead of a mutex

The ordered tree used to need a mutex, but currently all we use it for
is to protect the rb_tree, and a spin_lock is just fine for that.
Using a spin_lock instead makes dbench run a little faster (58 mb/s
instead of 51 mb/s) with lower latency (3445.138 ms instead of
3820.633 ms).

Signed-off-by: Josef Bacik
Signed-off-by: Chris Mason
---
 fs/btrfs/ordered-data.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 5c2a9e78a949..d56f732ba95e 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -174,7 +174,6 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 	if (!entry)
 		return -ENOMEM;
 
-	mutex_lock(&tree->mutex);
 	entry->file_offset = file_offset;
 	entry->start = start;
 	entry->len = len;
@@ -190,16 +189,17 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 	INIT_LIST_HEAD(&entry->list);
 	INIT_LIST_HEAD(&entry->root_extent_list);
 
+	spin_lock(&tree->lock);
 	node = tree_insert(&tree->tree, file_offset,
 			   &entry->rb_node);
 	BUG_ON(node);
+	spin_unlock(&tree->lock);
 
 	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
 	list_add_tail(&entry->root_extent_list,
 		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
 	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
 
-	mutex_unlock(&tree->mutex);
 	BUG_ON(node);
 	return 0;
 }
@@ -216,9 +216,9 @@ int btrfs_add_ordered_sum(struct inode *inode,
 	struct btrfs_ordered_inode_tree *tree;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
 	list_add_tail(&sum->list, &entry->list);
-	mutex_unlock(&tree->mutex);
+	spin_unlock(&tree->lock);
 	return 0;
 }
 
@@ -240,7 +240,7 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
 	int ret;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
 	node = tree_search(tree, file_offset);
 	if (!node) {
 		ret = 1;
@@ -264,7 +264,7 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
 	else
 		ret = 1;
 out:
-	mutex_unlock(&tree->mutex);
+	spin_unlock(&tree->lock);
 	return ret == 0;
 }
 
@@ -291,7 +291,7 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 
 /*
  * remove an ordered extent from the tree.  No references are dropped
- * and you must wake_up entry->wait.  You must hold the tree mutex
+ * and you must wake_up entry->wait.  You must hold the tree lock
  * while you call this function.
  */
 static int __btrfs_remove_ordered_extent(struct inode *inode,
@@ -340,9 +340,9 @@ int btrfs_remove_ordered_extent(struct inode *inode,
 	int ret;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
 	ret = __btrfs_remove_ordered_extent(inode, entry);
-	mutex_unlock(&tree->mutex);
+	spin_unlock(&tree->lock);
 
 	wake_up(&entry->wait);
 	return ret;
@@ -567,7 +567,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
 	struct btrfs_ordered_extent *entry = NULL;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
 	node = tree_search(tree, file_offset);
 	if (!node)
 		goto out;
@@ -578,7 +578,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
 	if (entry)
 		atomic_inc(&entry->refs);
 out:
-	mutex_unlock(&tree->mutex);
+	spin_unlock(&tree->lock);
 	return entry;
 }
 
@@ -594,7 +594,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
 	struct btrfs_ordered_extent *entry = NULL;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
 	node = tree_search(tree, file_offset);
 	if (!node)
 		goto out;
@@ -602,7 +602,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 	atomic_inc(&entry->refs);
 out:
-	mutex_unlock(&tree->mutex);
+	spin_unlock(&tree->lock);
 	return entry;
 }
 
@@ -629,7 +629,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 	else
 		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
 
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
 	disk_i_size = BTRFS_I(inode)->disk_i_size;
 
 	/* truncate file */
@@ -735,7 +735,7 @@ out:
 	 */
 	if (ordered)
 		__btrfs_remove_ordered_extent(inode, ordered);
-	mutex_unlock(&tree->mutex);
+	spin_unlock(&tree->lock);
 	if (ordered)
 		wake_up(&ordered->wait);
 	return ret;
@@ -762,7 +762,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
 	if (!ordered)
 		return 1;
 
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
 	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
 		if (disk_bytenr >= ordered_sum->bytenr) {
 			num_sectors = ordered_sum->len / sectorsize;
@@ -777,7 +777,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
 		}
 	}
 out:
-	mutex_unlock(&tree->mutex);
+	spin_unlock(&tree->lock);
 	btrfs_put_ordered_extent(ordered);
 	return ret;
 }
-- 
cgit v1.2.3
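The hunks above only cover ordered-data.c; the companion header change is
outside this log's filter.  A minimal sketch of what the ordered-data.h side
of the conversion would look like, assuming (as the tree->lock accesses above
imply) the struct mutex field is simply replaced by a spinlock_t named lock:

	/* Sketch of the ordered-data.h side of this change; the field
	 * layout is inferred from the hunks above, not shown in this log. */
	struct btrfs_ordered_inode_tree {
		spinlock_t lock;	/* was: struct mutex mutex; */
		struct rb_root tree;	/* rb_tree of pending ordered extents */
		struct rb_node *last;	/* cache of the last returned node */
	};

	static inline void
	btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
	{
		spin_lock_init(&t->lock);	/* was: mutex_init(&t->mutex) */
		t->tree = RB_ROOT;
		t->last = NULL;
	}

A spinlock fits here because every critical section in the diff is a short,
bounded rb_tree manipulation with no sleeping calls; a mutex only pays off
when holders can block.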
From 5a1a3df1f6c86926cfe8657e6f9b4b4c2f467d60 Mon Sep 17 00:00:00 2001
From: Josef Bacik
Date: Tue, 2 Feb 2010 20:51:14 +0000
Subject: Btrfs: cache ordered extent when completing io

When finishing io we run btrfs_dec_test_ordered_pending, and then
immediately run btrfs_lookup_ordered_extent, but
btrfs_dec_test_ordered_pending does that already, so we're searching
twice when we don't have to.

This patch lets us pass a btrfs_ordered_extent in to
btrfs_dec_test_ordered_pending, so if we do complete io on that
ordered extent we can just use the one we found then instead of having
to do another btrfs_lookup_ordered_extent.

This made my fio job with the other patch go from 24 mb/s to 29 mb/s.
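For context, the consumer side (in io completion, not shown in this log) can
now take the ordered extent straight from the dec_test call.  A hedged sketch
of such a caller follows; the function and variable names are illustrative,
not taken from this diff:

	/* Illustrative io-completion caller, using assumed names.
	 * btrfs_dec_test_ordered_pending() returns 1 only when the whole
	 * ordered extent has completed, and in that case it has already
	 * taken a reference on *cached for us. */
	static int example_finish_ordered_io(struct inode *inode,
					     u64 start, u64 end)
	{
		struct btrfs_ordered_extent *ordered_extent = NULL;

		if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent,
						    start, end - start + 1))
			return 0;	/* io still outstanding on this extent */

		/* ... insert file extent items, write out csums, etc ... */

		/* drop the reference dec_test took on our behalf */
		btrfs_put_ordered_extent(ordered_extent);
		return 0;
	}

This saves one rb_tree search per completed ordered extent, which is exactly
the double lookup the commit message describes.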
Signed-off-by: Josef Bacik
Signed-off-by: Chris Mason
---
 fs/btrfs/ordered-data.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index d56f732ba95e..a8ffecd0b491 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -232,11 +232,12 @@ int btrfs_add_ordered_sum(struct inode *inode,
  * to make sure this function only returns 1 once for a given ordered extent.
  */
 int btrfs_dec_test_ordered_pending(struct inode *inode,
+				   struct btrfs_ordered_extent **cached,
 				   u64 file_offset, u64 io_size)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
-	struct btrfs_ordered_extent *entry;
+	struct btrfs_ordered_extent *entry = NULL;
 	int ret;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
@@ -264,6 +265,10 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
 	else
 		ret = 1;
 out:
+	if (!ret && cached && entry) {
+		*cached = entry;
+		atomic_inc(&entry->refs);
+	}
 	spin_unlock(&tree->lock);
 	return ret == 0;
 }
-- 
cgit v1.2.3

From 5a0e3ad6af8660be21ca98a971cd00f331318c05 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 24 Mar 2010 17:04:11 +0900
Subject: include cleanup: Update gfp.h and slab.h includes to prepare for
 breaking implicit slab.h inclusion from percpu.h

percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files.  percpu.h includes slab.h which
in turn includes gfp.h, making everything defined by the two files
universally available and complicating inclusion dependencies.

The percpu.h -> slab.h dependency is about to be removed.  Prepare for
this change by updating users of gfp and slab facilities to include
those headers directly instead of assuming availability.  As this
conversion needs to touch a large number of source files, the
following script was used as the basis of the conversion.

  http://userweb.kernel.org/~tj/misc/slabh-sweep.py

The script does the following.

* Scans files for gfp and slab usages and updates includes such that
  only the necessary includes are there.  ie. if only gfp is used,
  gfp.h, if slab is used, slab.h.

* When the script inserts a new include, it looks at the include
  blocks and tries to put the new include such that its order conforms
  to its surrounding.  It's put in the include block which contains
  core kernel includes, in the same order that the rest are ordered -
  alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
  doesn't seem to be any matching order.

* If the script can't find a place to put a new include (mostly
  because the file doesn't have a fitting include block), it prints
  out an error message indicating which .h file needs to be added to
  the file.

The conversion was done in the following steps.

1. The initial automatic conversion of all .c files updated slightly
   over 4000 files, deleting around 700 includes and adding ~480 gfp.h
   and ~3000 slab.h inclusions.  The script emitted errors for ~400
   files.

2. Each error was manually checked.  Some didn't need the inclusion,
   some needed manual addition while adding it to implementation .h or
   embedding .c file was more appropriate for others.  This step added
   inclusions to around 150 files.

3. The script was run again and the output was compared to the edits
   from #2 to make sure no file was left behind.

4. Several build tests were done and a couple of problems were fixed.
   e.g. lib/decompress_*.c used malloc/free() wrappers around slab
   APIs requiring slab.h to be added manually.

5. The script was run on all .h files but without automatically
   editing them, as sprinkling gfp.h and slab.h inclusions around .h
   files could easily lead to inclusion dependency hell.  Most gfp.h
   inclusion directives were ignored as stuff from gfp.h was usually
   widely available and often used in preprocessor macros.  Each
   slab.h inclusion directive was examined and added manually as
   necessary.
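To make the dependency problem concrete, here is a hypothetical file of the
kind this sweep fixes (an assumed example, not taken from the patch): it uses
slab allocators but, before this series, would have built even without the
slab.h include, because module.h pulled in percpu.h, which pulled in slab.h.

	#include <linux/module.h>
	#include <linux/slab.h>	/* kzalloc()/kfree(): now required explicitly */

	static void *example_buf;

	static int __init example_init(void)
	{
		/* GFP_KERNEL arrives via slab.h -> gfp.h */
		example_buf = kzalloc(256, GFP_KERNEL);
		return example_buf ? 0 : -ENOMEM;
	}

	static void __exit example_exit(void)
	{
		kfree(example_buf);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

Once percpu.h stops including slab.h, any file relying on the implicit chain
fails to compile, which is why the sweep adds the direct includes ahead of
time.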
6. percpu.h was updated not to include slab.h.

7. Build tests were done on the following configurations and failures
   were fixed.  CONFIG_GCOV_KERNEL was turned off for all tests (as my
   distributed build env didn't work with gcov compiles) and a few
   more options had to be turned off depending on archs to make things
   build (like ipr on powerpc/64 which failed due to missing writeq).

   * x86 and x86_64 UP and SMP allmodconfig and a custom test config.
   * powerpc and powerpc64 SMP allmodconfig
   * sparc and sparc64 SMP allmodconfig
   * ia64 SMP allmodconfig
   * s390 SMP allmodconfig
   * alpha SMP allmodconfig
   * um on x86_64 SMP allmodconfig

8. percpu.h modifications were reverted so that it could be applied as
   a separate patch and serve as bisection point.

Given the fact that I had only a couple of failures from the tests on
step 6, I'm fairly confident about the coverage of this conversion
patch.  If there is a breakage, it's likely to be something in one of
the arch headers which should be easily discoverable on most builds of
the specific arch.

Signed-off-by: Tejun Heo
Guess-its-ok-by: Christoph Lameter
Cc: Ingo Molnar
Cc: Lee Schermerhorn
---
 fs/btrfs/ordered-data.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index a8ffecd0b491..ecb22ff7d1fd 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -16,7 +16,6 @@
  * Boston, MA 021110-1307, USA.
  */
 
-#include <linux/gfp.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
-- 
cgit v1.2.3

From 287a0ab91d25ca982f895a76402e5893b47ed7a6 Mon Sep 17 00:00:00 2001
From: Josef Bacik
Date: Fri, 19 Mar 2010 18:07:23 +0000
Subject: Btrfs: kill max_extent mount option

As Yan pointed out, there's not much reason for all this complicated
math to account for file extents being split up into max_extent
chunks, since they are likely to all end up in the same leaf anyway.
Since there isn't much reason to use max_extent, just remove the
option altogether so we have one less thing we need to test.

Signed-off-by: Josef Bacik
Signed-off-by: Chris Mason
---
 fs/btrfs/ordered-data.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index a8ffecd0b491..5c99882b9763 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -303,6 +303,7 @@ static int __btrfs_remove_ordered_extent(struct inode *inode,
 			       struct btrfs_ordered_extent *entry)
 {
 	struct btrfs_ordered_inode_tree *tree;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct rb_node *node;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
@@ -312,12 +313,13 @@ static int __btrfs_remove_ordered_extent(struct inode *inode,
 	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
 
 	spin_lock(&BTRFS_I(inode)->accounting_lock);
+	WARN_ON(!BTRFS_I(inode)->outstanding_extents);
 	BTRFS_I(inode)->outstanding_extents--;
 	spin_unlock(&BTRFS_I(inode)->accounting_lock);
 	btrfs_unreserve_metadata_for_delalloc(BTRFS_I(inode)->root,
 					      inode, 1);
 
-	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
+	spin_lock(&root->fs_info->ordered_extent_lock);
 	list_del_init(&entry->root_extent_list);
 
 	/*
@@ -329,7 +331,7 @@ static int __btrfs_remove_ordered_extent(struct inode *inode,
 	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
 		list_del_init(&BTRFS_I(inode)->ordered_operations);
 	}
-	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
+	spin_unlock(&root->fs_info->ordered_extent_lock);
 
 	return 0;
 }
-- 
cgit v1.2.3