author     Jan Kara <jack@suse.cz>  2009-09-16 19:22:48 +0200
committer  Jens Axboe <jens.axboe@oracle.com>  2009-09-25 18:08:24 +0200
commit     a5989bdc981ec85e0734ac22519cc0b780813d7b
tree       74cc93bd420b7bd044e3fd5451c652b3742015e6 /fs/fs-writeback.c
parent     7fa07729e439a6184bd824746d06a49cca553f15
fs: Fix busyloop in wb_writeback()
If all inodes are under writeback (e.g. when there is only one inode with dirty pages), wb_writeback() with WB_SYNC_NONE basically degrades to busylooping until the I_SYNC flag of the inode is cleared. Fix the problem by waiting on the I_SYNC flag of an inode on the b_more_io list in case we failed to write anything.

Tested-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
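For reference, the wait the patch leans on is inode_wait_for_writeback(), a helper that already exists in fs/fs-writeback.c at this point in the writeback series. The snippet below is only a from-memory sketch of that era's helper, not a verbatim copy of the file; names such as inode_wait and the exact lock/sleep ordering should be checked against the real 2.6.32 source.

static void inode_wait_for_writeback(struct inode *inode)
{
	/* Approximate sketch of the era's helper; see fs/fs-writeback.c for the real code. */
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		/* Drop inode_lock while sleeping so the task holding I_SYNC can finish and clear it. */
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}

The caller has to hold inode_lock around the call, which is exactly how the hunk below uses it.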
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--  fs/fs-writeback.c  19
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8e1e5e19d21e..c59d6737036c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -706,6 +706,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 	};
 	unsigned long oldest_jif;
 	long wrote = 0;
+	struct inode *inode;
 
 	if (wbc.for_kupdate) {
 		wbc.older_than_this = &oldest_jif;
@@ -747,8 +748,24 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * If we ran out of stuff to write, bail unless more_io got set
 		 */
 		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
-			if (wbc.more_io && !wbc.for_kupdate)
+			if (wbc.more_io && !wbc.for_kupdate) {
+				if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
+					continue;
+				/*
+				 * Nothing written. Wait for some inode to
+				 * become available for writeback. Otherwise
+				 * we'll just busyloop.
+				 */
+				spin_lock(&inode_lock);
+				if (!list_empty(&wb->b_more_io)) {
+					inode = list_entry(
+							wb->b_more_io.prev,
+							struct inode, i_list);
+					inode_wait_for_writeback(inode);
+				}
+				spin_unlock(&inode_lock);
 				continue;
+			}
 			break;
 		}
 	}
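As a standalone illustration of the pattern being fixed (purely a user-space analogy, with made-up names such as fake_inode and wait_for_sync_clear), the sketch below blocks on a condition variable until the "sync" flag is dropped, instead of spinning through a retry loop the way the unpatched branch effectively did:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* A stand-in for an inode whose I_SYNC-like flag is held by another writer. */
struct fake_inode {
	pthread_mutex_t lock;
	pthread_cond_t  sync_cleared;
	bool            under_sync;    /* analogous to I_SYNC */
};

/* Old behaviour: retry immediately -> busyloop while under_sync stays set.
 * New behaviour: sleep until the flag is cleared, then retry. */
static void wait_for_sync_clear(struct fake_inode *inode)
{
	pthread_mutex_lock(&inode->lock);
	while (inode->under_sync)
		pthread_cond_wait(&inode->sync_cleared, &inode->lock);
	pthread_mutex_unlock(&inode->lock);
}

static void *other_writer(void *arg)
{
	struct fake_inode *inode = arg;

	sleep(1);                       /* pretend writeback takes a while */
	pthread_mutex_lock(&inode->lock);
	inode->under_sync = false;      /* "I_SYNC" cleared */
	pthread_cond_broadcast(&inode->sync_cleared);
	pthread_mutex_unlock(&inode->lock);
	return NULL;
}

int main(void)
{
	struct fake_inode inode = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.sync_cleared = PTHREAD_COND_INITIALIZER,
		.under_sync = true,
	};
	pthread_t tid;

	pthread_create(&tid, NULL, other_writer, &inode);
	wait_for_sync_clear(&inode);    /* blocks instead of spinning */
	printf("inode available for writeback again\n");
	pthread_join(tid, NULL);
	return 0;
}

The kernel version achieves the same effect with a bit waitqueue keyed on __I_SYNC rather than a condition variable, but the control-flow change is the same: when no progress was made, sleep until someone else's writeback completes instead of immediately retrying.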