path: root/fs/f2fs/recovery.c
author     Jaegeuk Kim <jaegeuk.kim@samsung.com>   2013-03-08 21:29:23 +0900
committer  Jaegeuk Kim <jaegeuk.kim@samsung.com>   2013-03-20 18:30:06 +0900
commit     393ff91f57c87d48ffed30878be6e3e486d3a00a (patch)
tree       c80fe33bcf8546ebce9ab6fc043b99889e67536f /fs/f2fs/recovery.c
parent     25c0a6e529b56ca010e1f46239edd07c1b484b63 (diff)
f2fs: reduce unnecessary locking pages during read
This patch reduces redundant locking and unlocking of pages during read operations.

In f2fs_readpage, let's use wait_on_page_locked() instead of lock_page. And then,
when we finally need to modify any data, let's lock the page, so that we can avoid
lock contention.

[readpage rule]
- f2fs_readpage returns an unlocked page, or a released page in error cases.
- Its caller should handle the read error, -EIO, after locking the page, which
  indicates read completion.
- Its caller should check PageUptodate after grab_cache_page.

Signed-off-by: Changman Lee <cm224.lee@samsung.com>
Reviewed-by: Namjae Jeon <namjae.jeon@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
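For illustration, here is a minimal sketch of a caller following the readpage rule
above. It mirrors the find_fsync_dnodes() hunk in the diff below; the function
read_node_sketch() is hypothetical, while f2fs_readpage(), lock_page(),
cpver_of_node() and READ_SYNC are the f2fs-internal names visible in the hunks.

	/* Sketch of the new calling convention; not a literal copy of the kernel code. */
	static int read_node_sketch(struct f2fs_sb_info *sbi, struct page *page,
				    block_t blkaddr, u64 cp_ver)
	{
		int err;

		/* f2fs_readpage() returns with the page unlocked */
		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
		if (err)
			return err;	/* caller still owns and frees the page */

		/*
		 * Lock the page only when its contents must be inspected or
		 * modified; acquiring the lock also signals read completion.
		 */
		lock_page(page);

		if (cp_ver != cpver_of_node(page)) {
			err = -EINVAL;
			goto unlock_out;
		}

		/* ... use the page contents ... */

	unlock_out:
		unlock_page(page);
		return err;
	}

The same pattern repeats in recover_data() below: the page lock is held only across
the validation/modification window instead of across the whole read.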
Diffstat (limited to 'fs/f2fs/recovery.c')
-rw-r--r--  fs/f2fs/recovery.c  31
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 6b82e2034cfd..2d86eb26c493 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -112,11 +112,16 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
while (1) {
struct fsync_inode_entry *entry;
- if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
+ err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
+ if (err)
goto out;
- if (cp_ver != cpver_of_node(page))
- goto out;
+ lock_page(page);
+
+ if (cp_ver != cpver_of_node(page)) {
+ err = -EINVAL;
+ goto unlock_out;
+ }
if (!is_fsync_dnode(page))
goto next;
@@ -131,7 +136,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
if (IS_INODE(page) && is_dent_dnode(page)) {
if (recover_inode_page(sbi, page)) {
err = -ENOMEM;
- goto out;
+ goto unlock_out;
}
}
@@ -139,14 +144,14 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
if (!entry) {
err = -ENOMEM;
- goto out;
+ goto unlock_out;
}
entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
if (IS_ERR(entry->inode)) {
err = PTR_ERR(entry->inode);
kmem_cache_free(fsync_entry_slab, entry);
- goto out;
+ goto unlock_out;
}
list_add_tail(&entry->list, head);
@@ -155,15 +160,15 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
if (IS_INODE(page)) {
err = recover_inode(entry->inode, page);
if (err)
- goto out;
+ goto unlock_out;
}
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
- ClearPageUptodate(page);
}
-out:
+unlock_out:
unlock_page(page);
+out:
__free_pages(page, 0);
return err;
}
@@ -319,8 +324,10 @@ static void recover_data(struct f2fs_sb_info *sbi,
if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
goto out;
+ lock_page(page);
+
if (cp_ver != cpver_of_node(page))
- goto out;
+ goto unlock_out;
entry = get_fsync_inode(head, ino_of_node(page));
if (!entry)
@@ -336,10 +343,10 @@ static void recover_data(struct f2fs_sb_info *sbi,
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
- ClearPageUptodate(page);
}
-out:
+unlock_out:
unlock_page(page);
+out:
__free_pages(page, 0);
allocate_new_segments(sbi);