author      Chao Yu <chao2.yu@samsung.com>        2015-09-28 17:42:24 +0800
committer   Jaegeuk Kim <jaegeuk@kernel.org>      2015-10-09 16:20:54 -0700
commit      45fe8492ccbe561c4b8918c2d4c83a0501e50646 (patch)
tree        2a4892de2fb741302a73483bd079b10ef7ad5729 /fs/f2fs
parent      345a6b2ee2987a11bc8e9c08ff2b68a973fd912c (diff)
f2fs: fix to correct freed section number during gc
This patch fixes the accounting of freed sections during garbage collection when a foreground gc is triggered.

Besides, when a foreground gc is running on the currently selected section, once we fail to gc one segment it is better to abandon gc of the remaining segments in that section: we will select the next victim for foreground gc anyway, so collecting the remaining segments of the previous section only adds overhead and increases latency for the caller.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
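As a rough illustration of the control flow the commit message describes, the following is a minimal, self-contained C sketch of the patched loop. The struct, the stub do_garbage_collect(), and the helper gc_one_section() are simplified stand-ins invented for this example (the real f2fs function takes more arguments and operates on kernel structures); only the break-on-failure and "count a section as freed only if every segment succeeded" logic mirrors the diff below.

/* Simplified stand-ins for the real f2fs types and helpers. */
#include <stdio.h>

enum gc_type { BG_GC, FG_GC };

struct sb_info {
	unsigned int segs_per_sec;	/* segments per section */
};

/* Stub: returns nonzero when the segment was collected successfully.
 * Here every third segment "fails", just to exercise the loop. */
static int do_garbage_collect(struct sb_info *sbi, unsigned int segno,
			      enum gc_type gc_type)
{
	(void)sbi;
	(void)gc_type;
	return (segno % 3) != 2;
}

/*
 * Mirrors the patched loop: on FG_GC, stop at the first failed segment;
 * the section only counts as freed if every segment in it was collected.
 */
static unsigned int gc_one_section(struct sb_info *sbi, unsigned int start_segno,
				   enum gc_type gc_type, int *sec_freed)
{
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++) {
		if (!do_garbage_collect(sbi, start_segno + i, gc_type) &&
		    gc_type == FG_GC)
			break;
	}

	if (i == sbi->segs_per_sec && gc_type == FG_GC)
		(*sec_freed)++;

	return i;	/* number of segments actually processed */
}

int main(void)
{
	struct sb_info sbi = { .segs_per_sec = 4 };
	int sec_freed = 0;

	gc_one_section(&sbi, 0, FG_GC, &sec_freed);
	printf("sections freed: %d\n", sec_freed);	/* 0: segment 2 fails, loop bails out */
	return 0;
}

With the old code, every successfully collected segment bumped the counter, so a partially collected section could still satisfy has_not_enough_free_secs() even though no whole section had actually been freed; the patched accounting only credits fully collected sections.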
Diffstat (limited to 'fs/f2fs')
-rw-r--r--   fs/f2fs/gc.c   20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 782b8e72c094..b6e03ebc703c 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -802,7 +802,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi)
 	unsigned int segno = NULL_SEGNO;
 	unsigned int i;
 	int gc_type = BG_GC;
-	int nfree = 0;
+	int sec_freed = 0;
 	int ret = -1;
 	struct cp_control cpc;
 	struct gc_inode_list gc_list = {
@@ -817,7 +817,7 @@ gc_more:
 	if (unlikely(f2fs_cp_error(sbi)))
 		goto stop;
-	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
+	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
 		gc_type = FG_GC;
 		if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
 			write_checkpoint(sbi, &cpc);
@@ -832,13 +832,23 @@ gc_more:
 	ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
 					META_SSA);
-	for (i = 0; i < sbi->segs_per_sec; i++)
-		nfree += do_garbage_collect(sbi, segno + i, &gc_list, gc_type);
+	for (i = 0; i < sbi->segs_per_sec; i++) {
+		/*
+		 * for FG_GC case, halt gcing left segments once failed one
+		 * of segments in selected section to avoid long latency.
+		 */
+		if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) &&
+				gc_type == FG_GC)
+			break;
+	}
+
+	if (i == sbi->segs_per_sec && gc_type == FG_GC)
+		sec_freed++;
 	if (gc_type == FG_GC)
 		sbi->cur_victim_sec = NULL_SEGNO;
-	if (has_not_enough_free_secs(sbi, nfree))
+	if (has_not_enough_free_secs(sbi, sec_freed))
 		goto gc_more;
 	if (gc_type == FG_GC)