-rw-r--r--   fs/ext4/extents.c |  1
-rw-r--r--   fs/ext4/inode.c   | 15
-rw-r--r--   fs/mpage.c        |  6
3 files changed, 19 insertions, 3 deletions
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e3a55eb8b26a..a953214f2829 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3150,6 +3150,7 @@ retry:
ret = PTR_ERR(handle);
break;
}
+ map_bh.b_state = 0;
ret = ext4_get_blocks_wrap(handle, inode, block,
max_blocks, &map_bh,
EXT4_CREATE_UNINITIALIZED_EXT, 0, 0);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2a9ffd528dd1..d7ad0bb73cd5 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2055,7 +2055,20 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
if ((mpd->b_state & (1 << BH_Mapped)) &&
!(mpd->b_state & (1 << BH_Delay)))
return 0;
- new.b_state = mpd->b_state;
+ /*
+ * We need to make sure the BH_Delay flag is passed down to
+ * ext4_da_get_block_write(), since it calls
+ * ext4_get_blocks_wrap() with the EXT4_DELALLOC_RSVED flag.
+ * This flag causes ext4_get_blocks_wrap() to call
+ * ext4_da_update_reserve_space() if the passed buffer head
+ * has the BH_Delay flag set. In the future, once we clean up
+ * the interfaces to ext4_get_blocks_wrap(), we should pass in
+ * a separate flag which requests that the delayed allocation
+ * statistics should be updated, instead of depending on the
+ * state information getting passed down via the map_bh's
+ * state bitmasks plus the magic EXT4_DELALLOC_RSVED flag.
+ */
+ new.b_state = mpd->b_state & (1 << BH_Delay);
new.b_blocknr = 0;
new.b_size = mpd->b_size;
next = mpd->b_blocknr;
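The comment added in this hunk explains that only the delayed-allocation information should travel from mpd->b_state into the temporary buffer_head handed to ext4_da_get_block_write(); copying the whole bitmask, as the old line did, would also carry along whatever other bits happen to be set. A minimal user-space sketch of the masking idiom follows; the DEMO_ flag positions are illustrative stand-ins, not the kernel's enum bh_state_bits values:

#include <stdio.h>

/* Illustrative flag positions only; the real values live in the
 * kernel's enum bh_state_bits. */
enum { DEMO_BH_Uptodate, DEMO_BH_Mapped, DEMO_BH_Delay };

int main(void)
{
	/* Accumulated state of the extent being built, with several
	 * bits set, as mpd->b_state can be by the time blocks are mapped. */
	unsigned long mpd_b_state = (1UL << DEMO_BH_Uptodate) |
				    (1UL << DEMO_BH_Mapped) |
				    (1UL << DEMO_BH_Delay);

	/* Old behaviour: every bit, including "already mapped",
	 * is copied into the scratch buffer_head. */
	unsigned long old_b_state = mpd_b_state;

	/* Patched behaviour: keep only the delayed-allocation bit, so the
	 * get_blocks path sees a clean, unmapped buffer_head that still
	 * records that reserved (delalloc) space is being consumed. */
	unsigned long new_b_state = mpd_b_state & (1UL << DEMO_BH_Delay);

	printf("old b_state = %#lx\n", old_b_state); /* prints 0x7 */
	printf("new b_state = %#lx\n", new_b_state); /* prints 0x4 */
	return 0;
}

In the real function the scratch variable is a struct buffer_head named new, so the same masking is written as new.b_state = mpd->b_state & (1 << BH_Delay), exactly the line this hunk adds.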
diff --git a/fs/mpage.c b/fs/mpage.c
index 680ba60863ff..42381bd6543b 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -379,7 +379,8 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
struct buffer_head map_bh;
unsigned long first_logical_block = 0;
- clear_buffer_mapped(&map_bh);
+ map_bh.b_state = 0;
+ map_bh.b_size = 0;
for (page_idx = 0; page_idx < nr_pages; page_idx++) {
struct page *page = list_entry(pages->prev, struct page, lru);
@@ -412,7 +413,8 @@ int mpage_readpage(struct page *page, get_block_t get_block)
struct buffer_head map_bh;
unsigned long first_logical_block = 0;
- clear_buffer_mapped(&map_bh);
+ map_bh.b_state = 0;
+ map_bh.b_size = 0;
bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
&map_bh, &first_logical_block, get_block);
if (bio)
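The extents.c and mpage.c hunks look like two instances of the same problem: map_bh is an on-stack buffer_head (and, in the extents.c loop, reused across iterations), so leftover or uninitialized BH_* bits and b_size values can be mistaken for real state, and clear_buffer_mapped() only clears the single BH_Mapped bit. A hypothetical stand-alone sketch (a simplified stand-in struct, not the kernel's struct buffer_head) of why zeroing the whole state is the safer reset:

#include <stdio.h>

/* Simplified stand-in for struct buffer_head; only the fields the
 * patch touches.  Flag positions are illustrative. */
struct demo_bh {
	unsigned long b_state;
	unsigned long b_size;
};

enum { DEMO_BH_Mapped, DEMO_BH_Delay, DEMO_BH_Unwritten };

/* Analogue of clear_buffer_mapped(): clears exactly one bit and
 * leaves everything else alone. */
static void demo_clear_mapped(struct demo_bh *bh)
{
	bh->b_state &= ~(1UL << DEMO_BH_Mapped);
}

int main(void)
{
	struct demo_bh bh;

	/* Pretend the struct starts out with stale contents, the way an
	 * uninitialized automatic variable or one reused from a previous
	 * loop iteration can. */
	bh.b_state = (1UL << DEMO_BH_Mapped) | (1UL << DEMO_BH_Unwritten);
	bh.b_size  = 4096;

	/* Old approach: only the mapped bit goes away; the stale
	 * unwritten bit and the stale b_size survive. */
	demo_clear_mapped(&bh);
	printf("clear_mapped: b_state=%#lx b_size=%lu\n",
	       bh.b_state, bh.b_size);   /* b_state=0x4 b_size=4096 */

	/* Patched approach: start every call from a fully known state. */
	bh.b_state = 0;
	bh.b_size  = 0;
	printf("zeroed:       b_state=%#lx b_size=%lu\n",
	       bh.b_state, bh.b_size);   /* b_state=0 b_size=0 */
	return 0;
}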