author    Nate Diller <nate.diller@gmail.com>	2007-05-12 10:36:54 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-12 10:55:39 -0700
commit    e3bf460f3eb86cdbc76725a0dac1f191e796676c (patch)
tree      7f11e16b4196735f62d4e3848f9084000b5e6b55 /fs/ntfs/file.c
parent    6d690dcac92a84f98fd774862628ff871b713660 (diff)
ntfs: use zero_user_page
Use zero_user_page() instead of open-coding it.

[akpm@linux-foundation.org: kmap-type fixes]
Signed-off-by: Nate Diller <nate.diller@gmail.com>
Acked-by: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/ntfs/file.c')
-rw-r--r--  fs/ntfs/file.c  59
1 file changed, 17 insertions(+), 42 deletions(-)
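
For context, the helper this patch switches to wraps the exact mapping/zero/flush sequence that every hunk below deletes. A minimal sketch, reconstructed from that removed pattern rather than quoted from include/linux/highmem.h, so the precise kernel definition may differ in detail (argument types, extra sanity checks):

/*
 * Sketch of the zero_user_page() helper this patch adopts, mirroring
 * the open-coded sequence the hunks below remove; the real definition
 * lives in include/linux/highmem.h and may differ slightly.
 */
static inline void zero_user_page(struct page *page, unsigned int offset,
				  unsigned int size, enum km_type kmtype)
{
	u8 *kaddr;

	kaddr = kmap_atomic(page, kmtype);	/* temporarily map the page */
	memset(kaddr + offset, 0, size);	/* zero the requested byte range */
	kunmap_atomic(kaddr, kmtype);		/* drop the temporary mapping */
	flush_dcache_page(page);		/* keep the data cache coherent */
}

With the helper in place, each call site below collapses the five-line boilerplate into a single call such as zero_user_page(page, bh_offset(bh), blocksize, KM_USER0), which is what the added lines in the diff do.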
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 621de369e6f8..39a1669506bd 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -606,11 +606,8 @@ do_next_page:
ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh;
} else {
- u8 *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + bh_offset(bh), 0,
- blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ zero_user_page(page, bh_offset(bh),
+ blocksize, KM_USER0);
set_buffer_uptodate(bh);
}
}
@@ -685,12 +682,9 @@ map_buffer_cached:
ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh;
} else {
- u8 *kaddr = kmap_atomic(page,
- KM_USER0);
- memset(kaddr + bh_offset(bh),
- 0, blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ zero_user_page(page,
+ bh_offset(bh),
+ blocksize, KM_USER0);
set_buffer_uptodate(bh);
}
}
@@ -708,11 +702,8 @@ map_buffer_cached:
*/
if (bh_end <= pos || bh_pos >= end) {
if (!buffer_uptodate(bh)) {
- u8 *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + bh_offset(bh), 0,
- blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ zero_user_page(page, bh_offset(bh),
+ blocksize, KM_USER0);
set_buffer_uptodate(bh);
}
mark_buffer_dirty(bh);
@@ -751,10 +742,8 @@ map_buffer_cached:
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) {
- u8 *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + bh_offset(bh), 0, blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ zero_user_page(page, bh_offset(bh), blocksize,
+ KM_USER0);
set_buffer_uptodate(bh);
}
continue;
@@ -878,11 +867,8 @@ rl_not_mapped_enoent:
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) {
- u8 *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + bh_offset(bh), 0,
- blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ zero_user_page(page, bh_offset(bh),
+ blocksize, KM_USER0);
set_buffer_uptodate(bh);
}
continue;
@@ -1137,16 +1123,12 @@ rl_not_mapped_enoent:
* to zero the overflowing region.
*/
if (unlikely(bh_pos + blocksize > initialized_size)) {
- u8 *kaddr;
int ofs = 0;
if (likely(bh_pos < initialized_size))
ofs = initialized_size - bh_pos;
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + bh_offset(bh) + ofs, 0,
- blocksize - ofs);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ zero_user_page(page, bh_offset(bh) + ofs,
+ blocksize - ofs, KM_USER0);
}
} else /* if (unlikely(!buffer_uptodate(bh))) */
err = -EIO;
@@ -1286,11 +1268,8 @@ rl_not_mapped_enoent:
if (PageUptodate(page))
set_buffer_uptodate(bh);
else {
- u8 *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + bh_offset(bh), 0,
- blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ zero_user_page(page, bh_offset(bh),
+ blocksize, KM_USER0);
set_buffer_uptodate(bh);
}
}
@@ -1350,9 +1329,7 @@ err_out:
len = PAGE_CACHE_SIZE;
if (len > bytes)
len = bytes;
- kaddr = kmap_atomic(*pages, KM_USER0);
- memset(kaddr, 0, len);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(*pages, 0, len, KM_USER0);
}
goto out;
}
@@ -1473,9 +1450,7 @@ err_out:
len = PAGE_CACHE_SIZE;
if (len > bytes)
len = bytes;
- kaddr = kmap_atomic(*pages, KM_USER0);
- memset(kaddr, 0, len);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(*pages, 0, len, KM_USER0);
}
goto out;
}