path: root/mm/filemap.c
author	Nick Piggin <npiggin@suse.de>	2007-10-16 01:24:59 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 09:42:55 -0700
commit	2f718ffc16c43a435d12919c75dbfad518abd056 (patch)
tree	55588cb2815d844e9d0b2404cf8ceafe98b5c55d /mm/filemap.c
parent	08291429cfa6258c4cd95d8833beb40f828b194e (diff)
mm: buffered write iterator
Add an iterator data structure to operate over an iovec. Add usercopy
operators needed by generic_file_buffered_write, and convert that function
over.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c | 144
1 file changed, 117 insertions(+), 27 deletions(-)
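
For reference while reading the diff, the iov_iter structure and the two
small helpers the converted code calls (iov_iter_init, iov_iter_count) are
added to include/linux/fs.h by this patch; the following is a rough sketch
reconstructed from how the fields are used below, not the authoritative
header definition:

	struct iov_iter {
		const struct iovec *iov;	/* current iovec */
		unsigned long nr_segs;		/* number of remaining segments */
		size_t iov_offset;		/* offset into the current iovec */
		size_t count;			/* total bytes remaining */
	};

	static inline void iov_iter_init(struct iov_iter *i,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count, size_t written)
	{
		i->iov = iov;
		i->nr_segs = nr_segs;
		i->iov_offset = 0;
		i->count = count + written;
		/* skip whatever a preceding (e.g. partial direct-IO) pass wrote */
		iov_iter_advance(i, written);
	}

	static inline size_t iov_iter_count(struct iov_iter *i)
	{
		return i->count;
	}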
diff --git a/mm/filemap.c b/mm/filemap.c
index 557fd887254f..67a03a0a9aee 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -30,7 +30,7 @@
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
-#include "filemap.h"
+#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include "internal.h"
/*
@@ -1635,8 +1635,7 @@ int remove_suid(struct dentry *dentry)
}
EXPORT_SYMBOL(remove_suid);
-size_t
-__filemap_copy_from_user_iovec_inatomic(char *vaddr,
+static size_t __iovec_copy_from_user_inatomic(char *vaddr,
const struct iovec *iov, size_t base, size_t bytes)
{
size_t copied = 0, left = 0;
@@ -1659,6 +1658,110 @@ __filemap_copy_from_user_iovec_inatomic(char *vaddr,
}
/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied. If a fault is encountered then return the number
+ * of bytes which were copied up to that point.
+ */
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ char *kaddr;
+ size_t copied;
+
+ BUG_ON(!in_atomic());
+ kaddr = kmap_atomic(page, KM_USER0);
+ if (likely(i->nr_segs == 1)) {
+ int left;
+ char __user *buf = i->iov->iov_base + i->iov_offset;
+ left = __copy_from_user_inatomic_nocache(kaddr + offset,
+ buf, bytes);
+ copied = bytes - left;
+ } else {
+ copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+ i->iov, i->iov_offset, bytes);
+ }
+ kunmap_atomic(kaddr, KM_USER0);
+
+ return copied;
+}
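+
As the later hunks show, this atomic variant is meant to be called with page
faults disabled; a minimal caller sketch, taken from the pattern this patch
itself uses in generic_file_buffered_write:

	pagefault_disable();
	copied = iov_iter_copy_from_user_atomic(page, &i, offset, bytes);
	pagefault_enable();
	/* copied < bytes means a fault was hit; the caller must fall
	   back to a path that can resolve faults, as seen below */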
+
+/*
+ * This has the same side effects and return value as
+ * iov_iter_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults, so the
+ * page must not be locked when it is called.
+ */
+size_t iov_iter_copy_from_user(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ char *kaddr;
+ size_t copied;
+
+ kaddr = kmap(page);
+ if (likely(i->nr_segs == 1)) {
+ int left;
+ char __user *buf = i->iov->iov_base + i->iov_offset;
+ left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+ copied = bytes - left;
+ } else {
+ copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+ i->iov, i->iov_offset, bytes);
+ }
+ kunmap(page);
+ return copied;
+}
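+
In the buffered-write loop below, this non-atomic variant serves the fallback
path: data is copied into an unlocked scratch page (src_page) first, because
resolving a fault while holding the destination page's lock could deadlock.
A minimal sketch of that call site, using names from the diff:

	/* src_page is not locked, so faults can be handled here */
	copied = iov_iter_copy_from_user(src_page, &i, offset, bytes);
	if (unlikely(copied == 0))
		status = -EFAULT;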
+
+static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
+{
+ if (likely(i->nr_segs == 1)) {
+ i->iov_offset += bytes;
+ } else {
+ const struct iovec *iov = i->iov;
+ size_t base = i->iov_offset;
+
+ while (bytes) {
+ int copy = min(bytes, iov->iov_len - base);
+
+ bytes -= copy;
+ base += copy;
+ if (iov->iov_len == base) {
+ iov++;
+ base = 0;
+ }
+ }
+ i->iov = iov;
+ i->iov_offset = base;
+ }
+}
+
+void iov_iter_advance(struct iov_iter *i, size_t bytes)
+{
+ BUG_ON(i->count < bytes);
+
+ __iov_iter_advance_iov(i, bytes);
+ i->count -= bytes;
+}
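+
A hypothetical worked example of advancing across a segment boundary
(buf0/buf1 are illustrative user pointers, not from this patch):

	struct iovec vec[2] = {
		{ .iov_base = buf0, .iov_len = 8 },
		{ .iov_base = buf1, .iov_len = 8 },
	};
	struct iov_iter it;

	iov_iter_init(&it, vec, 2, 16, 0);
	iov_iter_advance(&it, 10);
	/* now it.iov == &vec[1], it.iov_offset == 2, it.count == 6;
	   note that nr_segs is not changed by the advance in this version */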
+
+int iov_iter_fault_in_readable(struct iov_iter *i)
+{
+ size_t seglen = min(i->iov->iov_len - i->iov_offset, i->count);
+ char __user *buf = i->iov->iov_base + i->iov_offset;
+ return fault_in_pages_readable(buf, seglen);
+}
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+size_t iov_iter_single_seg_count(struct iov_iter *i)
+{
+ const struct iovec *iov = i->iov;
+ if (i->nr_segs == 1)
+ return i->count;
+ else
+ return min(i->count, iov->iov_len - i->iov_offset);
+}
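+
A hedged usage note: in the companion write path of this series (not in the
hunks below), a zero-byte atomic copy is retried with at most the current
segment, so that faulting in a single user page guarantees forward progress;
roughly:

	if (unlikely(copied == 0))
		bytes = min(bytes, iov_iter_single_seg_count(&i));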
+
+/*
* Performs necessary checks before doing a write
*
* Can adjust writing position or amount of bytes to write.
@@ -1816,30 +1919,22 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
const struct address_space_operations *a_ops = mapping->a_ops;
struct inode *inode = mapping->host;
long status = 0;
- const struct iovec *cur_iov = iov; /* current iovec */
- size_t iov_offset = 0; /* offset in the current iovec */
- char __user *buf;
+ struct iov_iter i;
- /*
- * handle partial DIO write. Adjust cur_iov if needed.
- */
- filemap_set_next_iovec(&cur_iov, nr_segs, &iov_offset, written);
+ iov_iter_init(&i, iov, nr_segs, count, written);
do {
struct page *src_page;
struct page *page;
pgoff_t index; /* Pagecache index for current page */
unsigned long offset; /* Offset into pagecache page */
- unsigned long seglen; /* Bytes remaining in current iovec */
unsigned long bytes; /* Bytes to write to page */
size_t copied; /* Bytes copied from user */
- buf = cur_iov->iov_base + iov_offset;
offset = (pos & (PAGE_CACHE_SIZE - 1));
index = pos >> PAGE_CACHE_SHIFT;
- bytes = PAGE_CACHE_SIZE - offset;
- if (bytes > count)
- bytes = count;
+ bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
+ iov_iter_count(&i));
/*
* a non-NULL src_page indicates that we're doing the
@@ -1847,10 +1942,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
*/
src_page = NULL;
- seglen = cur_iov->iov_len - iov_offset;
- if (seglen > bytes)
- seglen = bytes;
-
/*
* Bring in the user page that we will copy from _first_.
* Otherwise there's a nasty deadlock on copying from the
@@ -1861,7 +1952,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
* to check that the address is actually valid, when atomic
* usercopies are used, below.
*/
- if (unlikely(fault_in_pages_readable(buf, seglen))) {
+ if (unlikely(iov_iter_fault_in_readable(&i))) {
status = -EFAULT;
break;
}
@@ -1892,8 +1983,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
* same reason as we can't take a page fault with a
* page locked (as explained below).
*/
- copied = filemap_copy_from_user(src_page, offset,
- cur_iov, nr_segs, iov_offset, bytes);
+ copied = iov_iter_copy_from_user(src_page, &i,
+ offset, bytes);
if (unlikely(copied == 0)) {
status = -EFAULT;
page_cache_release(page);
@@ -1939,8 +2030,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
* really matter.
*/
pagefault_disable();
- copied = filemap_copy_from_user_atomic(page, offset,
- cur_iov, nr_segs, iov_offset, bytes);
+ copied = iov_iter_copy_from_user_atomic(page, &i,
+ offset, bytes);
pagefault_enable();
} else {
void *src, *dst;
@@ -1965,10 +2056,9 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
if (src_page)
page_cache_release(src_page);
+ iov_iter_advance(&i, copied);
written += copied;
- count -= copied;
pos += copied;
- filemap_set_next_iovec(&cur_iov, nr_segs, &iov_offset, copied);
balance_dirty_pages_ratelimited(mapping);
cond_resched();
@@ -1992,7 +2082,7 @@ fs_write_aop_error:
continue;
else
break;
- } while (count);
+ } while (iov_iter_count(&i));
*ppos = pos;
/*