author     Pavel Shilovsky <piastryyy@gmail.com>    2011-01-24 14:16:35 -0500
committer  Steve French <sfrench@us.ibm.com>        2011-01-25 19:30:13 +0000
commit     72432ffcf555decbbae47f1be338e1d2f210aa69 (patch)
tree       4293cb6e5b880071099756b7523f8f36bb7e16cc /fs/cifs/file.c
parent     93c100c0b423266c0ee28497e90fdf27c05e6b8e (diff)
CIFS: Implement cifs_strict_writev (try #4)
If we don't have an exclusive oplock, write the data to the server. Also set the invalid_mapping flag on the inode if we wrote something to the server. Add cifs_iovec_write to let the client write iovec buffers through CIFSSMBWrite2.

Signed-off-by: Pavel Shilovsky <piastryyy@gmail.com>
Reviewed-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <sfrench@us.ibm.com>
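For context, this is the kind of userspace call the new path serves. A minimal illustration, not part of the patch: the mount point below is hypothetical and the strict-cache mount option name varies across kernel releases, so treat both as assumptions. On such a mount, a writev(2) like this one reaches cifs_strict_writev and, absent an exclusive oplock, goes to the server through the uncached path added here.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical file on a strict-cache CIFS mount */
	int fd = open("/mnt/cifs/test.txt", O_WRONLY | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	char a[] = "hello ", b[] = "world\n";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = strlen(a) },
		{ .iov_base = b, .iov_len = strlen(b) },
	};

	/* two segments in one syscall; nr_segs == 2 in the code below */
	ssize_t n = writev(fd, iov, 2);
	if (n < 0)
		perror("writev");
	else
		printf("wrote %zd bytes\n", n);

	close(fd);
	return 0;
}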
Diffstat (limited to 'fs/cifs/file.c')
-rw-r--r--  fs/cifs/file.c  202
1 file changed, 201 insertions(+), 1 deletion(-)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d7d65a70678e..0de17c1db608 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -848,7 +848,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
}
/* update the file size (if needed) after a write */
-static void
+void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
unsigned int bytes_written)
{
@@ -1619,6 +1619,206 @@ int cifs_flush(struct file *file, fl_owner_t id)
return rc;
}
+static int
+cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
+{
+ int rc = 0;
+ unsigned long i;
+
+ for (i = 0; i < num_pages; i++) {
+ pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
+ if (!pages[i]) {
+ /*
+ * save number of pages we have already allocated and
+ * return with ENOMEM error
+ */
+ num_pages = i;
+ rc = -ENOMEM;
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ for (i = 0; i < num_pages; i++)
+ put_page(pages[i]);
+ return rc;
+}
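The error path above frees only the pages allocated before the failure. A userspace analogue of the same allocate-then-unwind idiom, with malloc/free standing in for alloc_page/put_page (illustration only, compiles as a standalone unit):

#include <stdlib.h>

static int allocate_buffers(void **bufs, unsigned long num, size_t size)
{
	unsigned long i;

	for (i = 0; i < num; i++) {
		bufs[i] = malloc(size);
		if (!bufs[i]) {
			/* unwind: free only what was allocated */
			while (i--)
				free(bufs[i]);
			return -1;
		}
	}
	return 0;
}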
+
+static inline
+size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
+{
+ size_t num_pages;
+ size_t clen;
+
+ clen = min_t(const size_t, len, wsize);
+ num_pages = clen / PAGE_CACHE_SIZE;
+ if (clen % PAGE_CACHE_SIZE)
+ num_pages++;
+
+ if (cur_len)
+ *cur_len = clen;
+
+ return num_pages;
+}
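A worked example of the arithmetic above, with assumed values (a 4096-byte page and wsize = 57344; neither number comes from this diff): for len = 100000 the first chunk is clen = 57344 bytes in 14 pages, the second 42656 bytes in 11 pages. The same computation as a standalone program:

#include <stdio.h>

#define PAGE_SIZE_BYTES 4096UL	/* assumed PAGE_CACHE_SIZE */

static unsigned long numpages(unsigned long wsize, unsigned long len,
			      unsigned long *cur_len)
{
	unsigned long clen = len < wsize ? len : wsize;

	if (cur_len)
		*cur_len = clen;
	/* round up to whole pages, as get_numpages does */
	return (clen + PAGE_SIZE_BYTES - 1) / PAGE_SIZE_BYTES;
}

int main(void)
{
	unsigned long len = 100000, wsize = 57344;	/* example values */

	while (len > 0) {
		unsigned long cur_len;
		unsigned long np = numpages(wsize, len, &cur_len);

		printf("chunk: %lu bytes in %lu page(s)\n", cur_len, np);
		len -= cur_len;	/* pretend the whole chunk was written */
	}
	return 0;
}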
+
+static ssize_t
+cifs_iovec_write(struct file *file, const struct iovec *iov,
+ unsigned long nr_segs, loff_t *poffset)
+{
+ size_t total_written = 0, written = 0;
+ unsigned long num_pages, npages;
+ size_t copied, len, cur_len, i;
+ struct kvec *to_send;
+ struct page **pages;
+ struct iov_iter it;
+ struct inode *inode;
+ struct cifsFileInfo *open_file;
+ struct cifsTconInfo *pTcon;
+ struct cifs_sb_info *cifs_sb;
+ int xid, rc;
+
+ len = iov_length(iov, nr_segs);
+ if (!len)
+ return 0;
+
+ rc = generic_write_checks(file, poffset, &len, 0);
+ if (rc)
+ return rc;
+
+ cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+ num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
+
+ pages = kmalloc(sizeof(struct page *)*num_pages, GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
+ if (!to_send) {
+ kfree(pages);
+ return -ENOMEM;
+ }
+
+ rc = cifs_write_allocate_pages(pages, num_pages);
+ if (rc) {
+ kfree(pages);
+ kfree(to_send);
+ return rc;
+ }
+
+ xid = GetXid();
+ open_file = file->private_data;
+ pTcon = tlink_tcon(open_file->tlink);
+ inode = file->f_path.dentry->d_inode;
+
+ iov_iter_init(&it, iov, nr_segs, len, 0);
+ npages = num_pages;
+
+ do {
+ size_t save_len = cur_len;
+ for (i = 0; i < npages; i++) {
+ copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
+ copied = iov_iter_copy_from_user(pages[i], &it, 0,
+ copied);
+ cur_len -= copied;
+ iov_iter_advance(&it, copied);
+ to_send[i+1].iov_base = kmap(pages[i]);
+ to_send[i+1].iov_len = copied;
+ }
+
+ cur_len = save_len - cur_len;
+
+ do {
+ if (open_file->invalidHandle) {
+ rc = cifs_reopen_file(open_file, false);
+ if (rc != 0)
+ break;
+ }
+ rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid,
+ cur_len, *poffset, &written,
+ to_send, npages, 0);
+ } while (rc == -EAGAIN);
+
+ for (i = 0; i < npages; i++)
+ kunmap(pages[i]);
+
+ if (written) {
+ len -= written;
+ total_written += written;
+ cifs_update_eof(CIFS_I(inode), *poffset, written);
+ *poffset += written;
+ } else if (rc < 0) {
+ if (!total_written)
+ total_written = rc;
+ break;
+ }
+
+ /* get length and number of kvecs of the next write */
+ npages = get_numpages(cifs_sb->wsize, len, &cur_len);
+ } while (len > 0);
+
+ if (total_written > 0) {
+ spin_lock(&inode->i_lock);
+ if (*poffset > inode->i_size)
+ i_size_write(inode, *poffset);
+ spin_unlock(&inode->i_lock);
+ }
+
+ cifs_stats_bytes_written(pTcon, total_written);
+ mark_inode_dirty_sync(inode);
+
+ for (i = 0; i < num_pages; i++)
+ put_page(pages[i]);
+ kfree(to_send);
+ kfree(pages);
+ FreeXid(xid);
+ return total_written;
+}
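Note the i+1 indexing into to_send in the loop above, and the num_pages + 1 kvecs allocated for it: the data starts at slot 1, which suggests slot 0 is reserved for the SMB header that CIFSSMBWrite2 fills in (an inference from this diff, not verified here against the transport code). A sketch of that layout with userspace stand-in types:

#include <stddef.h>

struct kvec {	/* stand-in for the kernel's struct kvec */
	void *iov_base;
	size_t iov_len;
};

/* Data kvecs occupy slots 1..npages; slot 0 is left for the header. */
static void fill_kvecs(struct kvec *to_send, void **mapped, size_t *lens,
		       unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; i++) {
		to_send[i + 1].iov_base = mapped[i];
		to_send[i + 1].iov_len = lens[i];
	}
}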
+
+static ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ ssize_t written;
+ struct inode *inode;
+
+ inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+ /*
+ * BB - optimize the path when signing is disabled: we could drop this
+ * extra memory-to-memory copy and use the iovec buffers directly to
+ * construct the write request.
+ */
+
+ written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
+ if (written > 0) {
+ CIFS_I(inode)->invalid_mapping = true;
+ iocb->ki_pos = pos;
+ }
+
+ return written;
+}
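Design note on invalid_mapping: the write above went straight to the wire, so any pages cached for this inode are now stale; setting the flag makes a later cached operation on the inode invalidate and revalidate its page cache (exactly where that revalidation happens depends on the rest of the strict cache series, so treat that as an assumption here).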
+
+ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct inode *inode;
+
+ inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+ if (CIFS_I(inode)->clientCanCacheAll)
+ return generic_file_aio_write(iocb, iov, nr_segs, pos);
+
+ /*
+ * In strict cache mode we need to write the data to the server exactly
+ * from pos to pos+len-1 rather than flush all affected pages, because
+ * flushing may cause an error with mandatory locks held on these pages
+ * but not on the region from pos to pos+len-1.
+ */
+
+ return cifs_user_writev(iocb, iov, nr_segs, pos);
+}
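Design note on the branch above: clientCanCacheAll is set only while the client holds an exclusive oplock, meaning no other client has the file open, so the local page cache cannot go stale and the generic cached write path is safe. Without the oplock, every write must go to the server so other clients can observe it and so mandatory byte-range locks are checked against exactly the written range, as the comment explains.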
+
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *poffset)