author		Trond Myklebust <Trond.Myklebust@netapp.com>	2005-06-22 17:16:30 +0000
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2005-06-22 16:07:38 -0400
commit		ab0a3dbedc51037f3d2e22ef67717a987b3d15e2 (patch)
tree		bda40d85d59a729fb7a9c2573a43d1820df9de3c /fs/nfs/write.c
parent		fe51beecc55d0b0dce289e4758e7c529a642f63e (diff)
[PATCH] NFS: Write optimization for short files and small O_SYNC writes.
Use stable writes if we can see that we are only going to put a single write on the wire.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
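
For context (this note is not part of the commit): an NFSv3 WRITE carries a stable_how argument. An UNSTABLE write lets the server reply before the data reaches stable storage, so the client must later issue a COMMIT; a stable (FILE_SYNC) write is durable once the reply arrives and needs no COMMIT. When an entire flush fits into a single WRITE RPC, requesting a stable write therefore saves one round trip, which is what the FLUSH_STABLE hint in the hunk below asks for. The helper underneath is a hypothetical sketch that only mirrors the heuristic this patch adds to nfs_flush_inode(); its name and signature are not kernel code.

/*
 * Hypothetical sketch (not kernel code): decide whether a flush can be
 * sent as a single stable WRITE.  'res' is the number of dirty requests
 * being flushed, 'npages' the total number of dirty requests on the
 * inode, 'wpages' the number of pages that fit in one WRITE RPC,
 * 'first_bytes' the byte count of the first request, and 'wsize' the
 * negotiated write size.
 */
static int want_stable_write(unsigned int res, unsigned int npages,
			     unsigned int wpages, unsigned int first_bytes,
			     unsigned int wsize)
{
	/* Must be flushing every dirty request, and they must fit in one RPC. */
	if (res != npages || npages > wpages)
		return 0;
	/*
	 * Several page-sized requests within wpages always fit in wsize;
	 * a single request may be a partial page, so check its byte count.
	 */
	return res > 1 || first_bytes <= wsize;
}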
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--	fs/nfs/write.c	14
1 file changed, 11 insertions, 3 deletions
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c574d551f029..79b621a545b2 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -750,7 +750,7 @@ int nfs_updatepage(struct file *file, struct page *page,
 	 * is entirely in cache, it may be more efficient to avoid
 	 * fragmenting write requests.
 	 */
-	if (PageUptodate(page) && inode->i_flock == NULL) {
+	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
 		loff_t end_offs = i_size_read(inode) - 1;
 		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;
 
@@ -1342,8 +1342,16 @@ static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
 	spin_lock(&nfsi->req_lock);
 	res = nfs_scan_dirty(inode, &head, idx_start, npages);
 	spin_unlock(&nfsi->req_lock);
-	if (res)
-		error = nfs_flush_list(&head, NFS_SERVER(inode)->wpages, how);
+	if (res) {
+		struct nfs_server *server = NFS_SERVER(inode);
+
+		/* For single writes, FLUSH_STABLE is more efficient */
+		if (res == nfsi->npages && nfsi->npages <= server->wpages) {
+			if (res > 1 || nfs_list_entry(head.next)->wb_bytes <= server->wsize)
+				how |= FLUSH_STABLE;
+		}
+		error = nfs_flush_list(&head, server->wpages, how);
+	}
 	if (error < 0)
 		return error;
 	return res;
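
The "small O_SYNC writes" case named in the commit title corresponds to a workload like the one below (a hypothetical user-space example, not part of the commit; the path is made up): with O_SYNC the client must flush the data before write() returns, and with this patch that flush can go out as a single stable WRITE instead of a WRITE followed by a COMMIT.

/* Hypothetical user-space example of the workload this patch targets. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/nfs/file", O_WRONLY | O_CREAT | O_SYNC, 0644);
	if (fd < 0)
		return 1;
	/* A small synchronous write: well under one wsize, so a single
	 * WRITE RPC covers it. */
	if (write(fd, "hello\n", 6) != 6)
		return 1;
	return close(fd);
}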