author    Trond Myklebust <Trond.Myklebust@netapp.com>    2007-04-10 09:26:35 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com>    2007-04-30 22:17:07 -0700
commit    8d5658c949e6d89edc579a1f112aeee3bc232a8e (patch)
tree      f206d3f6809eeb0ca23c1999cf79aa294968b113 /fs/nfs
parent    c63c7b051395368573779c8309aa5c990dcf2f96 (diff)
NFS: Fix a buffer overflow in the allocation of struct nfs_read/writedata
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
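
Before this patch, nfs_readdata_alloc() and nfs_writedata_alloc() took a byte length and sized the
page-pointer array as (len + PAGE_SIZE - 1) >> PAGE_SHIFT (see the removed lines in read.c and
write.c below). That count ignores the offset into the first page, so a request that does not start
on a page boundary can touch one more page than the array has room for. The helpers now take an
explicit page count, which callers derive with the new nfs_page_array_len() helper that folds the
base offset in. The following stand-alone sketch is illustrative only; it is not part of the patch
and assumes PAGE_SHIFT = 12 (4096-byte pages):

    /*
     * Illustrative only, not part of the patch: contrast the old
     * byte-length-based page count with nfs_page_array_len().
     */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* the calculation removed from nfs_readdata_alloc()/nfs_writedata_alloc() */
    static unsigned int old_pagecount(size_t len)
    {
            return (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    /* the new helper added to fs/nfs/internal.h by this patch */
    static unsigned int nfs_page_array_len(unsigned int base, size_t len)
    {
            return ((unsigned long)len + (unsigned long)base +
                            PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
            unsigned int pgbase = 512; /* request starts 512 bytes into its first page */
            size_t bytes = 8192;       /* two pages' worth of data */

            /* prints "old: 2 pages, new: 3 pages"; the data actually spans
             * three pages, so the old count left the array one entry short */
            printf("old: %u pages, new: %u pages\n",
                   old_pagecount(bytes), nfs_page_array_len(pgbase, bytes));
            return 0;
    }
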
Diffstat (limited to 'fs/nfs')
-rw-r--r--  fs/nfs/direct.c    |  5
-rw-r--r--  fs/nfs/internal.h  | 12
-rw-r--r--  fs/nfs/pagelist.c  | 10
-rw-r--r--  fs/nfs/read.c      | 19
-rw-r--r--  fs/nfs/write.c     | 11
5 files changed, 37 insertions(+), 20 deletions(-)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 2877744cb606..889de60f8a84 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -54,6 +54,7 @@
#include <asm/uaccess.h>
#include <asm/atomic.h>
+#include "internal.h"
#include "iostat.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -271,7 +272,7 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
bytes = min(rsize,count);
result = -ENOMEM;
- data = nfs_readdata_alloc(pgbase + bytes);
+ data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));
if (unlikely(!data))
break;
@@ -602,7 +603,7 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
bytes = min(wsize,count);
result = -ENOMEM;
- data = nfs_writedata_alloc(pgbase + bytes);
+ data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));
if (unlikely(!data))
break;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 6610f2b02077..ad2b40db1e65 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -231,3 +231,15 @@ unsigned int nfs_page_length(struct page *page)
}
return 0;
}
+
+/*
+ * Determine the number of pages in an array of length 'len' and
+ * with a base offset of 'base'
+ */
+static inline
+unsigned int nfs_page_array_len(unsigned int base, size_t len)
+{
+ return ((unsigned long)len + (unsigned long)base +
+ PAGE_SIZE - 1) >> PAGE_SHIFT;
+}
+
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ea1a85df9ab1..096efd73eb4c 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -18,6 +18,8 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
+#include "internal.h"
+
#define NFS_PARANOIA 1
static struct kmem_cache *nfs_page_cachep;
@@ -231,7 +233,7 @@ out:
*/
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
struct inode *inode,
- int (*doio)(struct inode *, struct list_head *, size_t, int),
+ int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
unsigned int bsize,
int io_flags)
{
@@ -298,8 +300,10 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
* since nfs_flush_multi and nfs_pagein_multi assume you
* can have only one struct nfs_page.
*/
+ if (desc->pg_bsize < PAGE_SIZE)
+ return 0;
newlen += desc->pg_count;
- if (desc->pg_base + newlen > desc->pg_bsize)
+ if (newlen > desc->pg_bsize)
return 0;
prev = nfs_list_entry(desc->pg_list.prev);
if (!nfs_can_coalesce_requests(prev, req))
@@ -320,6 +324,8 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
if (!list_empty(&desc->pg_list)) {
int error = desc->pg_doio(desc->pg_inode,
&desc->pg_list,
+ nfs_page_array_len(desc->pg_base,
+ desc->pg_count),
desc->pg_count,
desc->pg_ioflags);
if (error < 0)
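
A note on the coalescing change above: the descriptor now refuses to merge requests when the block
size is smaller than a page (those go through the one-RPC-per-nfs_page "multi" paths), and otherwise
limits the coalesced byte count, rather than base offset plus byte count, to pg_bsize. A stand-alone
sketch, illustrative only (the function name and sample values are hypothetical; a 4096-byte
PAGE_SIZE is assumed):

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL

    /* models the size check in nfs_pageio_do_add_request() after this patch */
    static int request_fits(size_t pg_bsize, size_t pg_count, size_t newlen)
    {
            /* sub-page rsize/wsize: never coalesce, since the "multi" paths
             * assume a single struct nfs_page per RPC */
            if (pg_bsize < PAGE_SIZE)
                    return 0;
            /* the base offset no longer counts against the limit */
            return newlen + pg_count <= pg_bsize;
    }

    int main(void)
    {
            /* 64k block size, 60k already queued, one more 4k request: fits */
            printf("%d\n", request_fits(16 * PAGE_SIZE, 15 * PAGE_SIZE, PAGE_SIZE));
            /* 2k block size: never coalesced */
            printf("%d\n", request_fits(PAGE_SIZE / 2, 0, 512));
            return 0;
    }
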
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index f0016062340d..9a55807b2a70 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -27,8 +27,8 @@
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
-static int nfs_pagein_multi(struct inode *, struct list_head *, size_t, int);
-static int nfs_pagein_one(struct inode *, struct list_head *, size_t, int);
+static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int);
+static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;
@@ -37,9 +37,8 @@ static mempool_t *nfs_rdata_mempool;
#define MIN_POOL_READ (32)
-struct nfs_read_data *nfs_readdata_alloc(size_t len)
+struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
- unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);
if (p) {
@@ -135,9 +134,9 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
nfs_list_add_request(new, &one_request);
if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
- nfs_pagein_multi(inode, &one_request, len, 0);
+ nfs_pagein_multi(inode, &one_request, 1, len, 0);
else
- nfs_pagein_one(inode, &one_request, len, 0);
+ nfs_pagein_one(inode, &one_request, 1, len, 0);
return 0;
}
@@ -234,7 +233,7 @@ static void nfs_execute_read(struct nfs_read_data *data)
* won't see the new data until our attribute cache is updated. This is more
* or less conventional NFS client behavior.
*/
-static int nfs_pagein_multi(struct inode *inode, struct list_head *head, size_t count, int flags)
+static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
struct nfs_page *req = nfs_list_entry(head->next);
struct page *page = req->wb_page;
@@ -250,7 +249,7 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, size_t
do {
size_t len = min(nbytes,rsize);
- data = nfs_readdata_alloc(len);
+ data = nfs_readdata_alloc(1);
if (!data)
goto out_bad;
INIT_LIST_HEAD(&data->pages);
@@ -291,13 +290,13 @@ out_bad:
return -ENOMEM;
}
-static int nfs_pagein_one(struct inode *inode, struct list_head *head, size_t count, int flags)
+static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
struct nfs_page *req;
struct page **pages;
struct nfs_read_data *data;
- data = nfs_readdata_alloc(count);
+ data = nfs_readdata_alloc(npages);
if (!data)
goto out_bad;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 6ce2d94e7b3f..0a8bbc399689 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -72,9 +72,8 @@ void nfs_commit_free(struct nfs_write_data *wdata)
call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}
-struct nfs_write_data *nfs_writedata_alloc(size_t len)
+struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
- unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
if (p) {
@@ -832,7 +831,7 @@ static void nfs_execute_write(struct nfs_write_data *data)
* Generate multiple small requests to write out a single
* contiguous dirty area on one page.
*/
-static int nfs_flush_multi(struct inode *inode, struct list_head *head, size_t count, int how)
+static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
struct nfs_page *req = nfs_list_entry(head->next);
struct page *page = req->wb_page;
@@ -848,7 +847,7 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, size_t c
do {
size_t len = min(nbytes, wsize);
- data = nfs_writedata_alloc(len);
+ data = nfs_writedata_alloc(1);
if (!data)
goto out_bad;
list_add(&data->pages, &list);
@@ -897,13 +896,13 @@ out_bad:
* This is the case if nfs_updatepage detects a conflicting request
* that has been written but not committed.
*/
-static int nfs_flush_one(struct inode *inode, struct list_head *head, size_t count, int how)
+static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
struct nfs_page *req;
struct page **pages;
struct nfs_write_data *data;
- data = nfs_writedata_alloc(count);
+ data = nfs_writedata_alloc(npages);
if (!data)
goto out_bad;