author		Linus Torvalds <torvalds@linux-foundation.org>		2014-04-12 14:49:50 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>		2014-04-12 14:49:50 -0700
commit		5166701b368caea89d57b14bf41cf39e819dad51 (patch)
tree		c73b9d4860809e3afa9359be9d03ba2d8d98a18e /include
parent		0a7418f5f569512e98789c439198eed4b507cce3 (diff)
parent		a786c06d9f2719203c00b3d97b21f9a96980d0b5 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs updates from Al Viro:
 "The first vfs pile, with deep apologies for being very late in this
  window.

  Assorted cleanups and fixes, plus a large preparatory part of iov_iter
  work.  There's a lot more of that, but it'll probably go into the next
  merge window - it *does* shape up nicely, removes a lot of boilerplate,
  gets rid of locking inconsistencies between aio_write and splice_write,
  and I hope to get Kent's direct-io rewrite merged into the same queue,
  but some of the stuff after this point is having (mostly trivial)
  conflicts with the things already merged into mainline, and for some of
  it I want more testing.

  This one passes LTP and xfstests without regressions, in addition to
  the usual beating.  BTW, readahead02 in the LTP syscalls testsuite has
  started giving failures since "mm/readahead.c: fix readahead failure
  for memoryless NUMA nodes and limit readahead pages" - might be a false
  positive, might be a real regression..."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (63 commits)
  missing bits of "splice: fix racy pipe->buffers uses"
  cifs: fix the race in cifs_writev()
  ceph_sync_{,direct_}write: fix an oops on ceph_osdc_new_request() failure
  kill generic_file_buffered_write()
  ocfs2_file_aio_write(): switch to generic_perform_write()
  ceph_aio_write(): switch to generic_perform_write()
  xfs_file_buffered_aio_write(): switch to generic_perform_write()
  export generic_perform_write(), start getting rid of generic_file_buffer_write()
  generic_file_direct_write(): get rid of ppos argument
  btrfs_file_aio_write(): get rid of ppos
  kill the 5th argument of generic_file_buffered_write()
  kill the 4th argument of __generic_file_aio_write()
  lustre: don't open-code kernel_recvmsg()
  ocfs2: don't open-code kernel_recvmsg()
  drbd: don't open-code kernel_recvmsg()
  constify blk_rq_map_user_iov() and friends
  lustre: switch to kernel_sendmsg()
  ocfs2: don't open-code kernel_sendmsg()
  take iov_iter stuff to mm/iov_iter.c
  process_vm_access: tidy up a bit
  ...
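The heart of the iov_iter preparation is visible in the include changes below: generic_perform_write() is exported with an iov_iter argument, and the iterator type and helpers move into <linux/uio.h>. As a purely illustrative sketch (not code from this series; the function name and locals are invented), a filesystem's buffered-write path built on the newly exported interface could look roughly like this:

/*
 * Illustrative sketch, not part of this commit: feed an iovec array to
 * the newly exported generic_perform_write() via an iov_iter, using the
 * iov_length()/iov_iter_init() helpers declared in <linux/uio.h>.
 */
static ssize_t example_buffered_write(struct kiocb *iocb,
                                      const struct iovec *iov,
                                      unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct iov_iter iter;
        size_t count = iov_length(iov, nr_segs);
        ssize_t written;

        /* start the iterator at the beginning of the iovec array */
        iov_iter_init(&iter, iov, nr_segs, count, 0);

        written = generic_perform_write(file, &iter, pos);
        if (written > 0)
                iocb->ki_pos = pos + written;
        return written;
}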
Diffstat (limited to 'include')
-rw-r--r--	include/linux/bio.h		5
-rw-r--r--	include/linux/blkdev.h		4
-rw-r--r--	include/linux/buffer_head.h	4
-rw-r--r--	include/linux/fdtable.h		2
-rw-r--r--	include/linux/fs.h		97
-rw-r--r--	include/linux/mount.h		3
-rw-r--r--	include/linux/nbd.h		3
-rw-r--r--	include/linux/pipe_fs_i.h	19
-rw-r--r--	include/linux/uio.h		52
9 files changed, 71 insertions, 118 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5aa372a7380c..bba550826921 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -388,7 +388,7 @@ struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
struct block_device *,
- struct sg_iovec *, int, int, gfp_t);
+ const struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
gfp_t);
@@ -414,7 +414,8 @@ extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
- struct rq_map_data *, struct sg_iovec *,
+ struct rq_map_data *,
+ const struct sg_iovec *,
int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 99617cf7dd1a..0d84981ee03f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -835,8 +835,8 @@ extern int blk_rq_map_user(struct request_queue *, struct request *,
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
+ struct rq_map_data *, const struct sg_iovec *,
+ int, unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index d77797a52b7b..c40302f909ce 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -210,8 +210,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
int block_write_full_page_endio(struct page *page, get_block_t *get_block,
struct writeback_control *wbc, bh_end_io_t *handler);
int block_read_full_page(struct page*, get_block_t*);
-int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
- unsigned long from);
+int block_is_partially_uptodate(struct page *page, unsigned long from,
+ unsigned long count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
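block_is_partially_uptodate() and the ->is_partially_uptodate() method lose their read_descriptor_t argument and now take a plain byte offset and count, so the buffer_head helper can still be wired straight into a filesystem's address_space_operations. A minimal illustrative sketch (not from this commit; the aops table is hypothetical):

#include <linux/fs.h>
#include <linux/buffer_head.h>

/*
 * Illustrative sketch, not part of this commit: with the new
 * (page, from, count) signature, the generic buffer_head helper still
 * matches the ->is_partially_uptodate() method type directly.
 */
static const struct address_space_operations example_aops = {
        .is_partially_uptodate  = block_is_partially_uptodate,
};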
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 70e8e21c0a30..230f87bdf5ad 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -63,8 +63,6 @@ struct file_operations;
struct vfsmount;
struct dentry;
-extern void __init files_defer_init(void);
-
#define rcu_dereference_check_fdtable(files, fdtfd) \
rcu_dereference_check((fdtfd), lockdep_is_held(&(files)->file_lock))
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 81048f9bc783..7a9c5bca2b76 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -48,6 +48,7 @@ struct cred;
struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
+struct iov_iter;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -125,6 +126,8 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* File needs atomic accesses to f_pos */
#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
+/* Write access to underlying fs */
+#define FMODE_WRITER ((__force fmode_t)0x10000)
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
@@ -293,38 +296,6 @@ struct page;
struct address_space;
struct writeback_control;
-struct iov_iter {
- const struct iovec *iov;
- unsigned long nr_segs;
- size_t iov_offset;
- size_t count;
-};
-
-size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_from_user(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes);
-void iov_iter_advance(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-size_t iov_iter_single_seg_count(const struct iov_iter *i);
-
-static inline void iov_iter_init(struct iov_iter *i,
- const struct iovec *iov, unsigned long nr_segs,
- size_t count, size_t written)
-{
- i->iov = iov;
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count + written;
-
- iov_iter_advance(i, written);
-}
-
-static inline size_t iov_iter_count(struct iov_iter *i)
-{
- return i->count;
-}
-
/*
* "descriptor" for what we're up to with a read.
* This allows us to use the same read code yet
@@ -383,7 +354,7 @@ struct address_space_operations {
int (*migratepage) (struct address_space *,
struct page *, struct page *, enum migrate_mode);
int (*launder_page) (struct page *);
- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
+ int (*is_partially_uptodate) (struct page *, unsigned long,
unsigned long);
void (*is_dirty_writeback) (struct page *, bool *, bool *);
int (*error_remove_page)(struct address_space *, struct page *);
@@ -770,9 +741,6 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
index < ra->start + ra->size);
}
-#define FILE_MNT_WRITE_TAKEN 1
-#define FILE_MNT_WRITE_RELEASED 2
-
struct file {
union {
struct llist_node fu_llist;
@@ -810,9 +778,6 @@ struct file {
struct list_head f_tfile_llink;
#endif /* #ifdef CONFIG_EPOLL */
struct address_space *f_mapping;
-#ifdef CONFIG_DEBUG_WRITECOUNT
- unsigned long f_mnt_write_state;
-#endif
} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
struct file_handle {
@@ -830,49 +795,6 @@ static inline struct file *get_file(struct file *f)
#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
#define file_count(x) atomic_long_read(&(x)->f_count)
-#ifdef CONFIG_DEBUG_WRITECOUNT
-static inline void file_take_write(struct file *f)
-{
- WARN_ON(f->f_mnt_write_state != 0);
- f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
-}
-static inline void file_release_write(struct file *f)
-{
- f->f_mnt_write_state |= FILE_MNT_WRITE_RELEASED;
-}
-static inline void file_reset_write(struct file *f)
-{
- f->f_mnt_write_state = 0;
-}
-static inline void file_check_state(struct file *f)
-{
- /*
- * At this point, either both or neither of these bits
- * should be set.
- */
- WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN);
- WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_RELEASED);
-}
-static inline int file_check_writeable(struct file *f)
-{
- if (f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN)
- return 0;
- printk(KERN_WARNING "writeable file with no "
- "mnt_want_write()\n");
- WARN_ON(1);
- return -EINVAL;
-}
-#else /* !CONFIG_DEBUG_WRITECOUNT */
-static inline void file_take_write(struct file *filp) {}
-static inline void file_release_write(struct file *filp) {}
-static inline void file_reset_write(struct file *filp) {}
-static inline void file_check_state(struct file *filp) {}
-static inline int file_check_writeable(struct file *filp)
-{
- return 0;
-}
-#endif /* CONFIG_DEBUG_WRITECOUNT */
-
#define MAX_NON_LFS ((1UL<<31) - 1)
/* Page cache limit. The filesystems should put that into their s_maxbytes
@@ -2481,16 +2403,13 @@ extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
unsigned long size, pgoff_t pgoff);
-extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
-extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long,
- loff_t *);
+extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long);
extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
- unsigned long *, loff_t, loff_t *, size_t, size_t);
-extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
- unsigned long, loff_t, loff_t *, size_t, ssize_t);
+ unsigned long *, loff_t, size_t, size_t);
+extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
extern int generic_segment_checks(const struct iovec *iov,
@@ -2582,7 +2501,7 @@ extern const struct file_operations generic_ro_fops;
#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
-extern int vfs_readlink(struct dentry *, char __user *, int, const char *);
+extern int readlink_copy(char __user *, int, const char *);
extern int page_readlink(struct dentry *, char __user *, int);
extern void *page_follow_link_light(struct dentry *, struct nameidata *);
extern void page_put_link(struct dentry *, struct nameidata *, void *);
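vfs_readlink() is renamed to readlink_copy() and drops its dentry argument. A filesystem that keeps its link target as an in-core string can answer ->readlink() with it directly; a minimal illustrative sketch (not from this commit; the stored target is hypothetical):

/*
 * Illustrative sketch, not part of this commit: copy an in-core link
 * target out to userspace with the renamed readlink_copy() helper.
 * A real filesystem would fetch the target from its inode state.
 */
static int example_readlink(struct dentry *dentry, char __user *buf, int buflen)
{
        const char *target = "example-target";  /* hypothetical stored target */

        return readlink_copy(buf, buflen, target);
}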
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 371d346fa270..839bac270904 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -44,6 +44,8 @@ struct mnt_namespace;
#define MNT_SHARED_MASK (MNT_UNBINDABLE)
#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE)
+#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
+ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
#define MNT_INTERNAL 0x4000
@@ -51,6 +53,7 @@ struct mnt_namespace;
#define MNT_LOCKED 0x800000
#define MNT_DOOMED 0x1000000
#define MNT_SYNC_UMOUNT 0x2000000
+#define MNT_MARKED 0x4000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index ae4981ebd18e..f62f78aef4ac 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -24,8 +24,7 @@ struct request;
struct nbd_device {
int flags;
int harderror; /* Code of hard error */
- struct socket * sock;
- struct file * file; /* If == NULL, device is not ready, yet */
+ struct socket * sock; /* If == NULL, device is not ready, yet */
int magic;
spinlock_t queue_lock;
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 4d9389c79e61..eb8b8ac6df3c 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -83,23 +83,6 @@ struct pipe_buf_operations {
int can_merge;
/*
- * ->map() returns a virtual address mapping of the pipe buffer.
- * The last integer flag reflects whether this should be an atomic
- * mapping or not. The atomic map is faster, however you can't take
- * page faults before calling ->unmap() again. So if you need to eg
- * access user data through copy_to/from_user(), then you must get
- * a non-atomic map. ->map() uses the kmap_atomic slot for
- * atomic maps, you have to be careful if mapping another page as
- * source or destination for a copy.
- */
- void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);
-
- /*
- * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
- */
- void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);
-
- /*
* ->confirm() verifies that the data in the pipe buffer is there
* and that the contents are good. If the pages in the pipe belong
* to a file system, we may need to wait for IO completion in this
@@ -150,8 +133,6 @@ struct pipe_inode_info *alloc_pipe_info(void);
void free_pipe_info(struct pipe_inode_info *);
/* Generic pipe buffer ops functions */
-void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
-void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
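With ->map()/->unmap() and the generic_pipe_buf_map()/generic_pipe_buf_unmap() helpers gone, consumers of pipe buffers map the backing page themselves. A minimal illustrative sketch of that replacement pattern (not from this commit; it assumes the buffer was already confirmed via ops->confirm()):

/*
 * Illustrative sketch, not part of this commit: read bytes out of a
 * pipe buffer by mapping its page directly rather than through the
 * removed ->map()/->unmap() operations.
 */
static void example_copy_from_pipe_buf(struct pipe_buffer *buf,
                                       void *dst, size_t len)
{
        char *src = kmap_atomic(buf->page);     /* was generic_pipe_buf_map() */

        memcpy(dst, src + buf->offset, len);
        kunmap_atomic(src);                     /* was generic_pipe_buf_unmap() */
}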
diff --git a/include/linux/uio.h b/include/linux/uio.h
index c55ce243cc09..199bcc34241b 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -9,14 +9,23 @@
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H
+#include <linux/kernel.h>
#include <uapi/linux/uio.h>
+struct page;
struct kvec {
void *iov_base; /* and that should *never* hold a userland pointer */
size_t iov_len;
};
+struct iov_iter {
+ const struct iovec *iov;
+ unsigned long nr_segs;
+ size_t iov_offset;
+ size_t count;
+};
+
/*
* Total number of bytes covered by an iovec.
*
@@ -34,8 +43,51 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
return ret;
}
+static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
+{
+ return (struct iovec) {
+ .iov_base = iter->iov->iov_base + iter->iov_offset,
+ .iov_len = min(iter->count,
+ iter->iov->iov_len - iter->iov_offset),
+ };
+}
+
+#define iov_for_each(iov, iter, start) \
+ for (iter = (start); \
+ (iter).count && \
+ ((iov = iov_iter_iovec(&(iter))), 1); \
+ iov_iter_advance(&(iter), (iov).iov_len))
+
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes);
+size_t iov_iter_copy_from_user(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes);
+void iov_iter_advance(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+size_t iov_iter_single_seg_count(const struct iov_iter *i);
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i);
+
+static inline void iov_iter_init(struct iov_iter *i,
+ const struct iovec *iov, unsigned long nr_segs,
+ size_t count, size_t written)
+{
+ i->iov = iov;
+ i->nr_segs = nr_segs;
+ i->iov_offset = 0;
+ i->count = count + written;
+
+ iov_iter_advance(i, written);
+}
+
+static inline size_t iov_iter_count(struct iov_iter *i)
+{
+ return i->count;
+}
+
int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
+
#endif
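The new iov_iter_iovec() helper and iov_for_each() macro let callers walk the remaining segments of an iterator without poking at its internals. A small illustrative sketch (not from this commit) that totals the bytes left in an iterator:

/*
 * Illustrative sketch, not part of this commit: walk a copy of the
 * iterator segment by segment with iov_for_each() and add up the
 * remaining bytes (the result matches iov_iter_count() on the
 * original, since only the copy is advanced).
 */
static size_t example_bytes_remaining(const struct iov_iter *start)
{
        struct iov_iter iter;
        struct iovec iov;
        size_t total = 0;

        iov_for_each(iov, iter, *start)
                total += iov.iov_len;

        return total;
}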