author     Otavio Salvador <otavio@ossystems.com.br>   2016-10-26 10:28:23 -0200
committer  Otavio Salvador <otavio@ossystems.com.br>   2016-10-26 10:28:23 -0200
commit     d7568534f54f5eb1efa4143ba09a77116f964c80 (patch)
tree       f0baf6887b0634658ca57ce2cdfda7b5355a7df7 /include
parent     34002442b17af7220fa52b2d153db90685113947 (diff)
parent     9ca365c0c8bdd8552ec064f0e696600cf7ea66dd (diff)
Merge tag 'v4.1.35' into 4.1-2.0.x-imx
Linux 4.1.35

* tag 'v4.1.35': (98 commits)
  Linux 4.1.35
  xhci: fix usb2 resume timing and races.
  mm: remove gup_flags FOLL_WRITE games from __get_user_pages()
  timekeeping: Fix __ktime_get_fast_ns() regression
  time: Add cycles to nanoseconds translation
  Linux 4.1.34
  openrisc: fix the fix of copy_from_user()
  avr32: fix 'undefined reference to `___copy_from_user'
  fix memory leaks in tracing_buffers_splice_read()
  tracing: Move mutex to protect against resetting of seq data
  MIPS: SMP: Fix possibility of deadlock when bringing CPUs online
  MIPS: Fix pre-r6 emulation FPU initialisation
  btrfs: ensure that file descriptor used with subvol ioctls is a dir
  fix fault_in_multipages_...() on architectures with no-op access_ok()
  ocfs2: fix start offset to ocfs2_zero_range_for_truncate()
  fanotify: fix list corruption in fanotify_get_response()
  fsnotify: add a way to stop queueing events on group shutdown
  autofs: use dentry flags to block walks during expire
  autofs races
  ocfs2/dlm: fix race between convert and migration
  ...

Signed-off-by: Otavio Salvador <otavio@ossystems.com.br>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/uaccess.h      30
-rw-r--r--  include/linux/fsnotify_backend.h    6
-rw-r--r--  include/linux/irq.h                10
-rw-r--r--  include/linux/mm.h                  1
-rw-r--r--  include/linux/pagemap.h            38
-rw-r--r--  include/linux/uio.h                 2

6 files changed, 53 insertions(+), 34 deletions(-)
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 72d8803832ff..32901d11f8c4 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -163,9 +163,10 @@ static inline __must_check long __copy_to_user(void __user *to,
#define put_user(x, ptr) \
({ \
+ void *__p = (ptr); \
might_fault(); \
- access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ? \
- __put_user(x, ptr) : \
+ access_ok(VERIFY_WRITE, __p, sizeof(*ptr)) ? \
+ __put_user((x), ((__typeof__(*(ptr)) *)__p)) : \
-EFAULT; \
})
@@ -225,17 +226,22 @@ extern int __put_user_bad(void) __attribute__((noreturn));
#define get_user(x, ptr) \
({ \
+ const void *__p = (ptr); \
might_fault(); \
- access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \
- __get_user(x, ptr) : \
- -EFAULT; \
+ access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \
+ __get_user((x), (__typeof__(*(ptr)) *)__p) : \
+ ((x) = (__typeof__(*(ptr)))0,-EFAULT); \
})
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
- size = __copy_from_user(x, ptr, size);
- return size ? -EFAULT : size;
+ size_t n = __copy_from_user(x, ptr, size);
+ if (unlikely(n)) {
+ memset(x + (size - n), 0, n);
+ return -EFAULT;
+ }
+ return 0;
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
@@ -255,11 +261,13 @@ extern int __get_user_bad(void) __attribute__((noreturn));
static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
+ unsigned long res = n;
might_fault();
- if (access_ok(VERIFY_READ, from, n))
- return __copy_from_user(to, from, n);
- else
- return n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = __copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
static inline long copy_to_user(void __user *to,
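The new get_user()/copy_from_user() bodies above zero whatever part of the kernel buffer __copy_from_user() failed to fill, so a short copy can no longer hand stale kernel memory back to the caller. A minimal user-space model of that semantics, with a hypothetical copy_partial() standing in for a partially faulting __copy_from_user():

#include <stdio.h>
#include <string.h>

/*
 * Hypothetical stand-in for __copy_from_user(): copies up to fault_at
 * bytes and returns how many bytes were left uncopied, mirroring what the
 * real helper reports when it runs into an unmapped user page.
 */
static size_t copy_partial(void *to, const void *from, size_t n, size_t fault_at)
{
	size_t copied = fault_at < n ? fault_at : n;

	memcpy(to, from, copied);
	return n - copied;			/* bytes NOT copied */
}

int main(void)
{
	char src[16] = "user data here";
	char dst[16];
	size_t n = sizeof(dst);
	size_t res;

	memset(dst, 0xAA, n);			/* pretend dst holds stale kernel data */

	res = copy_partial(dst, src, n, 8);	/* "fault" after 8 bytes */
	if (res)
		memset(dst + (n - res), 0, res);	/* the same tail-zeroing the patch adds */

	for (size_t i = 0; i < n; i++)
		printf("%02x ", (unsigned char)dst[i]);
	printf("\n");				/* tail is zeroes, not the stale 0xAA bytes */
	return 0;
}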
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 0f313f93c586..46dde3a3c891 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -150,6 +150,7 @@ struct fsnotify_group {
#define FS_PRIO_1 1 /* fanotify content based access control */
#define FS_PRIO_2 2 /* fanotify pre-content access */
unsigned int priority;
+ bool shutdown; /* group is being shut down, don't queue more events */
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
struct mutex mark_mutex; /* protect marks_list */
@@ -181,7 +182,6 @@ struct fsnotify_group {
spinlock_t access_lock;
struct list_head access_list;
wait_queue_head_t access_waitq;
- atomic_t bypass_perm;
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
int f_flags;
unsigned int max_marks;
@@ -301,6 +301,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
+/* group destruction begins, stop queuing new events */
+extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
/* destroy group */
extern void fsnotify_destroy_group(struct fsnotify_group *group);
/* fasync handler function */
@@ -313,8 +315,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct list_head *,
struct fsnotify_event *));
-/* Remove passed event from groups notification queue */
-extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
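The bypass_perm counter is gone; instead the group carries a shutdown flag that fsnotify_group_stop_queueing() sets under the notification lock, and the add-event path refuses to queue once it is set, so teardown can drain the queue without racing against new events. A rough user-space model of that check-under-lock pattern (the struct and function names here are illustrative, not the kernel's):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy "group": a flag plus the lock that protects the (omitted) event list. */
struct group {
	pthread_mutex_t lock;
	bool shutdown;
	int queued;
};

/* Analogous to fsnotify_group_stop_queueing(): set the flag under the lock. */
static void group_stop_queueing(struct group *g)
{
	pthread_mutex_lock(&g->lock);
	g->shutdown = true;
	pthread_mutex_unlock(&g->lock);
}

/* Analogous to the add-event path: refuse new events once shutdown is set. */
static int group_add_event(struct group *g)
{
	int ret = 0;

	pthread_mutex_lock(&g->lock);
	if (g->shutdown)
		ret = -1;	/* caller must not wait for a response */
	else
		g->queued++;
	pthread_mutex_unlock(&g->lock);
	return ret;
}

int main(void)
{
	struct group g = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("add before shutdown: %d\n", group_add_event(&g));	/* 0 */
	group_stop_queueing(&g);
	printf("add after shutdown:  %d\n", group_add_event(&g));	/* -1 */
	return 0;
}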
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3532dca843f4..33475a37f1bb 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -852,6 +852,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
+/*
+ * The irqsave variants are for usage in non interrupt code. Do not use
+ * them in irq_chip callbacks. Use irq_gc_lock() instead.
+ */
+#define irq_gc_lock_irqsave(gc, flags) \
+ raw_spin_lock_irqsave(&(gc)->lock, flags)
+
+#define irq_gc_unlock_irqrestore(gc, flags) \
+ raw_spin_unlock_irqrestore(&(gc)->lock, flags)
+
static inline void irq_reg_writel(struct irq_chip_generic *gc,
u32 val, int reg_offset)
{
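The comment block added above spells out the contract: irq_chip callbacks keep using irq_gc_lock(), while code that can run with interrupts enabled, such as a driver's suspend path, takes the _irqsave variants. A hedged sketch of such a call site (the helper name and the 0x10 register offset are made up for illustration):

#include <linux/irq.h>

/*
 * Hypothetical suspend-path helper: runs in process context, so it must
 * save and restore the interrupt state around the generic-chip lock.
 */
static void example_gc_suspend(struct irq_chip_generic *gc, u32 wake_mask)
{
	unsigned long flags;

	irq_gc_lock_irqsave(gc, flags);
	irq_reg_writel(gc, wake_mask, 0x10);	/* 0x10: made-up wake-enable register */
	irq_gc_unlock_irqrestore(gc, flags);
}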
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6b85ec64d302..7cadf0a660e7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2064,6 +2064,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
+#define FOLL_COW 0x4000 /* internal GUP flag */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
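FOLL_COW is the gup-internal marker from the Dirty COW (CVE-2016-5195) fix, which the shortlog lists as "mm: remove gup_flags FOLL_WRITE games from __get_user_pages()": a forced write through get_user_pages() may only proceed on a non-writable PTE if this same walk already broke COW and the PTE is dirty; otherwise the fault must be retried. The enforcing check lives in mm/gup.c, not in this header; the user-space model below only sketches the resulting predicate with simplified types:

#include <stdbool.h>
#include <stdio.h>

#define FOLL_WRITE	0x01
#define FOLL_FORCE	0x10
#define FOLL_COW	0x4000		/* same value the patch adds */

/* Simplified PTE: just the two bits the check cares about. */
struct fake_pte {
	bool write;	/* mapped writable */
	bool dirty;	/* written through, i.e. this walk did the COW */
};

/*
 * Model of the post-fix logic: a forced write is only allowed on a
 * read-only PTE when FOLL_COW says our own walk broke COW and the PTE is
 * dirty; otherwise the caller has to fault again.
 */
static bool can_follow_write(struct fake_pte pte, unsigned int flags)
{
	return pte.write ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte.dirty);
}

int main(void)
{
	struct fake_pte ro_clean = { .write = false, .dirty = false };
	struct fake_pte ro_cow   = { .write = false, .dirty = true  };

	/* Forced write without FOLL_COW: must not be allowed (the old bug). */
	printf("%d\n", can_follow_write(ro_clean, FOLL_WRITE | FOLL_FORCE));
	/* Forced write after this walk's own COW: allowed. */
	printf("%d\n", can_follow_write(ro_cow, FOLL_WRITE | FOLL_FORCE | FOLL_COW));
	return 0;
}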
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4b3736f7065c..30a8f531236c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -594,56 +594,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
*/
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
- int ret = 0;
char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
- return ret;
+ return 0;
+ if (unlikely(uaddr > end))
+ return -EFAULT;
/*
* Writing zeroes into userspace here is OK, because we know that if
* the zero gets there, we'll be overwriting it.
*/
- while (uaddr <= end) {
- ret = __put_user(0, uaddr);
- if (ret != 0)
- return ret;
+ do {
+ if (unlikely(__put_user(0, uaddr) != 0))
+ return -EFAULT;
uaddr += PAGE_SIZE;
- }
+ } while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK))
- ret = __put_user(0, end);
+ return __put_user(0, end);
- return ret;
+ return 0;
}
static inline int fault_in_multipages_readable(const char __user *uaddr,
int size)
{
volatile char c;
- int ret = 0;
const char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
- return ret;
+ return 0;
- while (uaddr <= end) {
- ret = __get_user(c, uaddr);
- if (ret != 0)
- return ret;
+ if (unlikely(uaddr > end))
+ return -EFAULT;
+
+ do {
+ if (unlikely(__get_user(c, uaddr) != 0))
+ return -EFAULT;
uaddr += PAGE_SIZE;
- }
+ } while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK)) {
- ret = __get_user(c, end);
- (void)c;
+ return __get_user(c, end);
}
- return ret;
+ return 0;
}
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
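Both fault-in helpers now reject a range whose end address wraps (uaddr > end after adding size - 1) and probe one byte per page with a do/while loop, plus the final byte when the range spills into a page the stride skipped. A small user-space model of that stride arithmetic, with PAGE_SIZE hard-coded to 4096 for illustration:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Count how many probe accesses the rewritten loop performs for a range. */
static int fault_in_model(uintptr_t uaddr, unsigned long size)
{
	uintptr_t end = uaddr + size - 1;
	int probed = 0;

	if (size == 0)
		return 0;
	if (uaddr > end)	/* range wrapped around the address space */
		return -EFAULT;

	do {
		probed++;	/* stands in for __get_user()/__put_user() of one byte */
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* If end's page was skipped by the stride, the last byte is probed too. */
	if ((uaddr & PAGE_MASK) == (end & PAGE_MASK))
		probed++;

	printf("probed %d page(s)\n", probed);
	return 0;
}

int main(void)
{
	printf("-> %d\n", fault_in_model(0x1000, 1));		/* one page */
	printf("-> %d\n", fault_in_model(0x1ff0, 32));		/* spills into a second page */
	printf("-> %d\n", fault_in_model(UINTPTR_MAX - 8, 64));	/* wraps: -EFAULT */
	return 0;
}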
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 8b01e1c3c614..5f9c59da978b 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -76,7 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
+#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);