Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/futex.h | 8
-rw-r--r--  include/asm-generic/getorder.h | 50
-rw-r--r--  include/drm/drm_debugfs_crc.h | 3
-rw-r--r--  include/drm/drm_vma_manager.h | 1
-rw-r--r--  include/dt-bindings/clock/rk3328-cru.h | 2
-rw-r--r--  include/kvm/arm_vgic.h | 1
-rw-r--r--  include/linux/acpi.h | 7
-rw-r--r--  include/linux/atalk.h | 2
-rw-r--r--  include/linux/bio.h | 9
-rw-r--r--  include/linux/bitmap.h | 9
-rw-r--r--  include/linux/blktrace_api.h | 8
-rw-r--r--  include/linux/bug.h | 5
-rw-r--r--  include/linux/ccp.h | 2
-rw-r--r--  include/linux/ceph/buffer.h | 3
-rw-r--r--  include/linux/cgroup-defs.h | 1
-rw-r--r--  include/linux/cgroup.h | 14
-rw-r--r--  include/linux/coda.h | 3
-rw-r--r--  include/linux/coda_psdev.h | 11
-rw-r--r--  include/linux/compat.h | 2
-rw-r--r--  include/linux/compiler.h | 27
-rw-r--r--  include/linux/cpu.h | 30
-rw-r--r--  include/linux/cpufeature.h | 2
-rw-r--r--  include/linux/cpuhotplug.h | 4
-rw-r--r--  include/linux/cred.h | 7
-rw-r--r--  include/linux/dma-mapping.h | 3
-rw-r--r--  include/linux/edac.h | 3
-rw-r--r--  include/linux/fb.h | 3
-rw-r--r--  include/linux/filter.h | 1
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/futex.h | 44
-rw-r--r--  include/linux/genalloc.h | 13
-rw-r--r--  include/linux/gfp.h | 23
-rw-r--r--  include/linux/gpio.h | 24
-rw-r--r--  include/linux/gpio/consumer.h | 2
-rw-r--r--  include/linux/ieee80211.h | 53
-rw-r--r--  include/linux/if_pppox.h | 3
-rw-r--r--  include/linux/intel-iommu.h | 6
-rw-r--r--  include/linux/iova.h | 6
-rw-r--r--  include/linux/jbd2.h | 26
-rw-r--r--  include/linux/kernel.h | 3
-rw-r--r--  include/linux/kernfs.h | 1
-rw-r--r--  include/linux/kvm_host.h | 12
-rw-r--r--  include/linux/libfdt_env.h | 1
-rw-r--r--  include/linux/memory_hotplug.h | 1
-rw-r--r--  include/linux/mfd/intel_soc_pmic.h | 1
-rw-r--r--  include/linux/mfd/max8997.h | 1
-rw-r--r--  include/linux/mfd/mc13xxx.h | 1
-rw-r--r--  include/linux/mfd/palmas.h | 3
-rw-r--r--  include/linux/mfd/rk808.h | 2
-rw-r--r--  include/linux/mlx5/fs.h | 1
-rw-r--r--  include/linux/mm.h | 5
-rw-r--r--  include/linux/mm_types.h | 5
-rw-r--r--  include/linux/mtd/mtd.h | 2
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/nfs_page.h | 10
-rw-r--r--  include/linux/nfs_xdr.h | 2
-rw-r--r--  include/linux/page-flags.h | 20
-rw-r--r--  include/linux/platform_data/dma-dw.h | 6
-rw-r--r--  include/linux/platform_data/dma-ep93xx.h | 2
-rw-r--r--  include/linux/qcom_scm.h | 3
-rw-r--r--  include/linux/quotaops.h | 12
-rw-r--r--  include/linux/rcupdate.h | 2
-rw-r--r--  include/linux/regulator/consumer.h | 2
-rw-r--r--  include/linux/reset-controller.h | 2
-rw-r--r--  include/linux/scatterlist.h | 10
-rw-r--r--  include/linux/sched.h | 3
-rw-r--r--  include/linux/sched/mm.h | 10
-rw-r--r--  include/linux/sched/numa_balancing.h | 4
-rw-r--r--  include/linux/sched/task.h | 2
-rw-r--r--  include/linux/serial_8250.h | 4
-rw-r--r--  include/linux/serial_core.h | 37
-rw-r--r--  include/linux/skbuff.h | 36
-rw-r--r--  include/linux/sunrpc/sched.h | 2
-rw-r--r--  include/linux/swap.h | 6
-rw-r--r--  include/linux/tcp.h | 4
-rw-r--r--  include/linux/tty.h | 7
-rw-r--r--  include/linux/vmw_vmci_defs.h | 11
-rw-r--r--  include/math-emu/soft-fp.h | 2
-rw-r--r--  include/net/act_api.h | 4
-rw-r--r--  include/net/af_vsock.h | 3
-rw-r--r--  include/net/bluetooth/hci_core.h | 3
-rw-r--r--  include/net/bonding.h | 3
-rw-r--r--  include/net/busy_poll.h | 6
-rw-r--r--  include/net/dst.h | 5
-rw-r--r--  include/net/flow_dissector.h | 3
-rw-r--r--  include/net/fq.h | 2
-rw-r--r--  include/net/fq_impl.h | 4
-rw-r--r--  include/net/ip6_tunnel.h | 9
-rw-r--r--  include/net/ip_vs.h | 7
-rw-r--r--  include/net/ipv6_frag.h | 1
-rw-r--r--  include/net/llc.h | 1
-rw-r--r--  include/net/llc_conn.h | 2
-rw-r--r--  include/net/neighbour.h | 4
-rw-r--r--  include/net/netfilter/nf_tables.h | 3
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/net/psample.h | 1
-rw-r--r--  include/net/sch_generic.h | 5
-rw-r--r--  include/net/sctp/sctp.h | 2
-rw-r--r--  include/net/sctp/structs.h | 3
-rw-r--r--  include/net/sock.h | 21
-rw-r--r--  include/net/tcp.h | 26
-rw-r--r--  include/net/xfrm.h | 18
-rw-r--r--  include/rdma/ib_verbs.h | 13
-rw-r--r--  include/scsi/libfcoe.h | 1
-rw-r--r--  include/scsi/scsi_dbg.h | 2
-rw-r--r--  include/scsi/scsi_eh.h | 1
-rw-r--r--  include/sound/compress_driver.h | 5
-rw-r--r--  include/sound/soc-dapm.h | 2
-rw-r--r--  include/trace/events/sched.h | 11
-rw-r--r--  include/uapi/linux/cec.h | 4
-rw-r--r--  include/uapi/linux/coda_psdev.h | 13
-rw-r--r--  include/uapi/linux/isdn/capicmd.h | 1
-rw-r--r--  include/uapi/linux/netfilter/xt_nfacct.h | 5
-rw-r--r--  include/uapi/linux/nilfs2_ondisk.h | 24
-rw-r--r--  include/uapi/linux/snmp.h | 1
-rw-r--r--  include/xen/events.h | 3
116 files changed, 622 insertions, 264 deletions
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index fcb61b4659b3..8666fe7f35d7 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -23,7 +23,9 @@
*
* Return:
* 0 - On success
- * <0 - On error
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Operation not supported
*/
static inline int
arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
@@ -85,7 +87,9 @@ out_pagefault_enable:
*
* Return:
* 0 - On success
- * <0 - On error
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
*/
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index c64bea7a52be..e9f20b813a69 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -7,24 +7,6 @@
#include <linux/compiler.h>
#include <linux/log2.h>
-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
- int order;
-
- size--;
- size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
- order = fls(size);
-#else
- order = fls64(size);
-#endif
- return order;
-}
-
/**
* get_order - Determine the allocation order of a memory size
* @size: The size for which to get the order
@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
* to hold an object of the specified size.
*
* The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
*/
-#define get_order(n) \
-( \
- __builtin_constant_p(n) ? ( \
- ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
- (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
- ilog2((n) - 1) - PAGE_SHIFT + 1) \
- ) : \
- __get_order(n) \
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+ if (__builtin_constant_p(size)) {
+ if (!size)
+ return BITS_PER_LONG - PAGE_SHIFT;
+
+ if (size < (1UL << PAGE_SHIFT))
+ return 0;
+
+ return ilog2((size) - 1) - PAGE_SHIFT + 1;
+ }
+
+ size--;
+ size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+ return fls(size);
+#else
+ return fls64(size);
+#endif
+}
#endif /* __ASSEMBLY__ */
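For context on the rewrite above, a minimal usage sketch (illustrative only, not part of the patch; alloc_buf() is a hypothetical caller): get_order() turns a byte count into a page-allocation order, and the constant case still folds at compile time.

/* Hypothetical caller, for illustration only. */
static void *alloc_buf(size_t len)
{
	unsigned int order = get_order(len);	/* e.g. get_order(8192) == 1 with 4K pages */

	return (void *)__get_free_pages(GFP_KERNEL, order);
}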
diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h
index 7d63b1d4adb9..b225eeb30d05 100644
--- a/include/drm/drm_debugfs_crc.h
+++ b/include/drm/drm_debugfs_crc.h
@@ -43,6 +43,7 @@ struct drm_crtc_crc_entry {
* @lock: protects the fields in this struct
* @source: name of the currently configured source of CRCs
* @opened: whether userspace has opened the data file for reading
+ * @overflow: whether an overflow occurred.
* @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR
* @head: head of circular queue
* @tail: tail of circular queue
@@ -52,7 +53,7 @@ struct drm_crtc_crc_entry {
struct drm_crtc_crc {
spinlock_t lock;
const char *source;
- bool opened;
+ bool opened, overflow;
struct drm_crtc_crc_entry *entries;
int head, tail;
size_t values_cnt;
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index d84d52f6d2b1..b54c98f05460 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -41,6 +41,7 @@ struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
struct rb_root vm_files;
+ bool readonly:1;
};
struct drm_vma_offset_manager {
diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h
index d2b26a4b43eb..4a9db1b2669b 100644
--- a/include/dt-bindings/clock/rk3328-cru.h
+++ b/include/dt-bindings/clock/rk3328-cru.h
@@ -178,7 +178,7 @@
#define HCLK_TSP 309
#define HCLK_GMAC 310
#define HCLK_I2S0_8CH 311
-#define HCLK_I2S1_8CH 313
+#define HCLK_I2S1_8CH 312
#define HCLK_I2S2_2CH 313
#define HCLK_SPDIF_8CH 314
#define HCLK_VOP 315
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 34dba516ef24..d5c6637ed638 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -315,6 +315,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
void kvm_vgic_load(struct kvm_vcpu *vcpu);
void kvm_vgic_put(struct kvm_vcpu *vcpu);
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) ((k)->arch.vgic.initialized)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 13c105121a18..4bb3bca75004 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -99,7 +99,7 @@ static inline bool has_acpi_companion(struct device *dev)
static inline void acpi_preset_companion(struct device *dev,
struct acpi_device *parent, u64 addr)
{
- ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, NULL));
+ ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false));
}
static inline const char *acpi_dev_name(struct acpi_device *adev)
@@ -324,7 +324,10 @@ void acpi_set_irq_model(enum acpi_irq_model_id model,
#ifdef CONFIG_X86_IO_APIC
extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
#else
-#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
+static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
+{
+ return -1;
+}
#endif
/*
* This function undoes the effect of one call to acpi_register_gsi().
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index 03885e63f92b..2664b9e89f9b 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -108,7 +108,7 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
#define AARP_RESOLVE_TIME (10 * HZ)
extern struct datalink_proto *ddp_dl, *aarp_dl;
-extern void aarp_proto_init(void);
+extern int aarp_proto_init(void);
/* Inter module exports */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d4b39caf081d..e260f000b9ac 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -123,6 +123,11 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}
+static inline bool bio_full(struct bio *bio)
+{
+ return bio->bi_vcnt >= bio->bi_max_vecs;
+}
+
/*
* will die
*/
@@ -459,6 +464,10 @@ void bio_chain(struct bio *, struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
+bool __bio_try_merge_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int off);
+void __bio_add_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 835c2271196a..aec255fb62aa 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -185,8 +185,13 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
+/*
+ * The static inlines below do not handle constant nbits==0 correctly,
+ * so make such users (should any ever turn up) call the out-of-line
+ * versions.
+ */
#define small_const_nbits(nbits) \
- (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
@@ -350,7 +355,7 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
}
static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, int nbits)
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 8804753805ac..7bb2d8de9f30 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -116,7 +116,13 @@ extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
static inline sector_t blk_rq_trace_sector(struct request *rq)
{
- return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq);
+ /*
+ * Tracing should ignore starting sector for passthrough requests and
+ * requests where starting sector didn't get set.
+ */
+ if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
+ return 0;
+ return blk_rq_pos(rq);
}
static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
diff --git a/include/linux/bug.h b/include/linux/bug.h
index da4231c905c8..f48597417791 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -45,6 +45,11 @@ int is_valid_bugaddr(unsigned long addr);
#else /* !CONFIG_GENERIC_BUG */
+static inline void *find_bug(unsigned long bugaddr)
+{
+ return NULL;
+}
+
static inline enum bug_trap_type report_bug(unsigned long bug_addr,
struct pt_regs *regs)
{
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 7e9c991c95e0..43ed9e77cf81 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -173,6 +173,8 @@ struct ccp_aes_engine {
enum ccp_aes_mode mode;
enum ccp_aes_action action;
+ u32 authsize;
+
struct scatterlist *key;
u32 key_len; /* In bytes */
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 5e58bb29b1a3..11cdc7c60480 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
static inline void ceph_buffer_put(struct ceph_buffer *b)
{
- kref_put(&b->kref, ceph_buffer_release);
+ if (b)
+ kref_put(&b->kref, ceph_buffer_release);
}
extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
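With the NULL check folded into ceph_buffer_put(), callers no longer need to guard the call themselves. A minimal sketch (illustrative only; old_blob is a hypothetical local):

struct ceph_buffer *old_blob = NULL;

/* ... old_blob may or may not have been handed a buffer by now ... */

ceph_buffer_put(old_blob);	/* no-op when old_blob is still NULL */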
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index eb396f71285f..8d4b92185a09 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -201,6 +201,7 @@ struct css_set {
*/
struct list_head tasks;
struct list_head mg_tasks;
+ struct list_head dying_tasks;
/* all css_task_iters currently walking this cset */
struct list_head task_iters;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 8e83c9055ccb..0e21619f1c03 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -42,6 +42,9 @@
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED (1U << 1)
+/* internal flags */
+#define CSS_TASK_ITER_SKIPPED (1U << 16)
+
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
struct cgroup_subsys *ss;
@@ -56,6 +59,7 @@ struct css_task_iter {
struct list_head *task_pos;
struct list_head *tasks_head;
struct list_head *mg_tasks_head;
+ struct list_head *dying_tasks_head;
struct css_set *cur_cset;
struct css_set *cur_dcset;
@@ -484,7 +488,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
*
* Find the css for the (@task, @subsys_id) combination, increment a
* reference on and return it. This function is guaranteed to return a
- * valid css.
+ * valid css. The returned css may already have been offlined.
*/
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
@@ -494,7 +498,13 @@ task_get_css(struct task_struct *task, int subsys_id)
rcu_read_lock();
while (true) {
css = task_css(task, subsys_id);
- if (likely(css_tryget_online(css)))
+ /*
+ * Can't use css_tryget_online() here. A task which has
+ * PF_EXITING set may stay associated with an offline css.
+ * If such task calls this function, css_tryget_online()
+ * will keep failing.
+ */
+ if (likely(css_tryget(css)))
break;
cpu_relax();
}
diff --git a/include/linux/coda.h b/include/linux/coda.h
index d30209b9cef8..0ca0c83fdb1c 100644
--- a/include/linux/coda.h
+++ b/include/linux/coda.h
@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
#ifndef _CODA_HEADER_
#define _CODA_HEADER_
-#if defined(__linux__)
typedef unsigned long long u_quad_t;
-#endif
+
#include <uapi/linux/coda.h>
#endif
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index 15170954aa2b..57d2b2faf6a3 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -19,6 +19,17 @@ struct venus_comm {
struct mutex vc_mutex;
};
+/* messages between coda filesystem in kernel and Venus */
+struct upc_req {
+ struct list_head uc_chain;
+ caddr_t uc_data;
+ u_short uc_flags;
+ u_short uc_inSize; /* Size is at most 5000 bytes */
+ u_short uc_outSize;
+ u_short uc_opcode; /* copied from data to save lookup */
+ int uc_unique;
+ wait_queue_head_t uc_sleep; /* process' wait queue */
+};
static inline struct venus_comm *coda_vcp(struct super_block *sb)
{
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 23909d12f729..cec96d4794d0 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -324,8 +324,6 @@ struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;
-extern void compat_exit_robust_list(struct task_struct *curr);
-
asmlinkage long
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
compat_size_t len);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 67c3934fb9ed..f84d332085c3 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -119,10 +119,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
-# define unreachable() do { \
- annotate_unreachable(); \
- __builtin_unreachable(); \
-} while (0)
+# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
#endif
/*
@@ -188,23 +185,21 @@ void __read_once_size(const volatile void *p, void *res, int size)
#ifdef CONFIG_KASAN
/*
- * This function is not 'inline' because __no_sanitize_address conflicts
+ * We can't declare function 'inline' because __no_sanitize_address conflicts
* with inlining. Attempt to inline it may cause a build failure.
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
-static __no_sanitize_address __maybe_unused
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
+# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
#else
-static __always_inline
+# define __no_kasan_or_inline __always_inline
+#endif
+
+static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
__READ_ONCE_SIZE;
}
-#endif
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
@@ -243,6 +238,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* required ordering.
*/
#include <asm/barrier.h>
+#include <linux/kasan-checks.h>
#define __READ_ONCE(x, check) \
({ \
@@ -262,6 +258,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
*/
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
+static __no_kasan_or_inline
+unsigned long read_word_at_a_time(const void *addr)
+{
+ kasan_check_read(addr, 1);
+ return *(unsigned long *)addr;
+}
+
#define WRITE_ONCE(x, val) \
({ \
union { typeof(x) __val; char __c[1]; } __u = \
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index efc48efb0ec6..67e8ba81c35f 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -59,6 +59,11 @@ extern ssize_t cpu_show_l1tf(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mds(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+extern ssize_t cpu_show_itlb_multihit(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -201,28 +206,7 @@ static inline int cpuhp_smt_enable(void) { return 0; }
static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
#endif
-/*
- * These are used for a global "mitigations=" cmdline option for toggling
- * optional CPU mitigations.
- */
-enum cpu_mitigations {
- CPU_MITIGATIONS_OFF,
- CPU_MITIGATIONS_AUTO,
- CPU_MITIGATIONS_AUTO_NOSMT,
-};
-
-extern enum cpu_mitigations cpu_mitigations;
-
-/* mitigations=off */
-static inline bool cpu_mitigations_off(void)
-{
- return cpu_mitigations == CPU_MITIGATIONS_OFF;
-}
-
-/* mitigations=auto,nosmt */
-static inline bool cpu_mitigations_auto_nosmt(void)
-{
- return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
-}
+extern bool cpu_mitigations_off(void);
+extern bool cpu_mitigations_auto_nosmt(void);
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index 986c06c88d81..84d3c81b5978 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -45,7 +45,7 @@
* 'asm/cpufeature.h' of your favorite architecture.
*/
#define module_cpu_feature_match(x, __initfunc) \
-static struct cpu_feature const cpu_feature_match_ ## x[] = \
+static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \
{ { .feature = cpu_feature(x) }, { } }; \
MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
\
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index fb83dee528a1..0834eb5ea9e6 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -100,6 +100,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
+ CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
@@ -114,10 +115,10 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_ACPI_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
+ CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
- CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
CPUHP_AP_METAG_TIMER_STARTING,
CPUHP_AP_QCOM_TIMER_STARTING,
@@ -162,6 +163,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
+ CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
CPUHP_AP_X86_HPET_ONLINE,
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 631286535d0f..2a0d4674294e 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -145,7 +145,11 @@ struct cred {
struct user_struct *user; /* real user ID subscription */
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
struct group_info *group_info; /* supplementary groups for euid/fsgid */
- struct rcu_head rcu; /* RCU deletion hook */
+ /* RCU deletion */
+ union {
+ int non_rcu; /* Can we skip RCU deletion? */
+ struct rcu_head rcu; /* RCU deletion hook */
+ };
} __randomize_layout;
extern void __put_cred(struct cred *);
@@ -243,6 +247,7 @@ static inline const struct cred *get_cred(const struct cred *cred)
{
struct cred *nonconst_cred = (struct cred *) cred;
validate_creds(cred);
+ nonconst_cred->non_rcu = 0;
return get_new_cred(nonconst_cred);
}
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 7bf3b99e6fbb..9aee5f345e29 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -650,8 +650,7 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
return SZ_64K;
}
-static inline unsigned int dma_set_max_seg_size(struct device *dev,
- unsigned int size)
+static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
if (dev->dma_parms) {
dev->dma_parms->max_segment_size = size;
diff --git a/include/linux/edac.h b/include/linux/edac.h
index cd75c173fd00..90f72336aea6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -17,6 +17,7 @@
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
+#include <linux/numa.h>
#define EDAC_DEVICE_NAME_LEN 31
@@ -667,6 +668,6 @@ struct mem_ctl_info {
/*
* Maximum number of memory controllers in the coherent fabric.
*/
-#define EDAC_MAX_MCS 16
+#define EDAC_MAX_MCS 2 * MAX_NUMNODES
#endif
diff --git a/include/linux/fb.h b/include/linux/fb.h
index bc24e48e396d..ccd1f74ca6ab 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -725,8 +725,6 @@ extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var);
extern const unsigned char *fb_firmware_edid(struct device *device);
extern void fb_edid_to_monspecs(unsigned char *edid,
struct fb_monspecs *specs);
-extern void fb_edid_add_monspecs(unsigned char *edid,
- struct fb_monspecs *specs);
extern void fb_destroy_modedb(struct fb_videomode *modedb);
extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb);
extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter);
@@ -800,7 +798,6 @@ struct dmt_videomode {
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
-extern const struct fb_videomode cea_modes[65];
extern const struct dmt_videomode dmt_modes[];
struct fb_modelist {
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ac2272778f2e..5ca676d64652 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -729,6 +729,7 @@ struct sock *do_sk_redirect_map(struct sk_buff *skb);
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
+extern long bpf_jit_limit;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index da56c796c5d8..f0fddf4ea828 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2580,6 +2580,8 @@ extern int filemap_flush(struct address_space *);
extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
loff_t lend);
+extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
+ loff_t start_byte, loff_t end_byte);
static inline int filemap_fdatawait(struct address_space *mapping)
{
diff --git a/include/linux/futex.h b/include/linux/futex.h
index c0fb9a24bbd2..a4b6cba699bf 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -2,7 +2,9 @@
#ifndef _LINUX_FUTEX_H
#define _LINUX_FUTEX_H
+#include <linux/sched.h>
#include <linux/ktime.h>
+
#include <uapi/linux/futex.h>
struct inode;
@@ -12,9 +14,6 @@ struct task_struct;
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3);
-extern int
-handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
-
/*
* Futexes are matched on equal values of this key.
* The key type depends on whether it's a shared or private mapping.
@@ -54,24 +53,35 @@ union futex_key {
#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
#ifdef CONFIG_FUTEX
-extern void exit_robust_list(struct task_struct *curr);
-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
-#define futex_cmpxchg_enabled 1
-#else
-extern int futex_cmpxchg_enabled;
-#endif
-#else
-static inline void exit_robust_list(struct task_struct *curr)
+enum {
+ FUTEX_STATE_OK,
+ FUTEX_STATE_EXITING,
+ FUTEX_STATE_DEAD,
+};
+
+static inline void futex_init_task(struct task_struct *tsk)
{
-}
+ tsk->robust_list = NULL;
+#ifdef CONFIG_COMPAT
+ tsk->compat_robust_list = NULL;
#endif
+ INIT_LIST_HEAD(&tsk->pi_state_list);
+ tsk->pi_state_cache = NULL;
+ tsk->futex_state = FUTEX_STATE_OK;
+ mutex_init(&tsk->futex_exit_mutex);
+}
-#ifdef CONFIG_FUTEX_PI
-extern void exit_pi_state_list(struct task_struct *curr);
+void futex_exit_recursive(struct task_struct *tsk);
+void futex_exit_release(struct task_struct *tsk);
+void futex_exec_release(struct task_struct *tsk);
+
+long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3);
#else
-static inline void exit_pi_state_list(struct task_struct *curr)
-{
-}
+static inline void futex_init_task(struct task_struct *tsk) { }
+static inline void futex_exit_recursive(struct task_struct *tsk) { }
+static inline void futex_exit_release(struct task_struct *tsk) { }
+static inline void futex_exec_release(struct task_struct *tsk) { }
#endif
#endif
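To make the new exit API concrete, a rough sketch of how callers outside this include/ diff are expected to use it (illustrative only; the real call sites are in the fork and exit/exec paths, and the functions below are placeholders):

/* On fork: every new task starts with clean futex state. */
static void sketch_copy_process(struct task_struct *p)
{
	futex_init_task(p);		/* robust lists cleared, futex_state = FUTEX_STATE_OK */
}

/* On exit or exec: release robust-list and PI state through the new helpers,
 * replacing the removed exit_robust_list()/exit_pi_state_list() calls. */
static void sketch_task_teardown(struct task_struct *tsk, bool is_exec)
{
	if (is_exec)
		futex_exec_release(tsk);
	else
		futex_exit_release(tsk);
}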
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 872f930f1b06..dd0a452373e7 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -51,7 +51,8 @@ typedef unsigned long (*genpool_algo_t)(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool,
+ unsigned long start_addr);
/*
* General purpose special memory pool descriptor.
@@ -131,24 +132,24 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool);
+ struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_first_fit_align(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool);
+ struct gen_pool *pool, unsigned long start_addr);
extern struct gen_pool *devm_gen_pool_create(struct device *dev,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b041f94678de..79d3dab45ceb 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -313,6 +313,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
+/**
+ * gfpflags_normal_context - is gfp_flags a normal sleepable context?
+ * @gfp_flags: gfp_flags to test
+ *
+ * Test whether @gfp_flags indicates that the allocation is from the
+ * %current context and allowed to sleep.
+ *
+ * An allocation being allowed to block doesn't mean it owns the %current
+ * context. When direct reclaim path tries to allocate memory, the
+ * allocation context is nested inside whatever %current was doing at the
+ * time of the original allocation. The nested allocation may be allowed
+ * to block but modifying anything %current owns can corrupt the outer
+ * context's expectations.
+ *
+ * %true result from this function indicates that the allocation context
+ * can sleep and use anything that's associated with %current.
+ */
+static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
+{
+ return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
+ __GFP_DIRECT_RECLAIM;
+}
+
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
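A minimal sketch of the intended use of gfpflags_normal_context() (illustrative only; my_task_cache_alloc() is a hypothetical per-task fast path): only touch state owned by %current when the allocation really runs in current's own, sleepable context.

static void *alloc_maybe_cached(size_t len, gfp_t gfp)
{
	if (gfpflags_normal_context(gfp))
		return my_task_cache_alloc(len);	/* hypothetical: reuse current's cache */

	/* nested (direct reclaim) or reserve-backed allocation: stay generic */
	return kmalloc(len, gfp);
}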
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 8ef7fc0ce0f0..b2f103b170a9 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -230,30 +230,6 @@ static inline int irq_to_gpio(unsigned irq)
return -EINVAL;
}
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
- struct pinctrl_dev *pctldev,
- unsigned int gpio_offset, const char *pin_group)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
- WARN_ON(1);
-}
-
static inline int devm_gpio_request(struct device *dev, unsigned gpio,
const char *label)
{
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index c4a350d83578..79ad4f8b889d 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -404,7 +404,7 @@ static inline int gpiod_to_irq(const struct gpio_desc *desc)
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
- return ERR_PTR(-EINVAL);
+ return NULL;
}
static inline int desc_to_gpio(const struct gpio_desc *desc)
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 55a604ad459f..2e179778576c 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2743,4 +2743,57 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
return true;
}
+struct element {
+ u8 id;
+ u8 datalen;
+ u8 data[];
+} __packed;
+
+/* element iteration helpers */
+#define for_each_element(_elem, _data, _datalen) \
+ for (_elem = (const struct element *)(_data); \
+ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
+ (int)sizeof(*_elem) && \
+ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
+ (int)sizeof(*_elem) + _elem->datalen; \
+ _elem = (const struct element *)(_elem->data + _elem->datalen))
+
+#define for_each_element_id(element, _id, data, datalen) \
+ for_each_element(element, data, datalen) \
+ if (element->id == (_id))
+
+#define for_each_element_extid(element, extid, data, datalen) \
+ for_each_element(element, data, datalen) \
+ if (element->id == WLAN_EID_EXTENSION && \
+ element->datalen > 0 && \
+ element->data[0] == (extid))
+
+#define for_each_subelement(sub, element) \
+ for_each_element(sub, (element)->data, (element)->datalen)
+
+#define for_each_subelement_id(sub, id, element) \
+ for_each_element_id(sub, id, (element)->data, (element)->datalen)
+
+#define for_each_subelement_extid(sub, extid, element) \
+ for_each_element_extid(sub, extid, (element)->data, (element)->datalen)
+
+/**
+ * for_each_element_completed - determine if element parsing consumed all data
+ * @element: element pointer after for_each_element() or friends
+ * @data: same data pointer as passed to for_each_element() or friends
+ * @datalen: same data length as passed to for_each_element() or friends
+ *
+ * This function returns %true if all the data was parsed or considered
+ * while walking the elements. Only use this if your for_each_element()
+ * loop cannot be broken out of, otherwise it always returns %false.
+ *
+ * If some data was malformed, this returns %false since the last parsed
+ * element will not fill the whole remaining data.
+ */
+static inline bool for_each_element_completed(const struct element *element,
+ const void *data, size_t datalen)
+{
+ return (const u8 *)element == (const u8 *)data + datalen;
+}
+
#endif /* LINUX_IEEE80211_H */
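A short usage sketch for the new element iteration helpers (illustrative only; ies and ies_len are assumed to describe a buffer of information elements):

static void sketch_walk_ies(const u8 *ies, size_t ies_len)
{
	const struct element *elem;

	for_each_element_id(elem, WLAN_EID_SSID, ies, ies_len)
		pr_info("SSID element with %u data bytes\n", elem->datalen);

	/* elem now points just past the last complete element */
	if (!for_each_element_completed(elem, ies, ies_len))
		pr_warn("element buffer ends in a truncated element\n");
}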
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index ba7a9b0c7c57..24e9b360da65 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -84,6 +84,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
extern void unregister_pppox_proto(int proto_num);
extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t)
/* PPPoX socket states */
enum {
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index a6ab2f51f703..4def15c32a01 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -282,7 +282,8 @@ enum {
#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+ ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32
@@ -307,7 +308,8 @@ enum {
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
-#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+ ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS 32
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
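A worked example of the corrected PFSID packing (illustrative only): the PFSID is a 16-bit value whose low 4 bits belong in descriptor bits 15:12 and whose upper 12 bits belong in bits 63:52, so they must be shifted down by 4 before being placed.

u16 pfsid = 0x1234;
u64 wrong = ((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52);	/* old: misplaces 0x234 */
u64 fixed = QI_DEV_IOTLB_PFSID(pfsid);	/* new: 0x4 -> bits 15:12, 0x123 -> bits 63:52 */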
diff --git a/include/linux/iova.h b/include/linux/iova.h
index d179b9bf7814..7d23bbb887f2 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -154,6 +154,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn, unsigned long pfn_32bit);
+bool has_iova_flush_queue(struct iova_domain *iovad);
int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -234,6 +235,11 @@ static inline void init_iova_domain(struct iova_domain *iovad,
{
}
+static inline bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+ return false;
+}
+
static inline int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb,
iova_entry_dtor entry_dtor)
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 29290bfb94a8..cb41329a3ee4 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -454,6 +454,22 @@ struct jbd2_inode {
* @i_flags: Flags of inode [j_list_lock]
*/
unsigned long i_flags;
+
+ /**
+ * @i_dirty_start:
+ *
+ * Offset in bytes where the dirty range for this inode starts.
+ * [j_list_lock]
+ */
+ loff_t i_dirty_start;
+
+ /**
+ * @i_dirty_end:
+ *
+ * Inclusive offset in bytes where the dirty range for this inode
+ * ends. [j_list_lock]
+ */
+ loff_t i_dirty_end;
};
struct jbd2_revoke_table_s;
@@ -1399,6 +1415,12 @@ extern int jbd2_journal_force_commit(journal_t *);
extern int jbd2_journal_force_commit_nested(journal_t *);
extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode);
extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode);
+extern int jbd2_journal_inode_ranged_write(handle_t *handle,
+ struct jbd2_inode *inode, loff_t start_byte,
+ loff_t length);
+extern int jbd2_journal_inode_ranged_wait(handle_t *handle,
+ struct jbd2_inode *inode, loff_t start_byte,
+ loff_t length);
extern int jbd2_journal_begin_ordered_truncate(journal_t *journal,
struct jbd2_inode *inode, loff_t new_size);
extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
@@ -1562,7 +1584,7 @@ static inline int jbd2_space_needed(journal_t *journal)
static inline unsigned long jbd2_log_space_left(journal_t *journal)
{
/* Allow for rounding errors */
- unsigned long free = journal->j_free - 32;
+ long free = journal->j_free - 32;
if (journal->j_committing_transaction) {
unsigned long committing = atomic_read(&journal->
@@ -1571,7 +1593,7 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
/* Transaction + control blocks */
free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT);
}
- return free;
+ return max_t(long, free, 0);
}
/*
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 1c5469adaa85..bb7baecef002 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -101,7 +101,8 @@
#define DIV_ROUND_DOWN_ULL(ll, d) \
({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
-#define DIV_ROUND_UP_ULL(ll, d) DIV_ROUND_DOWN_ULL((ll) + (d) - 1, (d))
+#define DIV_ROUND_UP_ULL(ll, d) \
+ DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))
#if BITS_PER_LONG == 32
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
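The added cast matters when the dividend has a 32-bit type: previously (ll) + (d) - 1 was evaluated in 32-bit arithmetic and could wrap before do_div() ran. A worked example (illustrative only):

u32 bytes = 0xfffffff0;			/* close to U32_MAX */
u64 n = DIV_ROUND_UP_ULL(bytes, 4096);	/* old macro: sum wraps to 4079, result 0 */
					/* new macro: promotes to u64 first, result 1048576 */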
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index ab25c8b6d9e3..5e539e5bb70c 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -185,6 +185,7 @@ struct kernfs_root {
/* private fields, do not use outside kernfs proper */
struct idr ino_idr;
+ u32 last_ino;
u32 next_generation;
struct kernfs_syscall_ops *syscall_ops;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 026615e242d8..7668c68ddb5b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -140,7 +140,7 @@ static inline bool is_error_page(struct page *page)
extern struct kmem_cache *kvm_vcpu_cache;
-extern spinlock_t kvm_lock;
+extern struct mutex kvm_lock;
extern struct list_head vm_list;
struct kvm_io_range {
@@ -808,6 +808,7 @@ void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
@@ -889,6 +890,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
+bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
struct kvm_irq_ack_notifier {
struct hlist_node link;
@@ -1012,6 +1014,7 @@ enum kvm_stat_kind {
struct kvm_stat_data {
int offset;
+ int mode;
struct kvm *kvm;
};
@@ -1019,6 +1022,7 @@ struct kvm_stats_debugfs_item {
const char *name;
int offset;
enum kvm_stat_kind kind;
+ int mode;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
@@ -1257,4 +1261,10 @@ static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
+typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
+
+int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
+ uintptr_t data, const char *name,
+ struct task_struct **thread_ptr);
+
#endif
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index 14997285e53d..1aa707ab19bb 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -2,6 +2,7 @@
#ifndef _LIBFDT_ENV_H
#define _LIBFDT_ENV_H
+#include <linux/kernel.h> /* For INT_MAX */
#include <linux/string.h>
#include <asm/byteorder.h>
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 58e110aee7ab..d36a02935391 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -316,6 +316,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
+extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource, bool online);
extern int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock);
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index 5aacdb017a9f..806a4f095312 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -25,6 +25,7 @@ struct intel_soc_pmic {
int irq;
struct regmap *regmap;
struct regmap_irq_chip_data *irq_chip_data;
+ struct regmap_irq_chip_data *irq_chip_data_pwrbtn;
struct regmap_irq_chip_data *irq_chip_data_tmu;
struct regmap_irq_chip_data *irq_chip_data_bcu;
struct regmap_irq_chip_data *irq_chip_data_adc;
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index cf815577bd68..3ae1fe743bc3 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -178,7 +178,6 @@ struct max8997_led_platform_data {
struct max8997_platform_data {
/* IRQ */
int ono;
- int wakeup;
/* ---- PMIC ---- */
struct max8997_regulator_data *regulators;
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index 638222e43e48..93011c61aafd 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -247,6 +247,7 @@ struct mc13xxx_platform_data {
#define MC13XXX_ADC0_TSMOD0 (1 << 12)
#define MC13XXX_ADC0_TSMOD1 (1 << 13)
#define MC13XXX_ADC0_TSMOD2 (1 << 14)
+#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15)
#define MC13XXX_ADC0_ADINC1 (1 << 16)
#define MC13XXX_ADC0_ADINC2 (1 << 17)
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 6dec43826303..cffb23b8bd70 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -3733,6 +3733,9 @@ enum usb_irq_events {
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00
+/* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */
+#define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC
+
/* Registers for function RESOURCE */
#define TPS65917_REGEN1_CTRL 0x2
#define TPS65917_PLLEN_CTRL 0x3
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index d3156594674c..338e0f6e2226 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -443,7 +443,7 @@ enum {
enum {
RK805_ID = 0x8050,
RK808_ID = 0x0000,
- RK818_ID = 0x8181,
+ RK818_ID = 0x8180,
};
struct rk808 {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index b25e7baa273e..dfe626ad818a 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -164,6 +164,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ee0eae215210..858ce84ac7c5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -549,11 +549,6 @@ static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
extern void kvfree(const void *addr);
-static inline atomic_t *compound_mapcount_ptr(struct page *page)
-{
- return &page[1].compound_mapcount;
-}
-
static inline int compound_mapcount(struct page *page)
{
VM_BUG_ON_PAGE(!PageCompound(page), page);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index e41ef532c4ce..be5d445bac98 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -240,6 +240,11 @@ struct page_frag_cache {
typedef unsigned long vm_flags_t;
+static inline atomic_t *compound_mapcount_ptr(struct page *page)
+{
+ return &page[1].compound_mapcount;
+}
+
/*
* A region containing a mapping of a non-memory backed file under NOMMU
* conditions. These are held in a global tree and are pinned by the VMAs that
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 6cd0f6b7658b..aadc2ee050f1 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -401,7 +401,7 @@ static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
return dev_of_node(&mtd->dev);
}
-static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
+static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
{
return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 40b830d55fe5..4725a9d9597f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3522,7 +3522,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
if (debug_value == 0) /* no output */
return 0;
/* set low N bits */
- return (1 << debug_value) - 1;
+ return (1U << debug_value) - 1;
}
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index e27572d30d97..ad69430fd0eb 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -164,6 +164,16 @@ nfs_list_add_request(struct nfs_page *req, struct list_head *head)
list_add_tail(&req->wb_list, head);
}
+/**
+ * nfs_list_move_request - Move a request to a new list
+ * @req: request
+ * @head: head of list into which to insert the request.
+ */
+static inline void
+nfs_list_move_request(struct nfs_page *req, struct list_head *head)
+{
+ list_move_tail(&req->wb_list, head);
+}
/**
* nfs_list_remove_request - Remove a request from its wb_list
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 6959968dc36a..373fb26b5fed 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1520,7 +1520,7 @@ struct nfs_commit_data {
};
struct nfs_pgio_completion_ops {
- void (*error_cleanup)(struct list_head *head);
+ void (*error_cleanup)(struct list_head *head, int);
void (*init_hdr)(struct nfs_pgio_header *hdr);
void (*completion)(struct nfs_pgio_header *hdr);
void (*reschedule_io)(struct nfs_pgio_header *hdr);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 584b14c774c1..5f966c94732b 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -565,12 +565,28 @@ static inline int PageTransCompound(struct page *page)
*
* Unlike PageTransCompound, this is safe to be called only while
* split_huge_pmd() cannot run from under us, like if protected by the
- * MMU notifier, otherwise it may result in page->_mapcount < 0 false
+ * MMU notifier, otherwise it may result in page->_mapcount check false
* positives.
+ *
+ * We have to treat page cache THP differently since every subpage of it
+ * would get _mapcount inc'ed once it is PMD mapped. But, it may be PTE
+ * mapped in the current process so comparing subpage's _mapcount to
+ * compound_mapcount to filter out PTE mapped case.
*/
static inline int PageTransCompoundMap(struct page *page)
{
- return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
+ struct page *head;
+
+ if (!PageTransCompound(page))
+ return 0;
+
+ if (PageAnon(page))
+ return atomic_read(&page->_mapcount) < 0;
+
+ head = compound_head(page);
+ /* File THP is PMD mapped and not PTE mapped */
+ return atomic_read(&page->_mapcount) ==
+ atomic_read(compound_mapcount_ptr(head));
}
/*
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index 896cb71a382c..1a1d58ebffbf 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -49,6 +49,7 @@ struct dw_dma_slave {
* @data_width: Maximum data width supported by hardware per AHB master
* (in bytes, power of 2)
* @multi_block: Multi block transfers supported by hardware per channel.
+ * @protctl: Protection control signals setting per channel.
*/
struct dw_dma_platform_data {
unsigned int nr_channels;
@@ -65,6 +66,11 @@ struct dw_dma_platform_data {
unsigned char nr_masters;
unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS];
+#define CHAN_PROTCTL_PRIVILEGED BIT(0)
+#define CHAN_PROTCTL_BUFFERABLE BIT(1)
+#define CHAN_PROTCTL_CACHEABLE BIT(2)
+#define CHAN_PROTCTL_MASK GENMASK(2, 0)
+ unsigned char protctl;
};
#endif /* _PLATFORM_DATA_DMA_DW_H */
diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
index f8f1f6b952a6..eb9805bb3fe8 100644
--- a/include/linux/platform_data/dma-ep93xx.h
+++ b/include/linux/platform_data/dma-ep93xx.h
@@ -85,7 +85,7 @@ static inline enum dma_transfer_direction
ep93xx_dma_chan_direction(struct dma_chan *chan)
{
if (!ep93xx_dma_chan_is_m2p(chan))
- return DMA_NONE;
+ return DMA_TRANS_NONE;
/* even channels are for TX, odd for RX */
return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index e5380471c2cd..428278a44c7d 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -44,6 +44,9 @@ extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
#else
+
+#include <linux/errno.h>
+
static inline
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 2fb6fb11132e..50ab5d6ccc4e 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -22,7 +22,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
/* i_mutex must being held */
static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
{
- return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
+ return (ia->ia_valid & ATTR_SIZE) ||
(ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
(ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
}
@@ -51,6 +51,16 @@ static inline struct dquot *dqgrab(struct dquot *dquot)
atomic_inc(&dquot->dq_count);
return dquot;
}
+
+static inline bool dquot_is_busy(struct dquot *dquot)
+{
+ if (test_bit(DQ_MOD_B, &dquot->dq_flags))
+ return true;
+ if (atomic_read(&dquot->dq_count) > 1)
+ return true;
+ return false;
+}
+
void dqput(struct dquot *dquot);
int dquot_scan_active(struct super_block *sb,
int (*fn)(struct dquot *dquot, unsigned long priv),
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f960c85cd9ec..8d570190e9b4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -622,7 +622,7 @@ static inline void rcu_preempt_sleep_check(void) { }
* read-side critical sections may be preempted and they may also block, but
* only when acquiring spinlocks that are subject to priority inheritance.
*/
-static inline void rcu_read_lock(void)
+static __always_inline void rcu_read_lock(void)
{
__rcu_read_lock();
__acquire(RCU);
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index bb9693bed08a..82875120a9c4 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -511,7 +511,7 @@ static inline int regulator_get_error_flags(struct regulator *regulator,
static inline int regulator_set_load(struct regulator *regulator, int load_uA)
{
- return REGULATOR_MODE_NORMAL;
+ return 0;
}
static inline int regulator_allow_bypass(struct regulator *regulator,
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index adb88f8cefbc..576caaf0c9af 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -7,7 +7,7 @@
struct reset_controller_dev;
/**
- * struct reset_control_ops
+ * struct reset_control_ops - reset controller driver callbacks
*
* @reset: for self-deasserting resets, does all necessary
* things to reset the device
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index d87dfa41142d..8b7bce207229 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -267,6 +267,16 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
unsigned long offset, unsigned long size,
gfp_t gfp_mask);
+#ifdef CONFIG_SGL_ALLOC
+struct scatterlist *sgl_alloc_order(unsigned long long length,
+ unsigned int order, bool chainable,
+ gfp_t gfp, unsigned int *nent_p);
+struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
+ unsigned int *nent_p);
+void sgl_free_order(struct scatterlist *sgl, int order);
+void sgl_free(struct scatterlist *sgl);
+#endif /* CONFIG_SGL_ALLOC */
+
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 866439c361a9..b06577652643 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -959,6 +959,8 @@ struct task_struct {
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
+ struct mutex futex_exit_mutex;
+ unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
@@ -1334,7 +1336,6 @@ extern struct pid *cad_pid;
*/
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
-#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index ef4ae0a545fe..efb9e12e7f91 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -62,6 +62,10 @@ static inline void mmdrop_async(struct mm_struct *mm)
* followed by taking the mmap_sem for writing before modifying the
* vmas or anything the coredump pretends not to change from under it.
*
+ * It also has to be called when mmgrab() is used in the context of
+ * the process, but then the mm_count refcount is transferred outside
+ * the context of the process to run down_write() on that pinned mm.
+ *
* NOTE: find_extend_vma() called from GUP context is the only place
* that can modify the "mm" (notably the vm_start/end) under mmap_sem
* for reading and outside the context of the process, so it is also
@@ -121,8 +125,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task);
* succeeds.
*/
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
-/* Remove the current tasks stale references to the old mm_struct */
-extern void mm_release(struct task_struct *, struct mm_struct *);
+/* Remove the current task's stale references to the old mm_struct on exit() */
+extern void exit_mm_release(struct task_struct *, struct mm_struct *);
+/* Remove the current task's stale references to the old mm_struct on exec() */
+extern void exec_mm_release(struct task_struct *, struct mm_struct *);
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h
index e7dd04a84ba8..3988762efe15 100644
--- a/include/linux/sched/numa_balancing.h
+++ b/include/linux/sched/numa_balancing.h
@@ -19,7 +19,7 @@
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
-extern void task_numa_free(struct task_struct *p);
+extern void task_numa_free(struct task_struct *p, bool final);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
int src_nid, int dst_cpu);
#else
@@ -34,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
static inline void set_numabalancing_state(bool enabled)
{
}
-static inline void task_numa_free(struct task_struct *p)
+static inline void task_numa_free(struct task_struct *p, bool final)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index a74ec619ac51..11b4fba82950 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -39,6 +39,8 @@ void __noreturn do_task_dead(void);
extern void proc_caches_init(void);
+extern void fork_init(void);
+
extern void release_task(struct task_struct * p);
#ifdef CONFIG_HAVE_COPY_THREAD_TLS
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index a27ef5f56431..791a6be0e394 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -134,6 +134,10 @@ struct uart_8250_port {
void (*dl_write)(struct uart_8250_port *, int);
struct uart_8250_em485 *em485;
+
+ /* Serial port overrun backoff */
+ struct delayed_work overrun_backoff;
+ u32 overrun_backoff_time_ms;
};
static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 868b60a79c0b..b2a7b7c15451 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -166,6 +166,7 @@ struct uart_port {
struct console *cons; /* struct console, if any */
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ)
unsigned long sysrq; /* sysrq timeout */
+ unsigned int sysrq_ch; /* char for sysrq */
#endif
/* flags must be updated while holding port mutex */
@@ -474,8 +475,42 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
}
return 0;
}
+static inline int
+uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ if (port->sysrq) {
+ if (ch && time_before(jiffies, port->sysrq)) {
+ port->sysrq_ch = ch;
+ port->sysrq = 0;
+ return 1;
+ }
+ port->sysrq = 0;
+ }
+ return 0;
+}
+static inline void
+uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+{
+ int sysrq_ch;
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+ spin_unlock_irqrestore(&port->lock, irqflags);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+}
#else
-#define uart_handle_sysrq_char(port,ch) ({ (void)port; 0; })
+static inline int
+uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; }
+static inline int
+uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; }
+static inline void
+uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+{
+ spin_unlock_irqrestore(&port->lock, irqflags);
+}
#endif
/*
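
A sketch of how the two new helpers pair up in a driver interrupt handler, assuming a hypothetical UART with accessors example_rx_ready() and example_rx_byte(): the sysrq character is only recorded while port->lock is held, and handle_sysrq() runs after uart_unlock_and_check_sysrq() has dropped the lock, which the old uart_handle_sysrq_char() path could not guarantee.

#include <linux/interrupt.h>
#include <linux/serial_core.h>
#include <linux/tty_flip.h>

/* Assumed accessors for the hypothetical UART hardware. */
bool example_rx_ready(struct uart_port *port);
unsigned int example_rx_byte(struct uart_port *port);

static irqreturn_t example_uart_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned long flags;
	unsigned int ch;

	spin_lock_irqsave(&port->lock, flags);

	while (example_rx_ready(port)) {
		ch = example_rx_byte(port);
		/* Remember a possible sysrq character but do not act on
		 * it yet; returns 1 if the character was consumed.
		 */
		if (uart_prepare_sysrq_char(port, ch))
			continue;
		uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
	}
	tty_flip_buffer_push(&port->state->port);

	/* Drops port->lock, then calls handle_sysrq() if one was seen. */
	uart_unlock_and_check_sysrq(port, flags);

	return IRQ_HANDLED;
}
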
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fdb0cd0699b6..ec00d9264e5c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1228,7 +1228,8 @@ static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6
return skb->hash;
}
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+ const siphash_key_t *perturb);
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
@@ -1346,6 +1347,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
}
/**
+ * skb_queue_empty_lockless - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ * This variant can be used in lockless contexts.
+ */
+static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+{
+ return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
+
+/**
* skb_queue_is_last - check if skb is the last entry in the queue
* @list: queue head
* @skb: buffer
@@ -1709,9 +1723,11 @@ static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
- newsk->next = next;
- newsk->prev = prev;
- next->prev = prev->next = newsk;
+ /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+ WRITE_ONCE(newsk->next, next);
+ WRITE_ONCE(newsk->prev, prev);
+ WRITE_ONCE(next->prev, newsk);
+ WRITE_ONCE(prev->next, newsk);
list->qlen++;
}
@@ -1722,11 +1738,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
struct sk_buff *first = list->next;
struct sk_buff *last = list->prev;
- first->prev = prev;
- prev->next = first;
+ WRITE_ONCE(first->prev, prev);
+ WRITE_ONCE(prev->next, first);
- last->next = next;
- next->prev = last;
+ WRITE_ONCE(last->next, next);
+ WRITE_ONCE(next->prev, last);
}
/**
@@ -1867,8 +1883,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
next = skb->next;
prev = skb->prev;
skb->next = skb->prev = NULL;
- next->prev = prev;
- prev->next = next;
+ WRITE_ONCE(next->prev, prev);
+ WRITE_ONCE(prev->next, next);
}
/**
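
A small sketch of what the WRITE_ONCE()/READ_ONCE() pairing buys, assuming a hypothetical poll helper example_poll_mask(): emptiness of a receive queue can be tested without holding the queue lock, because __skb_insert() and __skb_unlink() now publish the list pointers with WRITE_ONCE() and skb_queue_empty_lockless() reads them with READ_ONCE().

#include <linux/poll.h>
#include <linux/skbuff.h>

static unsigned int example_poll_mask(const struct sk_buff_head *rxq)
{
	unsigned int mask = 0;

	/* No rxq->lock taken: safe against concurrent enqueue/dequeue. */
	if (!skb_queue_empty_lockless(rxq))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
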
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index d96e74e114c0..c9548a63d09b 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -188,7 +188,6 @@ struct rpc_timer {
struct rpc_wait_queue {
spinlock_t lock;
struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
- pid_t owner; /* process id of last task serviced */
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
unsigned char nr; /* # tasks remaining for cookie */
@@ -204,7 +203,6 @@ struct rpc_wait_queue {
* from a single cookie. The aim is to improve
* performance of NFS operations such as read/write.
*/
-#define RPC_BATCH_COUNT 16
#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)
/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e643866912b7..411953964c34 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -363,14 +363,8 @@ extern unsigned long vm_total_pages;
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
-extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
#define node_reclaim_mode 0
-static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
- unsigned int order)
-{
- return 0;
-}
#endif
extern int page_evictable(struct page *page);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fe322fa611e6..60aea230dc6a 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -450,4 +450,8 @@ static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
return (user_mss && user_mss < mss) ? user_mss : mss;
}
+
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
+ int shiftlen);
+
#endif /* _LINUX_TCP_H */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 1dd587ba6d88..0cd621d8c7f0 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -365,6 +365,7 @@ struct tty_file_private {
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
#define TTY_HUPPED 18 /* Post driver->hangup() */
#define TTY_HUPPING 19 /* Hangup in progress */
+#define TTY_LDISC_CHANGING 20 /* Change pending - non-block IO */
#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
/* Values for tty->flow_change */
@@ -382,6 +383,12 @@ static inline void tty_set_flow_change(struct tty_struct *tty, int val)
smp_mb();
}
+static inline bool tty_io_nonblock(struct tty_struct *tty, struct file *file)
+{
+ return file->f_flags & O_NONBLOCK ||
+ test_bit(TTY_LDISC_CHANGING, &tty->flags);
+}
+
static inline bool tty_io_error(struct tty_struct *tty)
{
return test_bit(TTY_IO_ERROR, &tty->flags);
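
A sketch of the intended use of tty_io_nonblock(), assuming a hypothetical line-discipline read path; example_data_available() and example_copy_data() are assumed helpers. A pending ldisc change now makes a blocked reader bail out exactly as O_NONBLOCK would, so the changeover is not held up.

#include <linux/errno.h>
#include <linux/tty.h>

/* Assumed buffer helpers of the hypothetical line discipline. */
bool example_data_available(struct tty_struct *tty);
ssize_t example_copy_data(struct tty_struct *tty, unsigned char *buf, size_t nr);

static ssize_t example_ldisc_read(struct tty_struct *tty, struct file *file,
				  unsigned char *buf, size_t nr)
{
	while (!example_data_available(tty)) {
		if (tty_io_nonblock(tty, file))
			return -EAGAIN;
		/* ... otherwise sleep on the discipline's read queue ... */
	}

	return example_copy_data(tty, buf, nr);
}
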
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index b724ef7005de..53c5e40a2a8f 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -68,9 +68,18 @@ enum {
/*
* A single VMCI device has an upper limit of 128MB on the amount of
- * memory that can be used for queue pairs.
+ * memory that can be used for queue pairs. Since each queue pair
+ * consists of at least two pages, the memory limit also dictates the
+ * number of queue pairs a guest can create.
*/
#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
+#define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)
+
+/*
+ * There can be at most PAGE_SIZE doorbells since there is one doorbell
+ * per byte in the doorbell bitmap page.
+ */
+#define VMCI_MAX_GUEST_DOORBELL_COUNT PAGE_SIZE
/*
* Queues with pre-mapped data pages must be small, so that we don't pin
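
For concreteness, assuming the common 4 KiB PAGE_SIZE: VMCI_MAX_GUEST_QP_COUNT works out to 128 MiB / 4 KiB / 2 = 16384 queue pairs, and VMCI_MAX_GUEST_DOORBELL_COUNT to 4096 doorbells, one per byte of the doorbell bitmap page.
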
diff --git a/include/math-emu/soft-fp.h b/include/math-emu/soft-fp.h
index 3f284bc03180..5650c1628383 100644
--- a/include/math-emu/soft-fp.h
+++ b/include/math-emu/soft-fp.h
@@ -138,7 +138,7 @@ do { \
_FP_FRAC_ADDI_##wc(X, _FP_WORK_ROUND); \
} while (0)
-#define _FP_ROUND_ZERO(wc, X) 0
+#define _FP_ROUND_ZERO(wc, X) (void)0
#define _FP_ROUND_PINF(wc, X) \
do { \
diff --git a/include/net/act_api.h b/include/net/act_api.h
index a10a3b1813f3..775387d6ca95 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -14,6 +14,7 @@
struct tcf_idrinfo {
spinlock_t lock;
struct idr action_idr;
+ struct net *net;
};
struct tc_action_ops;
@@ -104,7 +105,7 @@ struct tc_action_net {
};
static inline
-int tc_action_net_init(struct tc_action_net *tn,
+int tc_action_net_init(struct net *net, struct tc_action_net *tn,
const struct tc_action_ops *ops)
{
int err = 0;
@@ -113,6 +114,7 @@ int tc_action_net_init(struct tc_action_net *tn,
if (!tn->idrinfo)
return -ENOMEM;
tn->ops = ops;
+ tn->idrinfo->net = net;
spin_lock_init(&tn->idrinfo->lock);
idr_init(&tn->idrinfo->action_idr);
return err;
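
A sketch of a per-netns init callback of a tc action module under the new signature; example_act_ops and example_act_net_id are assumed to be defined elsewhere in the hypothetical module.

#include <net/act_api.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

extern struct tc_action_ops example_act_ops;
extern unsigned int example_act_net_id;

static __net_init int example_act_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, example_act_net_id);

	/* The owning namespace ends up in tn->idrinfo->net. */
	return tc_action_net_init(net, tn, &example_act_ops);
}
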
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 5fb3f6361090..d3775b5379e4 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -22,9 +22,6 @@
#include "vsock_addr.h"
-/* vsock-specific sock->sk_state constants */
-#define VSOCK_SS_LISTEN 255
-
#define LAST_RESERVED_PORT 1023
#define vsock_sk(__sk) ((struct vsock_sock *)__sk)
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index b619a190ff12..6cd4d5b48239 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -178,6 +178,9 @@ struct adv_info {
#define HCI_MAX_SHORT_NAME_LENGTH 10
+/* Min encryption key size to match with SMP */
+#define HCI_MIN_ENC_KEY_SIZE 7
+
/* Default LE RPA expiry time, 15 minutes */
#define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 04008209506a..b0f20bc0fd4a 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -149,7 +149,6 @@ struct slave {
unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
s8 link; /* one of BOND_LINK_XXXX */
s8 link_new_state; /* one of BOND_LINK_XXXX */
- s8 new_link;
u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
inactive:1, /* indicates inactive slave */
@@ -523,7 +522,7 @@ static inline void bond_propose_link_state(struct slave *slave, int state)
static inline void bond_commit_link_state(struct slave *slave, bool notify)
{
- if (slave->link == slave->link_new_state)
+ if (slave->link_new_state == BOND_LINK_NOCHANGE)
return;
slave->link = slave->link_new_state;
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 71c72a939bf8..c86fcadccbd7 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -134,7 +134,7 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- sk->sk_napi_id = skb->napi_id;
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
}
@@ -143,8 +143,8 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- if (!sk->sk_napi_id)
- sk->sk_napi_id = skb->napi_id;
+ if (!READ_ONCE(sk->sk_napi_id))
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
}
diff --git a/include/net/dst.h b/include/net/dst.h
index ebfb4328fdb1..2acd670fc86b 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -329,8 +329,9 @@ static inline bool dst_hold_safe(struct dst_entry *dst)
* @skb: buffer
*
* If dst is not yet refcounted and not destroyed, grab a ref on it.
+ * Returns true if dst is refcounted.
*/
-static inline void skb_dst_force(struct sk_buff *skb)
+static inline bool skb_dst_force(struct sk_buff *skb)
{
if (skb_dst_is_noref(skb)) {
struct dst_entry *dst = skb_dst(skb);
@@ -341,6 +342,8 @@ static inline void skb_dst_force(struct sk_buff *skb)
skb->_skb_refdst = (unsigned long)dst;
}
+
+ return skb->_skb_refdst != 0UL;
}
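
A sketch of a caller that uses the new return value, assuming a hypothetical deferred-transmit helper example_queue_for_later(): when the borrowed (noref) dst is already being destroyed, the packet is dropped instead of being queued with a dangling skb->_skb_refdst.

#include <linux/skbuff.h>
#include <net/dst.h>

static bool example_queue_for_later(struct sk_buff_head *txq, struct sk_buff *skb)
{
	if (!skb_dst_force(skb)) {
		/* Could not take a reference: the route is going away. */
		kfree_skb(skb);
		return false;
	}

	skb_queue_tail(txq, skb);
	return true;
}
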
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 22aba321282d..227dc0a84172 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/in6.h>
+#include <linux/siphash.h>
#include <uapi/linux/if_ether.h>
/**
@@ -229,7 +230,7 @@ struct flow_dissector {
struct flow_keys {
struct flow_dissector_key_control control;
#define FLOW_KEYS_HASH_START_FIELD basic
- struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT);
struct flow_dissector_key_tags tags;
struct flow_dissector_key_vlan vlan;
struct flow_dissector_key_keyid keyid;
diff --git a/include/net/fq.h b/include/net/fq.h
index 6d8521a30c5c..2c7687902789 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -70,7 +70,7 @@ struct fq {
struct list_head backlogs;
spinlock_t lock;
u32 flows_cnt;
- u32 perturbation;
+ siphash_key_t perturbation;
u32 limit;
u32 memory_limit;
u32 memory_usage;
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index ac1a2317941e..46903e23eab9 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -105,7 +105,7 @@ static struct fq_flow *fq_flow_classify(struct fq *fq,
lockdep_assert_held(&fq->lock);
- hash = skb_get_hash_perturb(skb, fq->perturbation);
+ hash = skb_get_hash_perturb(skb, &fq->perturbation);
idx = reciprocal_scale(hash, fq->flows_cnt);
flow = &fq->flows[idx];
@@ -255,7 +255,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
INIT_LIST_HEAD(&fq->backlogs);
spin_lock_init(&fq->lock);
fq->flows_cnt = max_t(u32, flows_cnt, 1);
- fq->perturbation = prandom_u32();
+ get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
fq->quantum = 300;
fq->limit = 8192;
fq->memory_limit = 16 << 20; /* 16 MBytes */
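
A sketch of the same pattern for another hash-based classifier, assuming a hypothetical struct example_classifier: the perturbation is now a full siphash key seeded with get_random_bytes() and passed by pointer to skb_get_hash_perturb(), instead of a 32-bit prandom value.

#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <linux/skbuff.h>

struct example_classifier {
	siphash_key_t	perturbation;
	u32		flows_cnt;
};

static void example_classifier_init(struct example_classifier *c, u32 flows)
{
	c->flows_cnt = flows;
	get_random_bytes(&c->perturbation, sizeof(c->perturbation));
}

static u32 example_classify(struct example_classifier *c,
			    const struct sk_buff *skb)
{
	u32 hash = skb_get_hash_perturb(skb, &c->perturbation);

	return reciprocal_scale(hash, c->flows_cnt);
}
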
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index d66f70f63734..3b0e3cdee1c3 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -152,9 +152,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
pkt_len = skb->len - skb_inner_network_offset(skb);
err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
- if (unlikely(net_xmit_eval(err)))
- pkt_len = -1;
- iptunnel_xmit_stats(dev, pkt_len);
+
+ if (dev) {
+ if (unlikely(net_xmit_eval(err)))
+ pkt_len = -1;
+ iptunnel_xmit_stats(dev, pkt_len);
+ }
}
#endif
#endif
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 5d08c1950e7d..f4e5ac8aa366 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -812,11 +812,12 @@ struct ipvs_master_sync_state {
struct ip_vs_sync_buff *sync_buff;
unsigned long sync_queue_len;
unsigned int sync_queue_delay;
- struct task_struct *master_thread;
struct delayed_work master_wakeup_work;
struct netns_ipvs *ipvs;
};
+struct ip_vs_sync_thread_data;
+
/* How much time to keep dests in trash */
#define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
@@ -890,6 +891,7 @@ struct netns_ipvs {
struct delayed_work defense_work; /* Work handler */
int drop_rate;
int drop_counter;
+ int old_secure_tcp;
atomic_t dropentry;
/* locks in ctl.c */
spinlock_t dropentry_lock; /* drop entry handling */
@@ -947,7 +949,8 @@ struct netns_ipvs {
spinlock_t sync_lock;
struct ipvs_master_sync_state *ms;
spinlock_t sync_buff_lock;
- struct task_struct **backup_threads;
+ struct ip_vs_sync_thread_data *master_tinfo;
+ struct ip_vs_sync_thread_data *backup_tinfo;
int threads_mask;
volatile int sync_state;
struct mutex sync_mutex;
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
index 28aa9b30aece..1f77fb4dc79d 100644
--- a/include/net/ipv6_frag.h
+++ b/include/net/ipv6_frag.h
@@ -94,7 +94,6 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
goto out;
head->dev = dev;
- skb_get(head);
spin_unlock(&fq->q.lock);
icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
diff --git a/include/net/llc.h b/include/net/llc.h
index 890a87318014..df282d9b4017 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -66,6 +66,7 @@ struct llc_sap {
int sk_count;
struct hlist_nulls_head sk_laddr_hash[LLC_SK_LADDR_HASH_ENTRIES];
struct hlist_head sk_dev_hash[LLC_SK_DEV_HASH_ENTRIES];
+ struct rcu_head rcu;
};
static inline
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index df528a623548..ea985aa7a6c5 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk);
/* Access to a connection */
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 393099b1901a..1d6b98119a1d 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -429,8 +429,8 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
unsigned long now = jiffies;
- if (neigh->used != now)
- neigh->used = now;
+ if (READ_ONCE(neigh->used) != now)
+ WRITE_ONCE(neigh->used, now);
if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
return __neigh_event_send(neigh, skb);
return 0;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 59a4f50ffe8d..a9704c57430d 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -759,7 +759,8 @@ struct nft_expr_ops {
*/
struct nft_expr {
const struct nft_expr_ops *ops;
- unsigned char data[];
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(u64))));
};
static inline void *nft_expr_priv(const struct nft_expr *expr)
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index a2e4adafd34d..e268c970ec54 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -107,6 +107,7 @@ struct netns_ipv4 {
#endif
int sysctl_tcp_mtu_probing;
int sysctl_tcp_base_mss;
+ int sysctl_tcp_min_snd_mss;
int sysctl_tcp_probe_threshold;
u32 sysctl_tcp_probe_interval;
diff --git a/include/net/psample.h b/include/net/psample.h
index 9b80f814ab04..94cb37a7bf75 100644
--- a/include/net/psample.h
+++ b/include/net/psample.h
@@ -12,6 +12,7 @@ struct psample_group {
u32 group_num;
u32 refcount;
u32 seq;
+ struct rcu_head rcu;
};
struct psample_group *psample_group_get(struct net *net, u32 group_num);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f59acacaa265..37876d842f2e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -305,6 +305,11 @@ static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
return q;
}
+static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
+{
+ return rcu_dereference_bh(qdisc->dev_queue->qdisc);
+}
+
static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc_sleeping;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 749a42882437..c713bd62428f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -103,6 +103,8 @@ void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
/*
* sctp/socket.c
*/
+int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 94c775773f58..c1f71dd464d3 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1181,6 +1181,9 @@ struct sctp_ep_common {
/* What socket does this endpoint belong to? */
struct sock *sk;
+ /* Cache netns and it won't change once set */
+ struct net *net;
+
/* This is where we receive inbound chunks. */
struct sctp_inq inqueue;
diff --git a/include/net/sock.h b/include/net/sock.h
index 60eef7f1ac05..0af46cbd3649 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -916,8 +916,8 @@ static inline void sk_incoming_cpu_update(struct sock *sk)
{
int cpu = raw_smp_processor_id();
- if (unlikely(sk->sk_incoming_cpu != cpu))
- sk->sk_incoming_cpu = cpu;
+ if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
+ WRITE_ONCE(sk->sk_incoming_cpu, cpu);
}
static inline void sock_rps_record_flow_hash(__u32 hash)
@@ -1232,7 +1232,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
percpu_counter_inc(sk->sk_prot->sockets_allocated);
}
-static inline int
+static inline u64
sk_sockets_allocated_read_positive(struct sock *sk)
{
return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
@@ -2131,12 +2131,17 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
* sk_page_frag - return an appropriate page_frag
* @sk: socket
*
- * If socket allocation mode allows current thread to sleep, it means its
- * safe to use the per task page_frag instead of the per socket one.
+ * Use the per task page_frag instead of the per socket one for
+ * optimization when we know that we're in the normal context and own
+ * everything that's associated with %current.
+ *
+ * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
+ * inside other socket operations and end up recursing into sk_page_frag()
+ * while it's already in use.
*/
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
- if (gfpflags_allow_blocking(sk->sk_allocation))
+ if (gfpflags_normal_context(sk->sk_allocation))
return &current->task_frag;
return &sk->sk_frag;
@@ -2224,7 +2229,7 @@ static inline ktime_t sock_read_timestamp(struct sock *sk)
return kt;
#else
- return sk->sk_stamp;
+ return READ_ONCE(sk->sk_stamp);
#endif
}
@@ -2235,7 +2240,7 @@ static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
sk->sk_stamp = kt;
write_sequnlock(&sk->sk_stamp_seq);
#else
- sk->sk_stamp = kt;
+ WRITE_ONCE(sk->sk_stamp, kt);
#endif
}
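
A sketch of a sendmsg-style user of sk_page_frag(); example_copy_to_frag() is an assumed helper. Because the check is now gfpflags_normal_context(), the per-task frag is only handed out when the caller really is in plain process context, so a direct-reclaim path that recurses into the stack gets the per-socket frag and cannot corrupt a half-finished update of current->task_frag.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <net/sock.h>

static int example_copy_to_frag(struct sock *sk, struct msghdr *msg, int copy)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	copy = min_t(int, copy, pfrag->size - pfrag->offset);
	if (copy_from_iter(page_address(pfrag->page) + pfrag->offset,
			   copy, &msg->msg_iter) != copy)
		return -EFAULT;

	pfrag->offset += copy;
	return copy;
}
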
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d0c2dbe94e21..785c4ef4e1bf 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -57,6 +57,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
+#define TCP_MIN_SND_MSS 48
+#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
/*
* Never offer a window over 32767 without using window scaling. Some
@@ -1041,7 +1043,8 @@ void tcp_get_default_congestion_control(char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
+ bool reinit, bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
@@ -1685,6 +1688,27 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
tcp_sk(sk)->highest_sack = NULL;
}
+static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
+{
+ struct sk_buff *skb = tcp_write_queue_head(sk);
+
+ if (skb == tcp_send_head(sk))
+ skb = NULL;
+
+ return skb;
+}
+
+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+ struct sk_buff *skb = tcp_send_head(sk);
+
+ /* empty retransmit queue, for example due to zero window */
+ if (skb == tcp_write_queue_head(sk))
+ return NULL;
+
+ return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
+}
+
static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
__skb_queue_tail(&sk->sk_write_queue, skb);
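
A small sketch using the new accessors, assuming a hypothetical example_rtx_queue_empty() check: on this kernel the retransmit queue is simply the part of sk_write_queue in front of tcp_send_head(), so a NULL head means nothing is in flight, and tcp_rtx_queue_tail() likewise returns the last sent-but-unacked skb (or NULL, e.g. on a zero window).

#include <net/tcp.h>

static bool example_rtx_queue_empty(const struct sock *sk)
{
	return tcp_rtx_queue_head(sk) == NULL;
}
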
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index db99efb2d1d0..57b8b11cf7d4 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -323,7 +323,6 @@ int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int fam
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
void km_policy_notify(struct xfrm_policy *xp, int dir,
const struct km_event *c);
-void xfrm_policy_cache_flush(void);
void km_state_notify(struct xfrm_state *x, const struct km_event *c);
struct xfrm_tmpl;
@@ -1367,6 +1366,23 @@ static inline int xfrm_state_kern(const struct xfrm_state *x)
return atomic_read(&x->tunnel_users);
}
+static inline bool xfrm_id_proto_valid(u8 proto)
+{
+ switch (proto) {
+ case IPPROTO_AH:
+ case IPPROTO_ESP:
+ case IPPROTO_COMP:
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ROUTING:
+ case IPPROTO_DSTOPTS:
+#endif
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
{
return (!userproto || proto == userproto ||
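
A sketch of the kind of check xfrm_id_proto_valid() enables in a configuration path; example_validate_tmpl() is a hypothetical validator for a user-supplied template.

#include <linux/errno.h>
#include <net/xfrm.h>

static int example_validate_tmpl(const struct xfrm_user_tmpl *ut)
{
	/* Only protocols an SA can legally carry are accepted. */
	if (!xfrm_id_proto_valid(ut->id.proto))
		return -EINVAL;

	return 0;
}
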
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9e76b2410d03..73cc5cfb72e0 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -68,6 +68,7 @@
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
+extern struct workqueue_struct *ib_comp_unbound_wq;
union ib_gid {
u8 raw[16];
@@ -305,7 +306,7 @@ enum ib_cq_creation_flags {
struct ib_cq_init_attr {
unsigned int cqe;
- int comp_vector;
+ u32 comp_vector;
u32 flags;
};
@@ -1119,7 +1120,7 @@ struct ib_qp_init_attr {
struct ib_qp_cap cap;
enum ib_sig_type sq_sig_type;
enum ib_qp_type qp_type;
- enum ib_qp_create_flags create_flags;
+ u32 create_flags;
/*
* Only needed for special QP types, or when using the RW API.
@@ -1544,9 +1545,10 @@ struct ib_ah {
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
enum ib_poll_context {
- IB_POLL_DIRECT, /* caller context, no hw completions */
- IB_POLL_SOFTIRQ, /* poll from softirq context */
- IB_POLL_WORKQUEUE, /* poll from workqueue */
+ IB_POLL_DIRECT, /* caller context, no hw completions */
+ IB_POLL_SOFTIRQ, /* poll from softirq context */
+ IB_POLL_WORKQUEUE, /* poll from workqueue */
+ IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
};
struct ib_cq {
@@ -1563,6 +1565,7 @@ struct ib_cq {
struct irq_poll iop;
struct work_struct work;
};
+ struct workqueue_struct *comp_wq;
};
struct ib_srq {
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index a4e41444f5fe..282358843659 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -241,6 +241,7 @@ struct fcoe_fcf {
* @vn_mac: VN_Node assigned MAC address for data
*/
struct fcoe_rport {
+ struct fc_rport_priv rdata;
unsigned long time;
u16 fcoe_len;
u16 flags;
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index 04e0679767f6..2b5dfae78272 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -6,8 +6,6 @@ struct scsi_cmnd;
struct scsi_device;
struct scsi_sense_hdr;
-#define SCSI_LOG_BUFSIZE 128
-
extern void scsi_print_command(struct scsi_cmnd *);
extern size_t __scsi_format_command(char *, size_t,
const unsigned char *, size_t);
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 2b7e227960e1..91f403341dd7 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -32,6 +32,7 @@ extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
struct scsi_eh_save {
/* saved state */
int result;
+ unsigned int resid_len;
int eh_eflags;
enum dma_data_direction data_direction;
unsigned underflow;
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 392bac18398b..33a07c3badf0 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -186,10 +186,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
if (snd_BUG_ON(!stream))
return;
- if (stream->direction == SND_COMPRESS_PLAYBACK)
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
- else
- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
wake_up(&stream->runtime->sleep);
}
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 344b96c206a3..14a35befad6c 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -349,6 +349,8 @@ struct device;
#define SND_SOC_DAPM_WILL_PMD 0x80 /* called at start of sequence */
#define SND_SOC_DAPM_PRE_POST_PMD \
(SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD)
+#define SND_SOC_DAPM_PRE_POST_PMU \
+ (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU)
/* convenience event type detection */
#define SND_SOC_DAPM_EVENT_ON(e) \
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 6e692a52936c..18197e0bb510 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -169,9 +169,14 @@ TRACE_EVENT(sched_switch,
(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
__print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
- { 0x01, "S" }, { 0x02, "D" }, { 0x04, "T" },
- { 0x08, "t" }, { 0x10, "X" }, { 0x20, "Z" },
- { 0x40, "P" }, { 0x80, "I" }) :
+ { TASK_INTERRUPTIBLE, "S" },
+ { TASK_UNINTERRUPTIBLE, "D" },
+ { __TASK_STOPPED, "T" },
+ { __TASK_TRACED, "t" },
+ { EXIT_DEAD, "X" },
+ { EXIT_ZOMBIE, "Z" },
+ { TASK_PARKED, "P" },
+ { TASK_DEAD, "I" }) :
"R",
__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index c3114c989e91..f50dd34e4f7b 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -789,8 +789,8 @@ struct cec_event {
#define CEC_MSG_SELECT_DIGITAL_SERVICE 0x93
#define CEC_MSG_TUNER_DEVICE_STATUS 0x07
/* Recording Flag Operand (rec_flag) */
-#define CEC_OP_REC_FLAG_USED 0
-#define CEC_OP_REC_FLAG_NOT_USED 1
+#define CEC_OP_REC_FLAG_NOT_USED 0
+#define CEC_OP_REC_FLAG_USED 1
/* Tuner Display Info Operand (tuner_display_info) */
#define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL 0
#define CEC_OP_TUNER_DISPLAY_INFO_NONE 1
diff --git a/include/uapi/linux/coda_psdev.h b/include/uapi/linux/coda_psdev.h
index aa6623efd2dd..d50d51a57fe4 100644
--- a/include/uapi/linux/coda_psdev.h
+++ b/include/uapi/linux/coda_psdev.h
@@ -7,19 +7,6 @@
#define CODA_PSDEV_MAJOR 67
#define MAX_CODADEVS 5 /* how many do we allow */
-
-/* messages between coda filesystem in kernel and Venus */
-struct upc_req {
- struct list_head uc_chain;
- caddr_t uc_data;
- u_short uc_flags;
- u_short uc_inSize; /* Size is at most 5000 bytes */
- u_short uc_outSize;
- u_short uc_opcode; /* copied from data to save lookup */
- int uc_unique;
- wait_queue_head_t uc_sleep; /* process' wait queue */
-};
-
#define CODA_REQ_ASYNC 0x1
#define CODA_REQ_READ 0x2
#define CODA_REQ_WRITE 0x4
diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h
index 4941628a4fb9..5ec88e7548a9 100644
--- a/include/uapi/linux/isdn/capicmd.h
+++ b/include/uapi/linux/isdn/capicmd.h
@@ -16,6 +16,7 @@
#define CAPI_MSG_BASELEN 8
#define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2)
#define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2)
+#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4)
/*----- CAPI commands -----*/
#define CAPI_ALERT 0x01
diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h
index 5c8a4d760ee3..b5123ab8d54a 100644
--- a/include/uapi/linux/netfilter/xt_nfacct.h
+++ b/include/uapi/linux/netfilter/xt_nfacct.h
@@ -11,4 +11,9 @@ struct xt_nfacct_match_info {
struct nf_acct *nfacct;
};
+struct xt_nfacct_match_info_v1 {
+ char name[NFACCT_NAME_MAX];
+ struct nf_acct *nfacct __attribute__((aligned(8)));
+};
+
#endif /* _XT_NFACCT_MATCH_H */
diff --git a/include/uapi/linux/nilfs2_ondisk.h b/include/uapi/linux/nilfs2_ondisk.h
index a7e66ab11d1d..c23f91ae5fe8 100644
--- a/include/uapi/linux/nilfs2_ondisk.h
+++ b/include/uapi/linux/nilfs2_ondisk.h
@@ -29,7 +29,7 @@
#include <linux/types.h>
#include <linux/magic.h>
-
+#include <asm/byteorder.h>
#define NILFS_INODE_BMAP_SIZE 7
@@ -533,19 +533,19 @@ enum {
static inline void \
nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \
{ \
- cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \
- (1UL << NILFS_CHECKPOINT_##flag)); \
+ cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) | \
+ (1UL << NILFS_CHECKPOINT_##flag)); \
} \
static inline void \
nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \
{ \
- cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \
+ cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) & \
~(1UL << NILFS_CHECKPOINT_##flag)); \
} \
static inline int \
nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \
{ \
- return !!(le32_to_cpu(cp->cp_flags) & \
+ return !!(__le32_to_cpu(cp->cp_flags) & \
(1UL << NILFS_CHECKPOINT_##flag)); \
}
@@ -595,20 +595,20 @@ enum {
static inline void \
nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \
{ \
- su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \
+ su->su_flags = __cpu_to_le32(__le32_to_cpu(su->su_flags) | \
(1UL << NILFS_SEGMENT_USAGE_##flag));\
} \
static inline void \
nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \
{ \
su->su_flags = \
- cpu_to_le32(le32_to_cpu(su->su_flags) & \
+ __cpu_to_le32(__le32_to_cpu(su->su_flags) & \
~(1UL << NILFS_SEGMENT_USAGE_##flag)); \
} \
static inline int \
nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \
{ \
- return !!(le32_to_cpu(su->su_flags) & \
+ return !!(__le32_to_cpu(su->su_flags) & \
(1UL << NILFS_SEGMENT_USAGE_##flag)); \
}
@@ -619,15 +619,15 @@ NILFS_SEGMENT_USAGE_FNS(ERROR, error)
static inline void
nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su)
{
- su->su_lastmod = cpu_to_le64(0);
- su->su_nblocks = cpu_to_le32(0);
- su->su_flags = cpu_to_le32(0);
+ su->su_lastmod = __cpu_to_le64(0);
+ su->su_nblocks = __cpu_to_le32(0);
+ su->su_flags = __cpu_to_le32(0);
}
static inline int
nilfs_segment_usage_clean(const struct nilfs_segment_usage *su)
{
- return !le32_to_cpu(su->su_flags);
+ return !__le32_to_cpu(su->su_flags);
}
/**
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index f5d753e60836..bf31965355c6 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -278,6 +278,7 @@ enum
LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
LINUX_MIB_TCPMTUPFAIL, /* TCPMTUPFail */
LINUX_MIB_TCPMTUPSUCCESS, /* TCPMTUPSuccess */
+ LINUX_MIB_TCPWQUEUETOOBIG, /* TCPWqueueTooBig */
__LINUX_MIB_MAX
};
diff --git a/include/xen/events.h b/include/xen/events.h
index c3e6bc643a7b..1650d39decae 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -3,6 +3,7 @@
#define _XEN_EVENTS_H
#include <linux/interrupt.h>
+#include <linux/irq.h>
#ifdef CONFIG_PCI_MSI
#include <linux/msi.h>
#endif
@@ -59,7 +60,7 @@ void evtchn_put(unsigned int evtchn);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
void rebind_evtchn_irq(int evtchn, int irq);
-int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu);
+int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);
static inline void notify_remote_via_evtchn(int port)
{