path: root/include
author    Linus Torvalds <torvalds@linux-foundation.org>    2014-04-07 16:38:06 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-04-07 16:38:06 -0700
commit    26c12d93348f0bda0756aff83f4867d9ae58a5a6 (patch)
tree      65221f6837c66a9260c5c973e5fb908b10e0d504 /include
parent    dc5ed40686a4da95881c35d913b60f867755cbe2 (diff)
parent    fdc5813fbbd484a54c88477f91a78934cda8bb32 (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge second patch-bomb from Andrew Morton:

 - the rest of MM
 - zram updates
 - zswap updates
 - exit
 - procfs
 - exec
 - wait
 - crash dump
 - lib/idr
 - rapidio
 - adfs, affs, bfs, ufs
 - cris
 - Kconfig things
 - initramfs
 - small amount of IPC material
 - percpu enhancements
 - early ioremap support
 - various other misc things

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (156 commits)
  MAINTAINERS: update Intel C600 SAS driver maintainers
  fs/ufs: remove unused ufs_super_block_third pointer
  fs/ufs: remove unused ufs_super_block_second pointer
  fs/ufs: remove unused ufs_super_block_first pointer
  fs/ufs/super.c: add __init to init_inodecache()
  doc/kernel-parameters.txt: add early_ioremap_debug
  arm64: add early_ioremap support
  arm64: initialize pgprot info earlier in boot
  x86: use generic early_ioremap
  mm: create generic early_ioremap() support
  x86/mm: sparse warning fix for early_memremap
  lglock: map to spinlock when !CONFIG_SMP
  percpu: add preemption checks to __this_cpu ops
  vmstat: use raw_cpu_ops to avoid false positives on preemption checks
  slub: use raw_cpu_inc for incrementing statistics
  net: replace __this_cpu_inc in route.c with raw_cpu_inc
  modules: use raw_cpu_write for initialization of per cpu refcount.
  mm: use raw_cpu ops for determining current NUMA node
  percpu: add raw_cpu_ops
  slub: fix leak of 'name' in sysfs_slab_add
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/bug.h | 60
-rw-r--r--  include/asm-generic/early_ioremap.h | 42
-rw-r--r--  include/asm-generic/io.h | 4
-rw-r--r--  include/asm-generic/iomap.h | 2
-rw-r--r--  include/asm-generic/percpu.h | 13
-rw-r--r--  include/linux/binfmts.h | 1
-rw-r--r--  include/linux/crash_dump.h | 1
-rw-r--r--  include/linux/idr.h | 63
-rw-r--r--  include/linux/io.h | 2
-rw-r--r--  include/linux/lglock.h | 16
-rw-r--r--  include/linux/memcontrol.h | 23
-rw-r--r--  include/linux/mempolicy.h | 3
-rw-r--r--  include/linux/mm.h | 17
-rw-r--r--  include/linux/mm_types.h | 4
-rw-r--r--  include/linux/mmdebug.h | 4
-rw-r--r--  include/linux/percpu.h | 350
-rw-r--r--  include/linux/res_counter.h | 6
-rw-r--r--  include/linux/rio.h | 5
-rw-r--r--  include/linux/sched.h | 15
-rw-r--r--  include/linux/slab.h | 6
-rw-r--r--  include/linux/slub_def.h | 3
-rw-r--r--  include/linux/topology.h | 4
-rw-r--r--  include/linux/vmacache.h | 38
-rw-r--r--  include/linux/vmstat.h | 8
-rw-r--r--  include/linux/writeback.h | 2
-rw-r--r--  include/trace/events/task.h | 2
-rw-r--r--  include/uapi/asm-generic/mman-common.h | 2
-rw-r--r--  include/uapi/linux/prctl.h | 3
28 files changed, 417 insertions, 282 deletions
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7d10f962aa13..630dd2372238 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -52,7 +52,7 @@ struct bug_entry {
#endif
#ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
#endif
/*
@@ -106,33 +106,6 @@ extern void warn_slowpath_null(const char *file, const int line);
unlikely(__ret_warn_on); \
})
-#else /* !CONFIG_BUG */
-#ifndef HAVE_ARCH_BUG
-#define BUG() do {} while(0)
-#endif
-
-#ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (condition) ; } while(0)
-#endif
-
-#ifndef HAVE_ARCH_WARN_ON
-#define WARN_ON(condition) ({ \
- int __ret_warn_on = !!(condition); \
- unlikely(__ret_warn_on); \
-})
-#endif
-
-#ifndef WARN
-#define WARN(condition, format...) ({ \
- int __ret_warn_on = !!(condition); \
- unlikely(__ret_warn_on); \
-})
-#endif
-
-#define WARN_TAINT(condition, taint, format...) WARN_ON(condition)
-
-#endif
-
#define WARN_ON_ONCE(condition) ({ \
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
@@ -163,6 +136,37 @@ extern void warn_slowpath_null(const char *file, const int line);
unlikely(__ret_warn_once); \
})
+#else /* !CONFIG_BUG */
+#ifndef HAVE_ARCH_BUG
+#define BUG() do {} while (1)
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (condition) ; } while (0)
+#endif
+
+#ifndef HAVE_ARCH_WARN_ON
+#define WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ no_printk(format); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#define WARN_ON_ONCE(condition) WARN_ON(condition)
+#define WARN_ONCE(condition, format...) WARN(condition, format)
+#define WARN_TAINT(condition, taint, format...) WARN(condition, format)
+#define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format)
+
+#endif
+
/*
* WARN_ON_SMP() is for cases that the warning is either
* meaningless for !SMP or may even cause failures.
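With CONFIG_BUG=n, the stub block now sits after the real definitions, BUG() spins instead of falling through, and the stub WARN() feeds its arguments to no_printk(), so format strings stay type-checked even when warnings are compiled out. A minimal sketch of a hypothetical caller that benefits from that checking:

/* Hypothetical caller, not part of the patch: with CONFIG_BUG=n the format
 * arguments are still checked against the format string via no_printk(). */
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/types.h>

static int check_len(size_t len, size_t max)
{
	if (WARN(len > max, "len %zu exceeds max %zu\n", len, max))
		return -EINVAL;
	return 0;
}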
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
new file mode 100644
index 000000000000..a5de55c04fb2
--- /dev/null
+++ b/include/asm-generic/early_ioremap.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_EARLY_IOREMAP_H_
+#define _ASM_EARLY_IOREMAP_H_
+
+#include <linux/types.h>
+
+/*
+ * early_ioremap() and early_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ */
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+ unsigned long size);
+extern void *early_memremap(resource_size_t phys_addr,
+ unsigned long size);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
+extern void early_memunmap(void *addr, unsigned long size);
+
+/*
+ * Weak function called by early_ioremap_reset(). It does nothing, but
+ * architectures may provide their own version to do any needed cleanups.
+ */
+extern void early_ioremap_shutdown(void);
+
+#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
+/* Arch-specific initialization */
+extern void early_ioremap_init(void);
+
+/* Generic initialization called by architecture code */
+extern void early_ioremap_setup(void);
+
+/*
+ * Called as last step in paging_init() so library can act
+ * accordingly for subsequent map/unmap requests.
+ */
+extern void early_ioremap_reset(void);
+
+#else
+static inline void early_ioremap_init(void) { }
+static inline void early_ioremap_setup(void) { }
+static inline void early_ioremap_reset(void) { }
+#endif
+
+#endif /* _ASM_EARLY_IOREMAP_H_ */
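early_ioremap()/early_memremap() provide temporary boot-time mappings before the normal ioremap() machinery is up. For illustration, a minimal sketch of boot code peeking at a firmware table; the header path, table address, and helper name are assumptions, not from this series:

/* Hedged sketch, not from this series: read a signature from a made-up
 * physical address before the regular ioremap() is functional. */
#include <linux/init.h>
#include <linux/types.h>
#include <asm/early_ioremap.h>

#define FW_TABLE_PHYS	0x000f0000UL	/* hypothetical address */
#define FW_TABLE_SIZE	0x1000UL

static u32 __init read_fw_signature(void)
{
	void *tbl;
	u32 sig;

	tbl = early_memremap(FW_TABLE_PHYS, FW_TABLE_SIZE);
	if (!tbl)
		return 0;
	sig = *(u32 *)tbl;			/* normal memory semantics */
	early_memunmap(tbl, FW_TABLE_SIZE);
	return sig;
}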
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index d5afe96adba6..975e1cc75edb 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -327,7 +327,7 @@ static inline void iounmap(void __iomem *addr)
}
#endif /* CONFIG_MMU */
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
@@ -341,7 +341,7 @@ static inline void ioport_unmap(void __iomem *p)
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
-#endif /* CONFIG_HAS_IOPORT */
+#endif /* CONFIG_HAS_IOPORT_MAP */
#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr(p) p
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 6afd7d6a9899..1b41011643a5 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -56,7 +56,7 @@ extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long coun
extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d17784ea37ff..0703aa75b5e8 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -56,17 +56,17 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu(var, cpu) \
(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
-#ifndef __this_cpu_ptr
-#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#ifndef raw_cpu_ptr
+#define raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
#endif
#ifdef CONFIG_DEBUG_PREEMPT
#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
#else
-#define this_cpu_ptr(ptr) __this_cpu_ptr(ptr)
+#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
#endif
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
-#define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var)))
+#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
@@ -83,7 +83,7 @@ extern void setup_per_cpu_areas(void);
#define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
-#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
+#define raw_cpu_ptr(ptr) this_cpu_ptr(ptr)
#endif /* SMP */
@@ -122,4 +122,7 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_DEF_ATTRIBUTES
#endif
+/* Keep until we have removed all uses of __this_cpu_ptr */
+#define __this_cpu_ptr raw_cpu_ptr
+
#endif /* _ASM_GENERIC_PERCPU_H_ */
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index b4a745d7d9a9..61f29e5ea840 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -44,7 +44,6 @@ struct linux_binprm {
unsigned interp_flags;
unsigned interp_data;
unsigned long loader, exec;
- char tcomm[TASK_COMM_LEN];
};
#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 7032518f8542..72ab536ad3de 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -25,6 +25,7 @@ extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
unsigned long, int);
+void vmcore_cleanup(void);
/* Architecture code defines this if there are other possible ELF
* machine types, e.g. on bi-arch capable hardware. */
diff --git a/include/linux/idr.h b/include/linux/idr.h
index f669585c4fc5..6af3400b9b2f 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -133,69 +133,6 @@ static inline void *idr_find(struct idr *idr, int id)
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
/*
- * Don't use the following functions. These exist only to suppress
- * deprecated warnings on EXPORT_SYMBOL()s.
- */
-int __idr_pre_get(struct idr *idp, gfp_t gfp_mask);
-int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
-void __idr_remove_all(struct idr *idp);
-
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp: idr handle
- * @gfp_mask: memory allocation flags
- *
- * Part of old alloc interface. This is going away. Use
- * idr_preload[_end]() and idr_alloc() instead.
- */
-static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask)
-{
- return __idr_pre_get(idp, gfp_mask);
-}
-
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @starting_id: id to start search at
- * @id: pointer to the allocated handle
- *
- * Part of old alloc interface. This is going away. Use
- * idr_preload[_end]() and idr_alloc() instead.
- */
-static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr,
- int starting_id, int *id)
-{
- return __idr_get_new_above(idp, ptr, starting_id, id);
-}
-
-/**
- * idr_get_new - allocate new idr entry
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @id: pointer to the allocated handle
- *
- * Part of old alloc interface. This is going away. Use
- * idr_preload[_end]() and idr_alloc() instead.
- */
-static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id)
-{
- return __idr_get_new_above(idp, ptr, 0, id);
-}
-
-/**
- * idr_remove_all - remove all ids from the given idr tree
- * @idp: idr handle
- *
- * If you're trying to destroy @idp, calling idr_destroy() is enough.
- * This is going away. Don't use.
- */
-static inline void __deprecated idr_remove_all(struct idr *idp)
-{
- __idr_remove_all(idp);
-}
-
-/*
* IDA - IDR based id allocator, use when translation from id to
* pointer isn't necessary.
*
diff --git a/include/linux/io.h b/include/linux/io.h
index 8a18e75600cc..b76e6e545806 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -41,7 +41,7 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
/*
* Managed iomap interface
*/
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
unsigned int nr);
void devm_ioport_unmap(struct device *dev, void __iomem *addr);
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 96549abe8842..0081f000e34b 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -25,6 +25,8 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
+#ifdef CONFIG_SMP
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define LOCKDEP_INIT_MAP lockdep_init_map
#else
@@ -57,4 +59,18 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu);
void lg_global_lock(struct lglock *lg);
void lg_global_unlock(struct lglock *lg);
+#else
+/* When !CONFIG_SMP, map lglock to spinlock */
+#define lglock spinlock
+#define DEFINE_LGLOCK(name) DEFINE_SPINLOCK(name)
+#define DEFINE_STATIC_LGLOCK(name) static DEFINE_SPINLOCK(name)
+#define lg_lock_init(lg, name) spin_lock_init(lg)
+#define lg_local_lock spin_lock
+#define lg_local_unlock spin_unlock
+#define lg_local_lock_cpu(lg, cpu) spin_lock(lg)
+#define lg_local_unlock_cpu(lg, cpu) spin_unlock(lg)
+#define lg_global_lock spin_lock
+#define lg_global_unlock spin_unlock
+#endif
+
#endif
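On !CONFIG_SMP builds the whole lglock API now degrades to a single ordinary spinlock, so callers compile unchanged in both configurations. A minimal sketch with a made-up lock name:

/* Hedged sketch: the same caller builds for SMP (per-CPU lglock) and
 * UP (plain spinlock) kernels after this change. */
#include <linux/lglock.h>

DEFINE_STATIC_LGLOCK(demo_lglock);	/* hypothetical lock */

static void touch_local_state(void)
{
	lg_local_lock(&demo_lglock);
	/* ... update this CPU's part of the protected data ... */
	lg_local_unlock(&demo_lglock);
}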
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index eccfb4a4b379..b569b8be5c5a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -65,7 +65,7 @@ struct mem_cgroup_reclaim_cookie {
* (Of course, if memcg does memory allocation in future, GFP_KERNEL is sane.)
*/
-extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
+extern int mem_cgroup_charge_anon(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
@@ -74,7 +74,7 @@ extern void mem_cgroup_commit_charge_swapin(struct page *page,
struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
-extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+extern int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
@@ -94,7 +94,6 @@ bool task_in_mem_cgroup(struct task_struct *task,
extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
-extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
@@ -234,13 +233,13 @@ void mem_cgroup_print_bad_page(struct page *page);
#else /* CONFIG_MEMCG */
struct mem_cgroup;
-static inline int mem_cgroup_newpage_charge(struct page *page,
+static inline int mem_cgroup_charge_anon(struct page *page,
struct mm_struct *mm, gfp_t gfp_mask)
{
return 0;
}
-static inline int mem_cgroup_cache_charge(struct page *page,
+static inline int mem_cgroup_charge_file(struct page *page,
struct mm_struct *mm, gfp_t gfp_mask)
{
return 0;
@@ -294,11 +293,6 @@ static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
return NULL;
}
-static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
-{
- return NULL;
-}
-
static inline bool mm_match_cgroup(struct mm_struct *mm,
struct mem_cgroup *memcg)
{
@@ -497,6 +491,9 @@ void __memcg_kmem_commit_charge(struct page *page,
void __memcg_kmem_uncharge_pages(struct page *page, int order);
int memcg_cache_id(struct mem_cgroup *memcg);
+
+char *memcg_create_cache_name(struct mem_cgroup *memcg,
+ struct kmem_cache *root_cache);
int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
struct kmem_cache *root_cache);
void memcg_free_cache_params(struct kmem_cache *s);
@@ -510,7 +507,7 @@ struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
-void kmem_cache_destroy_memcg_children(struct kmem_cache *s);
+int __kmem_cache_destroy_memcg_children(struct kmem_cache *s);
/**
* memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
@@ -664,10 +661,6 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
return cachep;
}
-
-static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
-{
-}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5f1ea756aace..3c1b968da0ca 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -143,7 +143,6 @@ extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
-extern void mpol_fix_fork_child_flag(struct task_struct *p);
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
@@ -151,7 +150,7 @@ extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
const nodemask_t *mask);
-extern unsigned slab_node(void);
+extern unsigned int mempolicy_slab_node(void);
extern enum zone_type policy_zone;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 35300f390eb6..abc848412e3c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -177,6 +177,9 @@ extern unsigned int kobjsize(const void *objp);
*/
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+/* This mask defines which mm->def_flags a process can inherit its parent */
+#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
+
/*
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask..
@@ -210,6 +213,10 @@ struct vm_fault {
* is set (which is also implied by
* VM_FAULT_ERROR).
*/
+ /* for ->map_pages() only */
+ pgoff_t max_pgoff; /* map pages for offset from pgoff till
+ * max_pgoff inclusive */
+ pte_t *pte; /* pte entry associated with ->pgoff */
};
/*
@@ -221,6 +228,7 @@ struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
@@ -581,6 +589,9 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
pte = pte_mkwrite(pte);
return pte;
}
+
+void do_set_pte(struct vm_area_struct *vma, unsigned long address,
+ struct page *page, pte_t *pte, bool write, bool anon);
#endif
/*
@@ -684,7 +695,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
-#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_WIDTH) - 1)
+#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
static inline enum zone_type page_zonenum(const struct page *page)
@@ -1836,6 +1847,7 @@ extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
+extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
/* mm/page-writeback.c */
@@ -1863,9 +1875,6 @@ void page_cache_async_readahead(struct address_space *mapping,
unsigned long size);
unsigned long max_sane_readahead(unsigned long nr);
-unsigned long ra_submit(struct file_ra_state *ra,
- struct address_space *mapping,
- struct file *filp);
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
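The new ->map_pages() hook lets a fault handler populate several nearby ptes in one pass ("fault around"), with filemap_map_pages() exported as the generic implementation next to filemap_fault(). A hedged sketch of how a filesystem might wire it up; the filesystem and function names are hypothetical, the callbacks are the generic ones declared above:

/* Hedged sketch (hypothetical filesystem) using the generic callbacks. */
#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct examplefs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,	/* fault-around */
	.page_mkwrite	= filemap_page_mkwrite,
};

static int examplefs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &examplefs_file_vm_ops;
	return 0;
}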
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 290901a8c1de..2b58d192ea24 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -342,9 +342,9 @@ struct mm_rss_stat {
struct kioctx_table;
struct mm_struct {
- struct vm_area_struct * mmap; /* list of VMAs */
+ struct vm_area_struct *mmap; /* list of VMAs */
struct rb_root mm_rb;
- struct vm_area_struct * mmap_cache; /* last find_vma result */
+ u32 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 5042c036dda9..2d57efa64cc1 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -3,8 +3,8 @@
struct page;
-extern void dump_page(struct page *page, char *reason);
-extern void dump_page_badflags(struct page *page, char *reason,
+extern void dump_page(struct page *page, const char *reason);
+extern void dump_page_badflags(struct page *page, const char *reason,
unsigned long badflags);
#ifdef CONFIG_DEBUG_VM
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index e3817d2441b6..e7a0b95ed527 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -173,6 +173,12 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
extern void __bad_size_call_parameter(void);
+#ifdef CONFIG_DEBUG_PREEMPT
+extern void __this_cpu_preempt_check(const char *op);
+#else
+static inline void __this_cpu_preempt_check(const char *op) { }
+#endif
+
#define __pcpu_size_call_return(stem, variable) \
({ typeof(variable) pscr_ret__; \
__verify_pcpu_ptr(&(variable)); \
@@ -243,6 +249,8 @@ do { \
} while (0)
/*
+ * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
+ *
* Optimized manipulation for memory allocated through the per cpu
* allocator or for addresses of per cpu variables.
*
@@ -296,7 +304,7 @@ do { \
do { \
unsigned long flags; \
raw_local_irq_save(flags); \
- *__this_cpu_ptr(&(pcp)) op val; \
+ *raw_cpu_ptr(&(pcp)) op val; \
raw_local_irq_restore(flags); \
} while (0)
@@ -381,8 +389,8 @@ do { \
typeof(pcp) ret__; \
unsigned long flags; \
raw_local_irq_save(flags); \
- __this_cpu_add(pcp, val); \
- ret__ = __this_cpu_read(pcp); \
+ raw_cpu_add(pcp, val); \
+ ret__ = raw_cpu_read(pcp); \
raw_local_irq_restore(flags); \
ret__; \
})
@@ -411,8 +419,8 @@ do { \
({ typeof(pcp) ret__; \
unsigned long flags; \
raw_local_irq_save(flags); \
- ret__ = __this_cpu_read(pcp); \
- __this_cpu_write(pcp, nval); \
+ ret__ = raw_cpu_read(pcp); \
+ raw_cpu_write(pcp, nval); \
raw_local_irq_restore(flags); \
ret__; \
})
@@ -439,9 +447,9 @@ do { \
typeof(pcp) ret__; \
unsigned long flags; \
raw_local_irq_save(flags); \
- ret__ = __this_cpu_read(pcp); \
+ ret__ = raw_cpu_read(pcp); \
if (ret__ == (oval)) \
- __this_cpu_write(pcp, nval); \
+ raw_cpu_write(pcp, nval); \
raw_local_irq_restore(flags); \
ret__; \
})
@@ -476,7 +484,7 @@ do { \
int ret__; \
unsigned long flags; \
raw_local_irq_save(flags); \
- ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
+ ret__ = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \
oval1, oval2, nval1, nval2); \
raw_local_irq_restore(flags); \
ret__; \
@@ -504,12 +512,8 @@ do { \
#endif
/*
- * Generic percpu operations for context that are safe from preemption/interrupts.
- * Either we do not care about races or the caller has the
- * responsibility of handling preemption/interrupt issues. Arch code can still
- * override these instructions since the arch per cpu code may be more
- * efficient and may actually get race freeness for free (that is the
- * case for x86 for example).
+ * Generic percpu operations for contexts where we do not want to do
+ * any checks for preemption.
*
* If there is no other protection through preempt disable and/or
* disabling interupts then one of these RMW operations can show unexpected
@@ -517,211 +521,285 @@ do { \
* or an interrupt occurred and the same percpu variable was modified from
* the interrupt context.
*/
-#ifndef __this_cpu_read
-# ifndef __this_cpu_read_1
-# define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp)))
+#ifndef raw_cpu_read
+# ifndef raw_cpu_read_1
+# define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp)))
# endif
-# ifndef __this_cpu_read_2
-# define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp)))
+# ifndef raw_cpu_read_2
+# define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp)))
# endif
-# ifndef __this_cpu_read_4
-# define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp)))
+# ifndef raw_cpu_read_4
+# define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp)))
# endif
-# ifndef __this_cpu_read_8
-# define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp)))
+# ifndef raw_cpu_read_8
+# define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp)))
# endif
-# define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp))
+# define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, (pcp))
#endif
-#define __this_cpu_generic_to_op(pcp, val, op) \
+#define raw_cpu_generic_to_op(pcp, val, op) \
do { \
- *__this_cpu_ptr(&(pcp)) op val; \
+ *raw_cpu_ptr(&(pcp)) op val; \
} while (0)
-#ifndef __this_cpu_write
-# ifndef __this_cpu_write_1
-# define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+
+#ifndef raw_cpu_write
+# ifndef raw_cpu_write_1
+# define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), =)
# endif
-# ifndef __this_cpu_write_2
-# define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# ifndef raw_cpu_write_2
+# define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), =)
# endif
-# ifndef __this_cpu_write_4
-# define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# ifndef raw_cpu_write_4
+# define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), =)
# endif
-# ifndef __this_cpu_write_8
-# define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# ifndef raw_cpu_write_8
+# define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), =)
# endif
-# define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val))
+# define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, (pcp), (val))
#endif
-#ifndef __this_cpu_add
-# ifndef __this_cpu_add_1
-# define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+#ifndef raw_cpu_add
+# ifndef raw_cpu_add_1
+# define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=)
# endif
-# ifndef __this_cpu_add_2
-# define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# ifndef raw_cpu_add_2
+# define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=)
# endif
-# ifndef __this_cpu_add_4
-# define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# ifndef raw_cpu_add_4
+# define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=)
# endif
-# ifndef __this_cpu_add_8
-# define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# ifndef raw_cpu_add_8
+# define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=)
# endif
-# define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val))
+# define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, (pcp), (val))
#endif
-#ifndef __this_cpu_sub
-# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val))
+#ifndef raw_cpu_sub
+# define raw_cpu_sub(pcp, val) raw_cpu_add((pcp), -(val))
#endif
-#ifndef __this_cpu_inc
-# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1)
+#ifndef raw_cpu_inc
+# define raw_cpu_inc(pcp) raw_cpu_add((pcp), 1)
#endif
-#ifndef __this_cpu_dec
-# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1)
+#ifndef raw_cpu_dec
+# define raw_cpu_dec(pcp) raw_cpu_sub((pcp), 1)
#endif
-#ifndef __this_cpu_and
-# ifndef __this_cpu_and_1
-# define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+#ifndef raw_cpu_and
+# ifndef raw_cpu_and_1
+# define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=)
# endif
-# ifndef __this_cpu_and_2
-# define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# ifndef raw_cpu_and_2
+# define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=)
# endif
-# ifndef __this_cpu_and_4
-# define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# ifndef raw_cpu_and_4
+# define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=)
# endif
-# ifndef __this_cpu_and_8
-# define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# ifndef raw_cpu_and_8
+# define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=)
# endif
-# define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val))
+# define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, (pcp), (val))
#endif
-#ifndef __this_cpu_or
-# ifndef __this_cpu_or_1
-# define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+#ifndef raw_cpu_or
+# ifndef raw_cpu_or_1
+# define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=)
# endif
-# ifndef __this_cpu_or_2
-# define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# ifndef raw_cpu_or_2
+# define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=)
# endif
-# ifndef __this_cpu_or_4
-# define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# ifndef raw_cpu_or_4
+# define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=)
# endif
-# ifndef __this_cpu_or_8
-# define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# ifndef raw_cpu_or_8
+# define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=)
# endif
-# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
+# define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, (pcp), (val))
#endif
-#define __this_cpu_generic_add_return(pcp, val) \
+#define raw_cpu_generic_add_return(pcp, val) \
({ \
- __this_cpu_add(pcp, val); \
- __this_cpu_read(pcp); \
+ raw_cpu_add(pcp, val); \
+ raw_cpu_read(pcp); \
})
-#ifndef __this_cpu_add_return
-# ifndef __this_cpu_add_return_1
-# define __this_cpu_add_return_1(pcp, val) __this_cpu_generic_add_return(pcp, val)
+#ifndef raw_cpu_add_return
+# ifndef raw_cpu_add_return_1
+# define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val)
# endif
-# ifndef __this_cpu_add_return_2
-# define __this_cpu_add_return_2(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# ifndef raw_cpu_add_return_2
+# define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val)
# endif
-# ifndef __this_cpu_add_return_4
-# define __this_cpu_add_return_4(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# ifndef raw_cpu_add_return_4
+# define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val)
# endif
-# ifndef __this_cpu_add_return_8
-# define __this_cpu_add_return_8(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# ifndef raw_cpu_add_return_8
+# define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val)
# endif
-# define __this_cpu_add_return(pcp, val) \
- __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
+# define raw_cpu_add_return(pcp, val) \
+ __pcpu_size_call_return2(raw_add_return_, pcp, val)
#endif
-#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val))
-#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
-#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
+#define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1)
+#define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1)
-#define __this_cpu_generic_xchg(pcp, nval) \
+#define raw_cpu_generic_xchg(pcp, nval) \
({ typeof(pcp) ret__; \
- ret__ = __this_cpu_read(pcp); \
- __this_cpu_write(pcp, nval); \
+ ret__ = raw_cpu_read(pcp); \
+ raw_cpu_write(pcp, nval); \
ret__; \
})
-#ifndef __this_cpu_xchg
-# ifndef __this_cpu_xchg_1
-# define __this_cpu_xchg_1(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+#ifndef raw_cpu_xchg
+# ifndef raw_cpu_xchg_1
+# define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
# endif
-# ifndef __this_cpu_xchg_2
-# define __this_cpu_xchg_2(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# ifndef raw_cpu_xchg_2
+# define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
# endif
-# ifndef __this_cpu_xchg_4
-# define __this_cpu_xchg_4(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# ifndef raw_cpu_xchg_4
+# define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
# endif
-# ifndef __this_cpu_xchg_8
-# define __this_cpu_xchg_8(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# ifndef raw_cpu_xchg_8
+# define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
# endif
-# define __this_cpu_xchg(pcp, nval) \
- __pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+# define raw_cpu_xchg(pcp, nval) \
+ __pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)
#endif
-#define __this_cpu_generic_cmpxchg(pcp, oval, nval) \
+#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
typeof(pcp) ret__; \
- ret__ = __this_cpu_read(pcp); \
+ ret__ = raw_cpu_read(pcp); \
if (ret__ == (oval)) \
- __this_cpu_write(pcp, nval); \
+ raw_cpu_write(pcp, nval); \
ret__; \
})
-#ifndef __this_cpu_cmpxchg
-# ifndef __this_cpu_cmpxchg_1
-# define __this_cpu_cmpxchg_1(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+#ifndef raw_cpu_cmpxchg
+# ifndef raw_cpu_cmpxchg_1
+# define raw_cpu_cmpxchg_1(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
-# ifndef __this_cpu_cmpxchg_2
-# define __this_cpu_cmpxchg_2(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# ifndef raw_cpu_cmpxchg_2
+# define raw_cpu_cmpxchg_2(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
-# ifndef __this_cpu_cmpxchg_4
-# define __this_cpu_cmpxchg_4(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# ifndef raw_cpu_cmpxchg_4
+# define raw_cpu_cmpxchg_4(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
-# ifndef __this_cpu_cmpxchg_8
-# define __this_cpu_cmpxchg_8(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# ifndef raw_cpu_cmpxchg_8
+# define raw_cpu_cmpxchg_8(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
-# define __this_cpu_cmpxchg(pcp, oval, nval) \
- __pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
+# define raw_cpu_cmpxchg(pcp, oval, nval) \
+ __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
#endif
-#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
int __ret = 0; \
- if (__this_cpu_read(pcp1) == (oval1) && \
- __this_cpu_read(pcp2) == (oval2)) { \
- __this_cpu_write(pcp1, (nval1)); \
- __this_cpu_write(pcp2, (nval2)); \
+ if (raw_cpu_read(pcp1) == (oval1) && \
+ raw_cpu_read(pcp2) == (oval2)) { \
+ raw_cpu_write(pcp1, (nval1)); \
+ raw_cpu_write(pcp2, (nval2)); \
__ret = 1; \
} \
(__ret); \
})
-#ifndef __this_cpu_cmpxchg_double
-# ifndef __this_cpu_cmpxchg_double_1
-# define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef raw_cpu_cmpxchg_double
+# ifndef raw_cpu_cmpxchg_double_1
+# define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
-# ifndef __this_cpu_cmpxchg_double_2
-# define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# ifndef raw_cpu_cmpxchg_double_2
+# define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
-# ifndef __this_cpu_cmpxchg_double_4
-# define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# ifndef raw_cpu_cmpxchg_double_4
+# define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
-# ifndef __this_cpu_cmpxchg_double_8
-# define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# ifndef raw_cpu_cmpxchg_double_8
+# define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
+# define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
+/*
+ * Generic percpu operations for context that are safe from preemption/interrupts.
+ */
+#ifndef __this_cpu_read
+# define __this_cpu_read(pcp) \
+ (__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp)))
+#endif
+
+#ifndef __this_cpu_write
+# define __this_cpu_write(pcp, val) \
+do { __this_cpu_preempt_check("write"); \
+ __pcpu_size_call(raw_cpu_write_, (pcp), (val)); \
+} while (0)
+#endif
+
+#ifndef __this_cpu_add
+# define __this_cpu_add(pcp, val) \
+do { __this_cpu_preempt_check("add"); \
+ __pcpu_size_call(raw_cpu_add_, (pcp), (val)); \
+} while (0)
+#endif
+
+#ifndef __this_cpu_sub
+# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val))
+#endif
+
+#ifndef __this_cpu_inc
+# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1)
+#endif
+
+#ifndef __this_cpu_dec
+# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef __this_cpu_and
+# define __this_cpu_and(pcp, val) \
+do { __this_cpu_preempt_check("and"); \
+ __pcpu_size_call(raw_cpu_and_, (pcp), (val)); \
+} while (0)
+
+#endif
+
+#ifndef __this_cpu_or
+# define __this_cpu_or(pcp, val) \
+do { __this_cpu_preempt_check("or"); \
+ __pcpu_size_call(raw_cpu_or_, (pcp), (val)); \
+} while (0)
+#endif
+
+#ifndef __this_cpu_add_return
+# define __this_cpu_add_return(pcp, val) \
+ (__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val))
+#endif
+
+#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
+
+#ifndef __this_cpu_xchg
+# define __this_cpu_xchg(pcp, nval) \
+ (__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval))
+#endif
+
+#ifndef __this_cpu_cmpxchg
+# define __this_cpu_cmpxchg(pcp, oval, nval) \
+ (__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval))
+#endif
+
+#ifndef __this_cpu_cmpxchg_double
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+ (__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))
#endif
#endif /* __LINUX_PERCPU_H */
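After this split, raw_cpu_*() are the unchecked operations for deliberately racy updates, while __this_cpu_*() keep their old semantics but now call __this_cpu_preempt_check() under CONFIG_DEBUG_PREEMPT. A small sketch contrasting the two on a made-up per-CPU counter:

/* Hedged sketch with a hypothetical per-CPU counter; not from the patch. */
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_events);

static void count_event_checked(void)
{
	/* would warn under CONFIG_DEBUG_PREEMPT if preemption were enabled */
	preempt_disable();
	__this_cpu_inc(demo_events);
	preempt_enable();
}

static void count_event_racy(void)
{
	/* tolerates being migrated mid-update; counter is allowed to be racy */
	raw_cpu_inc(demo_events);
}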
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 201a69749659..56b7bc32db4f 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -104,15 +104,13 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
* units, e.g. numbers, bytes, Kbytes, etc
*
* returns 0 on success and <0 if the counter->usage will exceed the
- * counter->limit _locked call expects the counter->lock to be taken
+ * counter->limit
*
* charge_nofail works the same, except that it charges the resource
* counter unconditionally, and returns < 0 if the after the current
* charge we are over limit.
*/
-int __must_check res_counter_charge_locked(struct res_counter *counter,
- unsigned long val, bool force);
int __must_check res_counter_charge(struct res_counter *counter,
unsigned long val, struct res_counter **limit_fail_at);
int res_counter_charge_nofail(struct res_counter *counter,
@@ -125,12 +123,10 @@ int res_counter_charge_nofail(struct res_counter *counter,
* @val: the amount of the resource
*
* these calls check for usage underflow and show a warning on the console
- * _locked call expects the counter->lock to be taken
*
* returns the total charges still present in @counter.
*/
-u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);
u64 res_counter_uncharge_until(struct res_counter *counter,
diff --git a/include/linux/rio.h b/include/linux/rio.h
index b71d5738e683..6bda06f21930 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -83,7 +83,7 @@
#define RIO_CTAG_UDEVID 0x0001ffff /* Unique device identifier */
extern struct bus_type rio_bus_type;
-extern struct device rio_bus;
+extern struct class rio_mport_class;
struct rio_mport;
struct rio_dev;
@@ -201,6 +201,7 @@ struct rio_dev {
#define rio_dev_f(n) list_entry(n, struct rio_dev, net_list)
#define to_rio_dev(n) container_of(n, struct rio_dev, dev)
#define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0])
+#define to_rio_mport(n) container_of(n, struct rio_mport, dev)
/**
* struct rio_msg - RIO message event
@@ -248,6 +249,7 @@ enum rio_phy_type {
* @phy_type: RapidIO phy type
* @phys_efptr: RIO port extended features pointer
* @name: Port name string
+ * @dev: device structure associated with an mport
* @priv: Master port private data
* @dma: DMA device associated with mport
* @nscan: RapidIO network enumeration/discovery operations
@@ -272,6 +274,7 @@ struct rio_mport {
enum rio_phy_type phy_type; /* RapidIO phy type */
u32 phys_efptr;
unsigned char name[RIO_MAX_MPORT_NAME];
+ struct device dev;
void *priv; /* Master port private data */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct dma_device dma;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7cb07fd26680..075b3056c0c0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -132,6 +132,10 @@ struct perf_event_context;
struct blk_plug;
struct filename;
+#define VMACACHE_BITS 2
+#define VMACACHE_SIZE (1U << VMACACHE_BITS)
+#define VMACACHE_MASK (VMACACHE_SIZE - 1)
+
/*
* List of flags we want to share for kernel threads,
* if only because they are not used by them anyway.
@@ -206,8 +210,9 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#define __TASK_STOPPED 4
#define __TASK_TRACED 8
/* in tsk->exit_state */
-#define EXIT_ZOMBIE 16
-#define EXIT_DEAD 32
+#define EXIT_DEAD 16
+#define EXIT_ZOMBIE 32
+#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
@@ -1235,6 +1240,9 @@ struct task_struct {
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
+ /* per-thread vma caching */
+ u32 vmacache_seqnum;
+ struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
struct task_rss_stat rss_stat;
#endif
@@ -1844,7 +1852,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
-#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
@@ -2351,7 +2358,7 @@ extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, i
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-extern void set_task_comm(struct task_struct *tsk, char *from);
+extern void set_task_comm(struct task_struct *tsk, const char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
diff --git a/include/linux/slab.h b/include/linux/slab.h
index b5b2df60299e..3dd389aa91c7 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -115,9 +115,9 @@ int slab_is_available(void);
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(void *));
-struct kmem_cache *
-kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
- unsigned long, void (*)(void *), struct kmem_cache *);
+#ifdef CONFIG_MEMCG_KMEM
+void kmem_cache_create_memcg(struct mem_cgroup *, struct kmem_cache *);
+#endif
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f56bfa9e4526..f2f7398848cf 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -87,6 +87,9 @@ struct kmem_cache {
#ifdef CONFIG_MEMCG_KMEM
struct memcg_cache_params *memcg_params;
int max_attr_size; /* for propagation, maximum size of a stored attr */
+#ifdef CONFIG_SYSFS
+ struct kset *memcg_kset;
+#endif
#endif
#ifdef CONFIG_NUMA
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 12ae6ce997d6..7062330a1329 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -188,7 +188,7 @@ DECLARE_PER_CPU(int, numa_node);
/* Returns the number of the current Node. */
static inline int numa_node_id(void)
{
- return __this_cpu_read(numa_node);
+ return raw_cpu_read(numa_node);
}
#endif
@@ -245,7 +245,7 @@ static inline void set_numa_mem(int node)
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
- return __this_cpu_read(_numa_mem_);
+ return raw_cpu_read(_numa_mem_);
}
#endif
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
new file mode 100644
index 000000000000..c3fa0fd43949
--- /dev/null
+++ b/include/linux/vmacache.h
@@ -0,0 +1,38 @@
+#ifndef __LINUX_VMACACHE_H
+#define __LINUX_VMACACHE_H
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+/*
+ * Hash based on the page number. Provides a good hit rate for
+ * workloads with good locality and those with random accesses as well.
+ */
+#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
+
+static inline void vmacache_flush(struct task_struct *tsk)
+{
+ memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
+}
+
+extern void vmacache_flush_all(struct mm_struct *mm);
+extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
+extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
+ unsigned long addr);
+
+#ifndef CONFIG_MMU
+extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
+#endif
+
+static inline void vmacache_invalidate(struct mm_struct *mm)
+{
+ mm->vmacache_seqnum++;
+
+ /* deal with overflows */
+ if (unlikely(mm->vmacache_seqnum == 0))
+ vmacache_flush_all(mm);
+}
+
+#endif /* __LINUX_VMACACHE_H */
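vmacache_find()/vmacache_update() implement a small per-thread cache in front of the mm rbtree. A standalone sketch of the intended lookup pattern for illustration only; the helper name is made up, and in the companion mm/ patches this caching happens inside find_vma() itself:

/* Hedged sketch of the lookup pattern; purely illustrative. */
#include <linux/mm.h>
#include <linux/vmacache.h>

static struct vm_area_struct *demo_lookup_vma(struct mm_struct *mm,
					      unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = vmacache_find(mm, addr);		/* per-thread O(1) cache */
	if (vma)
		return vma;

	vma = find_vma(mm, addr);		/* fall back to the rbtree walk */
	if (vma)
		vmacache_update(addr, vma);	/* remember for the next lookup */
	return vma;
}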
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index ea4476157e00..45c9cd1daf7a 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -27,9 +27,13 @@ struct vm_event_state {
DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
+/*
+ * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
+ * local_irq_disable overhead.
+ */
static inline void __count_vm_event(enum vm_event_item item)
{
- __this_cpu_inc(vm_event_states.event[item]);
+ raw_cpu_inc(vm_event_states.event[item]);
}
static inline void count_vm_event(enum vm_event_item item)
@@ -39,7 +43,7 @@ static inline void count_vm_event(enum vm_event_item item)
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- __this_cpu_add(vm_event_states.event[item], delta);
+ raw_cpu_add(vm_event_states.event[item], delta);
}
static inline void count_vm_events(enum vm_event_item item, long delta)
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 021b8a319b9e..5777c13849ba 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -178,7 +178,7 @@ int write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, writepage_t writepage,
void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
-void set_page_dirty_balance(struct page *page, int page_mkwrite);
+void set_page_dirty_balance(struct page *page);
void writeback_set_ratelimit(void);
void tag_pages_for_writeback(struct address_space *mapping,
pgoff_t start, pgoff_t end);
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index 102a646e1996..dee3bb1d5a6b 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -32,7 +32,7 @@ TRACE_EVENT(task_newtask,
TRACE_EVENT(task_rename,
- TP_PROTO(struct task_struct *task, char *comm),
+ TP_PROTO(struct task_struct *task, const char *comm),
TP_ARGS(task, comm),
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 4164529a94f9..ddc3b36f1046 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -50,7 +50,7 @@
#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump,
overrides the coredump filter bits */
-#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */
+#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */
/* compatibility flags */
#define MAP_FILE 0
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 289760f424aa..58afc04c107e 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -149,4 +149,7 @@
#define PR_GET_TID_ADDRESS 40
+#define PR_SET_THP_DISABLE 41
+#define PR_GET_THP_DISABLE 42
+
#endif /* _LINUX_PRCTL_H */