Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h | 127
1 file changed, 51 insertions(+), 76 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6507dde38b16..fb8e814f78dc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -153,6 +153,7 @@ extern pgprot_t protection_map[16];
#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */
+#define FAULT_FLAG_KILLABLE 0x20 /* The faulting task is in a SIGKILL-killable region */
/*
* This interface is used by x86 PAT code to identify a pfn mapping that is
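Note on the new flag: FAULT_FLAG_KILLABLE tells a fault handler that the faulting task may be killed by a fatal signal while it waits. A minimal sketch of how a handler might honour it when it has to sleep on a page lock; the helper name and calling context are assumed for illustration, not taken from this patch:

	/* Sketch: sleep killably only when the caller allows it. */
	static int demo_fault_lock_page(struct page *page, unsigned int flags)
	{
		if (flags & FAULT_FLAG_KILLABLE)
			return lock_page_killable(page); /* 0, or -EINTR on SIGKILL */
		lock_page(page);
		return 0;
	}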
@@ -164,12 +165,12 @@ extern pgprot_t protection_map[16];
*/
static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
{
- return (vma->vm_flags & VM_PFN_AT_MMAP);
+ return !!(vma->vm_flags & VM_PFN_AT_MMAP);
}
static inline int is_pfn_mapping(struct vm_area_struct *vma)
{
- return (vma->vm_flags & VM_PFNMAP);
+ return !!(vma->vm_flags & VM_PFNMAP);
}
/*
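Note on the !! change above: the raw bitwise-AND yields the masked bit itself (e.g. 0x400 for VM_PFNMAP), not 1, so the old helpers returned a truthy but non-normalized value. Adding !! makes them return a strict 0 or 1, safe to compare against 1 or store in a narrower type. Illustration only, using the VM_PFNMAP value from this kernel era:

	unsigned long vm_flags = VM_PFNMAP;   /* 0x00000400 */
	int raw  = (vm_flags & VM_PFNMAP);    /* 0x400: truthy, but != 1 */
	int norm = !!(vm_flags & VM_PFNMAP);  /* exactly 1 */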
@@ -604,10 +605,6 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
#define NODE_NOT_IN_PAGE_FLAGS
#endif
-#ifndef PFN_SECTION_SHIFT
-#define PFN_SECTION_SHIFT 0
-#endif
-
/*
* Define the bit shifts to access each section. For non-existent
* sections we define the shift as 0; that plus a 0 mask ensures
@@ -681,6 +678,12 @@ static inline struct zone *page_zone(struct page *page)
}
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+static inline void set_page_section(struct page *page, unsigned long section)
+{
+ page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
+ page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
+}
+
static inline unsigned long page_to_section(struct page *page)
{
return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
@@ -699,18 +702,14 @@ static inline void set_page_node(struct page *page, unsigned long node)
page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}
-static inline void set_page_section(struct page *page, unsigned long section)
-{
- page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
- page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
-}
-
static inline void set_page_links(struct page *page, enum zone_type zone,
unsigned long node, unsigned long pfn)
{
set_page_zone(page, zone);
set_page_node(page, node);
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
set_page_section(page, pfn_to_section_nr(pfn));
+#endif
}
/*
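The set_page_section()/page_to_section() pair moved above uses the usual page->flags field-packing idiom: clear the field's mask at its shift, then OR in the new value. A self-contained sketch of the same idiom, with demo constants assumed purely for illustration:

	#define DEMO_MASK	((1UL << 4) - 1)	/* pretend the field is 4 bits wide */
	#define DEMO_PGSHIFT	60			/* pretend it lives at bits 60..63 */

	static inline void demo_set_field(unsigned long *flags, unsigned long v)
	{
		*flags &= ~(DEMO_MASK << DEMO_PGSHIFT);		/* clear the old value */
		*flags |= (v & DEMO_MASK) << DEMO_PGSHIFT;	/* install the new one */
	}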
@@ -862,26 +861,18 @@ extern void pagefault_out_of_memory(void);
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
/*
- * Flags passed to show_mem() and __show_free_areas() to suppress output in
+ * Flags passed to show_mem() and show_free_areas() to suppress output in
* various contexts.
*/
#define SHOW_MEM_FILTER_NODES (0x0001u) /* filter disallowed nodes */
-extern void show_free_areas(void);
-extern void __show_free_areas(unsigned int flags);
+extern void show_free_areas(unsigned int flags);
+extern bool skip_free_areas_node(unsigned int flags, int nid);
int shmem_lock(struct file *file, int lock, struct user_struct *user);
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
int shmem_zero_setup(struct vm_area_struct *);
-#ifndef CONFIG_MMU
-extern unsigned long shmem_get_unmapped_area(struct file *file,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags);
-#endif
-
extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);
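With __show_free_areas() folded into show_free_areas(), callers pass the filter flags directly; 0 means no filtering. A hedged usage sketch, with the call site hypothetical:

	/* e.g. on an allocation failure: dump free-area state, but skip
	 * nodes the current task is not allowed to allocate from. */
	show_free_areas(SHOW_MEM_FILTER_NODES);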
@@ -894,8 +885,6 @@ struct zap_details {
struct address_space *check_mapping; /* Check page->mapping if set */
pgoff_t first_index; /* Lowest page->index to unmap */
pgoff_t last_index; /* Highest page->index to unmap */
- spinlock_t *i_mmap_lock; /* For unmap_mapping_range: */
- unsigned long truncate_count; /* Compare vm_truncate_count */
};
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -905,7 +894,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *);
-unsigned long unmap_vmas(struct mmu_gather **tlb,
+unsigned long unmap_vmas(struct mmu_gather *tlb,
struct vm_area_struct *start_vma, unsigned long start_addr,
unsigned long end_addr, unsigned long *nr_accounted,
struct zap_details *);
@@ -1056,65 +1045,35 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
/*
* per-process(per-mm_struct) statistics.
*/
-#if defined(SPLIT_RSS_COUNTING)
-/*
- * The mm counters are not protected by its page_table_lock,
- * so must be incremented atomically.
- */
static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
{
atomic_long_set(&mm->rss_stat.count[member], value);
}
+#if defined(SPLIT_RSS_COUNTING)
unsigned long get_mm_counter(struct mm_struct *mm, int member);
-
-static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
-{
- atomic_long_add(value, &mm->rss_stat.count[member]);
-}
-
-static inline void inc_mm_counter(struct mm_struct *mm, int member)
-{
- atomic_long_inc(&mm->rss_stat.count[member]);
-}
-
-static inline void dec_mm_counter(struct mm_struct *mm, int member)
-{
- atomic_long_dec(&mm->rss_stat.count[member]);
-}
-
-#else /* !USE_SPLIT_PTLOCKS */
-/*
- * The mm counters are protected by its page_table_lock,
- * so can be incremented directly.
- */
-static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
-{
- mm->rss_stat.count[member] = value;
-}
-
+#else
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
- return mm->rss_stat.count[member];
+ return atomic_long_read(&mm->rss_stat.count[member]);
}
+#endif
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
- mm->rss_stat.count[member] += value;
+ atomic_long_add(value, &mm->rss_stat.count[member]);
}
static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
- mm->rss_stat.count[member]++;
+ atomic_long_inc(&mm->rss_stat.count[member]);
}
static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
- mm->rss_stat.count[member]--;
+ atomic_long_dec(&mm->rss_stat.count[member]);
}
-#endif /* !USE_SPLIT_PTLOCKS */
-
static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
return get_mm_counter(mm, MM_FILEPAGES) +
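After this hunk the rss counters are always atomic_long_t, with or without SPLIT_RSS_COUNTING; only get_mm_counter() still differs between the two configurations. A sketch of typical accounting around mapping and unmapping an anonymous page, with context abbreviated and not taken from this patch:

	static void demo_account_anon(struct mm_struct *mm, bool mapping)
	{
		if (mapping)
			inc_mm_counter(mm, MM_ANONPAGES); /* page added to the mm */
		else
			dec_mm_counter(mm, MM_ANONPAGES); /* page removed again */
		/* get_mm_rss(mm) now reflects MM_FILEPAGES + MM_ANONPAGES */
	}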
@@ -1163,13 +1122,24 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
#endif
/*
+ * This struct is used to pass information from page reclaim to the shrinkers.
+ * We consolidate the values for easier extension later.
+ */
+struct shrink_control {
+ gfp_t gfp_mask;
+
+ /* How many slab objects shrinker() should scan and try to reclaim */
+ unsigned long nr_to_scan;
+};
+
+/*
* A callback you can register to apply pressure to ageable caches.
*
- * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
- * look through the least-recently-used 'nr_to_scan' entries and
- * attempt to free them up. It should return the number of objects
- * which remain in the cache. If it returns -1, it means it cannot do
- * any scanning at this time (eg. there is a risk of deadlock).
+ * 'sc' is passed a 'shrink_control' which includes a count 'nr_to_scan'
+ * and a 'gfpmask'. It should look through the least-recently-used
+ * 'nr_to_scan' entries and attempt to free them up. It should return
+ * the number of objects which remain in the cache. If it returns -1, it
+ * means it cannot do any scanning at this time (e.g. there is a risk of
+ * deadlock).
*
* The 'gfpmask' refers to the allocation we are currently trying to
* fulfil.
@@ -1178,7 +1148,7 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
* querying the cache size, so a fastpath for that case is appropriate.
*/
struct shrinker {
- int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
+ int (*shrink)(struct shrinker *, struct shrink_control *sc);
int seeks; /* seeks to recreate an obj */
/* These are for internal use */
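Under the new interface a shrinker reads both nr_to_scan and gfp_mask from the shrink_control it is handed, instead of taking them as separate arguments. A minimal sketch against the new prototype; demo_cache_count() and demo_cache_evict() are assumed helpers, not real API:

	static int demo_shrink(struct shrinker *s, struct shrink_control *sc)
	{
		if (sc->nr_to_scan) {
			if (!(sc->gfp_mask & __GFP_FS))
				return -1;	/* cannot scan now: deadlock risk */
			demo_cache_evict(sc->nr_to_scan);
		}
		return demo_cache_count();	/* objects left in the cache */
	}

	static struct shrinker demo_shrinker = {
		.shrink	= demo_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	/* register_shrinker(&demo_shrinker) at init,
	 * unregister_shrinker(&demo_shrinker) on teardown. */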
@@ -1380,7 +1350,7 @@ extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
-extern void calculate_zone_inactive_ratio(struct zone *zone);
+extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
@@ -1388,6 +1358,8 @@ extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;
+extern void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
+
extern void setup_per_cpu_pageset(void);
extern void zone_pcp_update(struct zone *zone);
@@ -1460,7 +1432,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long flag, unsigned long pgoff);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, unsigned long flags,
- unsigned int vm_flags, unsigned long pgoff);
+ vm_flags_t vm_flags, unsigned long pgoff);
static inline unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
@@ -1517,15 +1489,17 @@ unsigned long ra_submit(struct file_ra_state *ra,
struct address_space *mapping,
struct file *filp);
-/* Do stack extension */
+/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+
+/* CONFIG_STACK_GROWSUP still needs to grow downwards in some places */
+extern int expand_downwards(struct vm_area_struct *vma,
+ unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
#define expand_upwards(vma, address) do { } while (0)
#endif
-extern int expand_stack_downwards(struct vm_area_struct *vma,
- unsigned long address);
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
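find_vma() returns the first VMA with addr < vm_end, so a hit does not guarantee the VMA actually covers addr; arch fault handlers pair it with expand_stack() roughly as in this sketch (error handling abbreviated, function name assumed):

	static struct vm_area_struct *demo_lookup(struct mm_struct *mm,
						  unsigned long addr)
	{
		struct vm_area_struct *vma = find_vma(mm, addr);

		if (!vma)
			return NULL;		/* addr above every mapping */
		if (vma->vm_start <= addr)
			return vma;		/* addr inside this VMA */
		if (!(vma->vm_flags & VM_GROWSDOWN))
			return NULL;		/* hole below the next VMA */
		if (expand_stack(vma, addr))
			return NULL;		/* could not grow the stack */
		return vma;			/* stack grew to cover addr */
	}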
@@ -1627,8 +1601,9 @@ int in_gate_area_no_mm(unsigned long addr);
int drop_caches_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
-unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
- unsigned long lru_pages);
+unsigned long shrink_slab(struct shrink_control *shrink,
+ unsigned long nr_pages_scanned,
+ unsigned long lru_pages);
#ifndef CONFIG_MMU
#define randomize_va_space 0