Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h  73
1 file changed, 54 insertions(+), 19 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b695875d63e3..c31a9cd2a30e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -107,6 +107,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */
#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
+#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
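
The new VM_MIXEDMAP flag pairs with the vm_insert_mixed() declaration added further down in this patch: a vma so marked may hold both struct-page-backed pages and raw PFN mappings. As a rough illustration only (the mydrv_* names are hypothetical, not part of this patch), a driver would set the flag in its mmap method and install entries from its fault handler (see the fault-side sketch after the vm_insert_mixed() declaration below):

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Allow this vma to mix "struct page" pages and pure PFN pages. */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &mydrv_vm_ops;	/* hypothetical ops; ->fault uses vm_insert_mixed() */
	vma->vm_private_data = file->private_data;
	return 0;
}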
@@ -164,8 +165,6 @@ struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
- struct page *(*nopage)(struct vm_area_struct *area,
- unsigned long address, int *type);
unsigned long (*nopfn)(struct vm_area_struct *area,
unsigned long address);
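
With ->nopage gone, ->fault is the only page-returning path; a later hunk in this patch also drops the NOPAGE_SIGBUS/NOPAGE_OOM values. As an illustrative conversion sketch (mydrv_* names are hypothetical), an old nopage handler becomes:

static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* hypothetical per-vma lookup; vmf->pgoff replaces the old address math */
	struct page *page = mydrv_lookup_page(vma->vm_private_data, vmf->pgoff);

	if (!page)
		return VM_FAULT_SIGBUS;		/* was: return NOPAGE_SIGBUS; */

	get_page(page);				/* core expects a referenced page */
	vmf->page = page;
	return 0;
}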
@@ -173,7 +172,25 @@ struct vm_operations_struct {
* writable, if an error is returned it will cause a SIGBUS */
int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
#ifdef CONFIG_NUMA
+ /*
+ * set_policy() op must add a reference to any non-NULL @new mempolicy
+ * to hold the policy upon return. Caller should pass NULL @new to
+ * remove a policy and fall back to surrounding context--i.e. do not
+ * install a MPOL_DEFAULT policy, nor the task or system default
+ * mempolicy.
+ */
int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+
+ /*
+ * get_policy() op must add reference [mpol_get()] to any policy at
+ * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
+ * in mm/mempolicy.c will do this automatically.
+ * get_policy() must NOT add a ref if the policy at (vma,addr) is not
+ * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+ * If no [shared/vma] mempolicy exists at the addr, get_policy() op
+ * must return NULL--i.e., do not "fallback" to task or system default
+ * policy.
+ */
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr);
int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
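
The reference-counting rules spelled out in the comments above are what the shared-policy helpers in mm/mempolicy.c already provide. As a hedged sketch (the mydrv_object and its embedded struct shared_policy are hypothetical), a get_policy op built on that infrastructure can simply forward the lookup:

static struct mempolicy *mydrv_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct mydrv_object *obj = vma->vm_private_data;	/* hypothetical */
	unsigned long idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	/*
	 * mpol_shared_policy_lookup() takes the reference on MPOL_SHARED
	 * policies itself and returns NULL when nothing is installed at idx,
	 * so there is no fallback to the task or system default policy.
	 */
	return mpol_shared_policy_lookup(&obj->policy, idx);
}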
@@ -397,11 +414,11 @@ static inline void set_compound_order(struct page *page, unsigned long order)
* we have run out of space and have to fall back to an
* alternate (slower) way of determining the node.
*
- * No sparsemem: | NODE | ZONE | ... | FLAGS |
- * with space for node: | SECTION | NODE | ZONE | ... | FLAGS |
- * no space for node: | SECTION | ZONE | ... | FLAGS |
+ * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS |
+ * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
+ * classic sparse no space for node: | SECTION | ZONE | ... | FLAGS |
*/
-#ifdef CONFIG_SPARSEMEM
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH 0
@@ -409,9 +426,12 @@ static inline void set_compound_order(struct page *page, unsigned long order)
#define ZONES_WIDTH ZONES_SHIFT
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH NODES_SHIFT
#else
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#error "Vmemmap: No space for nodes field in page flags"
+#endif
#define NODES_WIDTH 0
#endif
@@ -454,8 +474,8 @@ static inline void set_compound_order(struct page *page, unsigned long order)
#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif
#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
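
For reference, the widths and masks above feed the accessor pattern used later in this header, roughly as follows (a sketch of the existing pattern, not code added by this patch):

static inline int page_to_nid(struct page *page)
{
	/* When NODES_WIDTH ends up 0, the header instead declares an
	 * out-of-line page_to_nid() that derives the node from the section
	 * -- the "alternate (slower) way" mentioned in the comment above. */
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}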
@@ -504,10 +524,12 @@ static inline struct zone *page_zone(struct page *page)
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
static inline unsigned long page_to_section(struct page *page)
{
return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
+#endif
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
@@ -602,9 +624,12 @@ static inline struct address_space *page_mapping(struct page *page)
struct address_space *mapping = page->mapping;
VM_BUG_ON(PageSlab(page));
+#ifdef CONFIG_SWAP
if (unlikely(PageSwapCache(page)))
mapping = &swapper_space;
- else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+ else
+#endif
+ if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
mapping = NULL;
return mapping;
}
@@ -649,12 +674,6 @@ static inline int page_mapped(struct page *page)
}
/*
- * Error return values for the *_nopage functions
- */
-#define NOPAGE_SIGBUS (NULL)
-#define NOPAGE_OOM ((struct page *) (-1))
-
-/*
* Error return values for the *_nopfn functions
*/
#define NOPFN_SIGBUS ((unsigned long) -1)
@@ -720,7 +739,9 @@ struct zap_details {
unsigned long truncate_count; /* Compare vm_truncate_count */
};
-struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
+
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb,
@@ -1045,6 +1066,19 @@ extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);
+
+#ifdef CONFIG_PROC_FS
+/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
+extern void added_exe_file_vma(struct mm_struct *mm);
+extern void removed_exe_file_vma(struct mm_struct *mm);
+#else
+static inline void added_exe_file_vma(struct mm_struct *mm)
+{}
+
+static inline void removed_exe_file_vma(struct mm_struct *mm)
+{}
+#endif /* CONFIG_PROC_FS */
+
extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
extern int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
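
The added_exe_file_vma()/removed_exe_file_vma() pair added above is meant to keep the mm's exe_file reference in step with its executable mappings. A hedged sketch of the intended caller-side pairing (the helper name below is illustrative, not part of this patch):

static void track_exe_file_vma(struct vm_area_struct *vma, int mapped)
{
	/* Only VM_EXECUTABLE mappings contribute; per the comment above,
	 * callers must not hold the mm's exe_file_lock here. */
	if (!(vma->vm_flags & VM_EXECUTABLE))
		return;

	if (mapped)
		added_exe_file_vma(vma->vm_mm);
	else
		removed_exe_file_vma(vma->vm_mm);
}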
@@ -1149,6 +1183,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
struct page *follow_page(struct vm_area_struct *, unsigned long address,
unsigned int foll_flags);
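
vm_insert_mixed() is the fault-time counterpart of the new VM_MIXEDMAP flag: unlike vm_insert_pfn(), it also accepts PFNs that happen to be backed by a struct page. A rough fault-handler sketch (mydrv_* names are hypothetical; see the mmap-side sketch next to VM_MIXEDMAP above):

static int mydrv_mixed_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* hypothetical translation of the file offset to a device pfn */
	unsigned long pfn = mydrv_pgoff_to_pfn(vma->vm_private_data, vmf->pgoff);
	int ret;

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
	if (ret == -ENOMEM)
		return VM_FAULT_OOM;
	if (ret && ret != -EBUSY)
		return VM_FAULT_SIGBUS;

	/* The pte was installed directly; no struct page is handed back. */
	return VM_FAULT_NOPAGE;
}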
@@ -1207,8 +1243,6 @@ int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
unsigned long lru_pages);
-void drop_pagecache(void);
-void drop_slab(void);
#ifndef CONFIG_MMU
#define randomize_va_space 0
@@ -1229,6 +1263,7 @@ void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(struct page *start_page,
unsigned long pages, int node);
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
+void vmemmap_populate_print_last(void);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */