 fs/proc/base.c            |  35 +
 fs/proc/task_mmu.c        | 132 +
 include/linux/mempolicy.h |   3 +
 mm/mempolicy.c            |  12 +-
 4 files changed, 176 insertions(+), 6 deletions(-)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 491f2d9f89ac..b796bf90a0b1 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -65,6 +65,7 @@ enum pid_directory_inos {
PROC_TGID_STAT,
PROC_TGID_STATM,
PROC_TGID_MAPS,
+ PROC_TGID_NUMA_MAPS,
PROC_TGID_MOUNTS,
PROC_TGID_WCHAN,
#ifdef CONFIG_SCHEDSTATS
@@ -102,6 +103,7 @@ enum pid_directory_inos {
PROC_TID_STAT,
PROC_TID_STATM,
PROC_TID_MAPS,
+ PROC_TID_NUMA_MAPS,
PROC_TID_MOUNTS,
PROC_TID_WCHAN,
#ifdef CONFIG_SCHEDSTATS
@@ -144,6 +146,9 @@ static struct pid_entry tgid_base_stuff[] = {
E(PROC_TGID_STAT, "stat", S_IFREG|S_IRUGO),
E(PROC_TGID_STATM, "statm", S_IFREG|S_IRUGO),
E(PROC_TGID_MAPS, "maps", S_IFREG|S_IRUGO),
+#ifdef CONFIG_NUMA
+ E(PROC_TGID_NUMA_MAPS, "numa_maps", S_IFREG|S_IRUGO),
+#endif
E(PROC_TGID_MEM, "mem", S_IFREG|S_IRUSR|S_IWUSR),
#ifdef CONFIG_SECCOMP
E(PROC_TGID_SECCOMP, "seccomp", S_IFREG|S_IRUSR|S_IWUSR),
@@ -180,6 +185,9 @@ static struct pid_entry tid_base_stuff[] = {
E(PROC_TID_STAT, "stat", S_IFREG|S_IRUGO),
E(PROC_TID_STATM, "statm", S_IFREG|S_IRUGO),
E(PROC_TID_MAPS, "maps", S_IFREG|S_IRUGO),
+#ifdef CONFIG_NUMA
+ E(PROC_TID_NUMA_MAPS, "numa_maps", S_IFREG|S_IRUGO),
+#endif
E(PROC_TID_MEM, "mem", S_IFREG|S_IRUSR|S_IWUSR),
#ifdef CONFIG_SECCOMP
E(PROC_TID_SECCOMP, "seccomp", S_IFREG|S_IRUSR|S_IWUSR),
@@ -515,6 +523,27 @@ static struct file_operations proc_maps_operations = {
.release = seq_release,
};
+#ifdef CONFIG_NUMA
+extern struct seq_operations proc_pid_numa_maps_op;
+static int numa_maps_open(struct inode *inode, struct file *file)
+{
+ struct task_struct *task = proc_task(inode);
+ int ret = seq_open(file, &proc_pid_numa_maps_op);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ m->private = task;
+ }
+ return ret;
+}
+
+static struct file_operations proc_numa_maps_operations = {
+ .open = numa_maps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif
+
extern struct seq_operations mounts_op;
static int mounts_open(struct inode *inode, struct file *file)
{
@@ -1524,6 +1553,12 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
case PROC_TGID_MAPS:
inode->i_fop = &proc_maps_operations;
break;
+#ifdef CONFIG_NUMA
+ case PROC_TID_NUMA_MAPS:
+ case PROC_TGID_NUMA_MAPS:
+ inode->i_fop = &proc_numa_maps_operations;
+ break;
+#endif
case PROC_TID_MEM:
case PROC_TGID_MEM:
inode->i_op = &proc_mem_inode_operations;
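
The table entries above surface the new file as /proc/<pid>/numa_maps (and the per-task equivalent via the tid table). A throwaway userspace reader, sketched below on the assumption that this patch is applied and CONFIG_NUMA is set, is enough to confirm the file shows up and is readable; it uses only standard C and takes an optional pid argument, defaulting to "self".

    /* Illustrative only: a minimal reader for the file added by this patch.
     * Assumes CONFIG_NUMA and the /proc entries introduced above.
     */
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
            char path[64];
            char line[1024];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/%s/numa_maps",
                     argc > 1 ? argv[1] : "self");
            f = fopen(path, "r");
            if (!f) {
                    perror(path);
                    return EXIT_FAILURE;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);    /* one line per VMA */
            fclose(f);
            return 0;
    }
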
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 28b4a0253a92..64e84cadfa3c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -2,6 +2,8 @@
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
+#include <linux/pagemap.h>
+#include <linux/mempolicy.h>
#include <asm/elf.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -233,3 +235,133 @@ struct seq_operations proc_pid_maps_op = {
.stop = m_stop,
.show = show_map
};
+
+#ifdef CONFIG_NUMA
+
+struct numa_maps {
+ unsigned long pages;
+ unsigned long anon;
+ unsigned long mapped;
+ unsigned long mapcount_max;
+ unsigned long node[MAX_NUMNODES];
+};
+
+/*
+ * Calculate numa node maps for a vma
+ */
+static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
+{
+ struct page *page;
+ unsigned long vaddr;
+ struct mm_struct *mm = vma->vm_mm;
+ int i;
+ struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);
+
+ if (!md)
+ return NULL;
+ md->pages = 0;
+ md->anon = 0;
+ md->mapped = 0;
+ md->mapcount_max = 0;
+ for_each_node(i)
+ md->node[i] = 0;
+
+ spin_lock(&mm->page_table_lock);
+ for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
+ page = follow_page(mm, vaddr, 0);
+ if (page) {
+ int count = page_mapcount(page);
+
+ if (count)
+ md->mapped++;
+ if (count > md->mapcount_max)
+ md->mapcount_max = count;
+ md->pages++;
+ if (PageAnon(page))
+ md->anon++;
+ md->node[page_to_nid(page)]++;
+ }
+ }
+ spin_unlock(&mm->page_table_lock);
+ return md;
+}
+
+static int show_numa_map(struct seq_file *m, void *v)
+{
+ struct task_struct *task = m->private;
+ struct vm_area_struct *vma = v;
+ struct mempolicy *pol;
+ struct numa_maps *md;
+ struct zone **z;
+ int n;
+ int first;
+
+ if (!vma->vm_mm)
+ return 0;
+
+ md = get_numa_maps(vma);
+ if (!md)
+ return 0;
+
+ seq_printf(m, "%08lx", vma->vm_start);
+ pol = get_vma_policy(task, vma, vma->vm_start);
+ /* Print policy */
+ switch (pol->policy) {
+ case MPOL_PREFERRED:
+ seq_printf(m, " prefer=%d", pol->v.preferred_node);
+ break;
+ case MPOL_BIND:
+ seq_printf(m, " bind={");
+ first = 1;
+ for (z = pol->v.zonelist->zones; *z; z++) {
+
+ if (!first)
+ seq_putc(m, ',');
+ else
+ first = 0;
+ seq_printf(m, "%d/%s", (*z)->zone_pgdat->node_id,
+ (*z)->name);
+ }
+ seq_putc(m, '}');
+ break;
+ case MPOL_INTERLEAVE:
+ seq_printf(m, " interleave={");
+ first = 1;
+ for_each_node(n) {
+ if (test_bit(n, pol->v.nodes)) {
+ if (!first)
+ seq_putc(m, ',');
+ else
+ first = 0;
+ seq_printf(m, "%d", n);
+ }
+ }
+ seq_putc(m, '}');
+ break;
+ default:
+ seq_printf(m," default");
+ break;
+ }
+ seq_printf(m, " MaxRef=%lu Pages=%lu Mapped=%lu",
+ md->mapcount_max, md->pages, md->mapped);
+ if (md->anon)
+ seq_printf(m," Anon=%lu",md->anon);
+
+ for_each_online_node(n) {
+ if (md->node[n])
+ seq_printf(m, " N%d=%lu", n, md->node[n]);
+ }
+ seq_putc(m, '\n');
+ kfree(md);
+ if (m->count < m->size) /* vma is copied successfully */
+ m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+ return 0;
+}
+
+struct seq_operations proc_pid_numa_maps_op = {
+ .start = m_start,
+ .next = m_next,
+ .stop = m_stop,
+ .show = show_numa_map
+};
+#endif
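
Putting the seq_printf() calls in show_numa_map() together, each VMA yields one line: the start address, the effective policy, the MaxRef/Pages/Mapped counters, Anon when nonzero, and an N<node>=<pages> field for every node that holds pages of the mapping. The lines below are illustrative only (addresses and counts are made up), showing the default, interleave and prefer cases:

    2000000000000000 default MaxRef=43 Pages=11 Mapped=11 N0=4 N1=3 N2=2 N3=2
    2000000000038000 interleave={0,1,2,3} MaxRef=1 Pages=4 Mapped=4 Anon=4 N0=1 N1=1 N2=1 N3=1
    4000000000000000 prefer=1 MaxRef=1 Pages=2 Mapped=2 N1=2
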
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 8480aef10e62..94a46f38c532 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -150,6 +150,9 @@ void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
unsigned long idx);
+struct mempolicy *get_vma_policy(struct task_struct *task,
+ struct vm_area_struct *vma, unsigned long addr);
+
extern void numa_default_policy(void);
extern void numa_policy_init(void);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b4eababc8198..13492d66b7c8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -664,10 +664,10 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
#endif
/* Return effective policy for a VMA */
-static struct mempolicy *
-get_vma_policy(struct vm_area_struct *vma, unsigned long addr)
+struct mempolicy *
+get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned long addr)
{
- struct mempolicy *pol = current->mempolicy;
+ struct mempolicy *pol = task->mempolicy;
if (vma) {
if (vma->vm_ops && vma->vm_ops->get_policy)
@@ -786,7 +786,7 @@ static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned or
struct page *
alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr)
{
- struct mempolicy *pol = get_vma_policy(vma, addr);
+ struct mempolicy *pol = get_vma_policy(current, vma, addr);
cpuset_update_current_mems_allowed();
@@ -908,7 +908,7 @@ void __mpol_free(struct mempolicy *p)
/* Find first node suitable for an allocation */
int mpol_first_node(struct vm_area_struct *vma, unsigned long addr)
{
- struct mempolicy *pol = get_vma_policy(vma, addr);
+ struct mempolicy *pol = get_vma_policy(current, vma, addr);
switch (pol->policy) {
case MPOL_DEFAULT:
@@ -928,7 +928,7 @@ int mpol_first_node(struct vm_area_struct *vma, unsigned long addr)
/* Find secondary valid nodes for an allocation */
int mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long addr)
{
- struct mempolicy *pol = get_vma_policy(vma, addr);
+ struct mempolicy *pol = get_vma_policy(current, vma, addr);
switch (pol->policy) {
case MPOL_PREFERRED: