author     Clark Williams <williams@redhat.com>   2012-01-03 10:47:33 -0600
committer  Clark Williams <williams@redhat.com>   2012-01-03 10:47:33 -0600
commit     63076d7f7f7f2650e5d0a4fa5dd2c412a255c237
tree       a489c1de793736fb0b379713245467f4358215fe /mm
parent     8645bee979600b0ba2ce7bfc847688abd1c2d477
parent     5f0a6e2d503896062f641639dacfe5055c2f593b
Merge commit 'v3.2-rc7' into rt-3.2-rc7-rt9
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c     7
-rw-r--r--  mm/memcontrol.c  3
-rw-r--r--  mm/oom_kill.c    2
-rw-r--r--  mm/percpu.c      6
-rw-r--r--  mm/vmalloc.c     2
5 files changed, 9 insertions, 11 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index ebcb4138cf6d..59ef26c94966 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1828,7 +1828,7 @@ repeat:
page = __page_cache_alloc(gfp | __GFP_COLD);
if (!page)
return ERR_PTR(-ENOMEM);
- err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+ err = add_to_page_cache_lru(page, mapping, index, gfp);
if (unlikely(err)) {
page_cache_release(page);
if (err == -EEXIST)
@@ -1925,10 +1925,7 @@ static struct page *wait_on_page_read(struct page *page)
* @gfp: the page allocator flags to use if allocating
*
* This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
+ * any new page allocations done using the specified allocation flags.
*
* If the page does not get brought uptodate, return -EIO.
*/
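[Note on the filemap.c change] The trimmed kernel-doc above now simply says that read_cache_page_gfp() honours the caller's allocation flags; with the first hunk, add_to_page_cache_lru() receives the same gfp instead of a hard-coded GFP_KERNEL. A minimal sketch of a caller that must avoid filesystem recursion (not part of this patch; the function name read_page_nofs and its surrounding code are illustrative assumptions):

#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/err.h>

/* Illustrative caller only: read page 'index' of 'mapping' with GFP_NOFS
 * so the allocation cannot recurse back into the filesystem.  After this
 * patch the page-cache/LRU insertion uses the same flags as the page
 * allocation itself. */
static int read_page_nofs(struct address_space *mapping, pgoff_t index)
{
	struct page *page = read_cache_page_gfp(mapping, index, GFP_NOFS);

	if (IS_ERR(page))
		return PTR_ERR(page);	/* -ENOMEM, -EIO, ... */

	/* ... page is uptodate here; use it ... */

	page_cache_release(page);	/* drop the reference taken for us */
	return 0;
}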
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8e62d3e2b897..a0733783e8d0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4904,9 +4904,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
int cpu;
enable_swap_cgroup();
parent = NULL;
- root_mem_cgroup = memcg;
if (mem_cgroup_soft_limit_tree_init())
goto free_out;
+ root_mem_cgroup = memcg;
for_each_possible_cpu(cpu) {
struct memcg_stock_pcp *stock =
&per_cpu(memcg_stock, cpu);
@@ -4945,7 +4945,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
return &memcg->css;
free_out:
__mem_cgroup_free(memcg);
- root_mem_cgroup = NULL;
return ERR_PTR(error);
}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 76f2c5ae908e..069b64e521fc 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -176,7 +176,7 @@ static bool oom_unkillable_task(struct task_struct *p,
unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
const nodemask_t *nodemask, unsigned long totalpages)
{
- int points;
+ long points;
if (oom_unkillable_task(p, mem, nodemask))
return 0;
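[Note on the oom_kill.c change] Later in oom_badness() the score is normalised to a 0..1000 range (roughly points *= 1000; points /= totalpages in this kernel), so with a 32-bit int a task using more than about INT_MAX / 1000, i.e. roughly 2.1 million pages (~8 GiB with 4 KiB pages), overflows the intermediate product. A standalone sketch of the arithmetic, with made-up numbers:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	/* Hypothetical task footprint (rss + page tables + swap entries),
	 * in pages: ~3 million 4 KiB pages is roughly 11 GiB. */
	long long pages = 3000000;
	long long scaled = pages * 1000;	/* the 0..1000 normalisation step */

	printf("scaled = %lld, INT_MAX = %d -> %s\n", scaled, INT_MAX,
	       scaled > INT_MAX ? "overflows a 32-bit int" : "fits in int");
	return 0;
}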
diff --git a/mm/percpu.c b/mm/percpu.c
index 3bb810a72006..716eb4acf2fc 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1023,9 +1023,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
if (!is_vmalloc_addr(addr))
return __pa(addr);
else
- return page_to_phys(vmalloc_to_page(addr));
+ return page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr);
} else
- return page_to_phys(pcpu_addr_to_page(addr));
+ return page_to_phys(pcpu_addr_to_page(addr)) +
+ offset_in_page(addr);
}
/**
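[Note on the percpu.c change] page_to_phys() returns the physical address of the start of the page, so the old code silently dropped the low bits of any address that is not page-aligned; adding offset_in_page(addr), essentially addr & (PAGE_SIZE - 1), restores them. A standalone sketch with made-up addresses:

#include <stdio.h>

#define PAGE_SIZE 4096ULL
/* Same idea as the kernel's offset_in_page(): the low bits of an address
 * within its page. */
#define offset_in_page(a) ((unsigned long long)(a) & (PAGE_SIZE - 1))

int main(void)
{
	/* Made-up values: a vmalloc'ed per-cpu address and the physical
	 * base of the page it maps to (what page_to_phys() would return). */
	unsigned long long vaddr = 0xffffc90000123456ULL;
	unsigned long long page_phys = 0x7a9d2000ULL;

	printf("old result: %#llx (offset within the page lost)\n", page_phys);
	printf("new result: %#llx\n", page_phys + offset_in_page(vaddr));
	return 0;
}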
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index fb96b2c20e3b..071155a6f83b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1292,7 +1292,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
unsigned long align, unsigned long flags, unsigned long start,
unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
- static struct vmap_area *va;
+ struct vmap_area *va;
struct vm_struct *area;
BUG_ON(in_interrupt());