author		Dave Hansen <dave@linux.vnet.ibm.com>		2008-06-12 15:21:48 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-06-12 18:05:41 -0700
commit		bcf8039ed45f56013c4afea5520bca7d909e5e61
tree		2c3348eb300fdd910df9e012882bd3d2f263a390
parent		2165009bdf63f79716a36ad545df14c3cdf958b7
pagemap: fix large pages in pagemap
We were walking right into huge page areas in the pagemap walker, and
calling the pmds pmd_bad() and clearing them.  That leaked huge pages.
Bad.

This patch at least works around that for now.  It ignores huge pages in
the pagemap walker for the time being, and won't leak those pages.

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
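For context: the walker emits one u64 pagemap entry per virtual page of the
target process.  Below is a minimal userspace sketch that decodes one entry;
it is not part of this patch, and the bit layout is an assumption read off
the PM_* macros in this version of fs/proc/task_mmu.c (bits 0-54 PFN or swap
data, bits 55-60 page shift, bit 62 PM_SWAP, bit 63 PM_PRESENT).

	/* Hedged sketch, not part of this patch: decode one pagemap entry. */
	#include <stdio.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		if (argc != 3) {
			fprintf(stderr, "usage: %s <pid> <hex-vaddr>\n", argv[0]);
			return 1;
		}

		char path[64];
		snprintf(path, sizeof(path), "/proc/%s/pagemap", argv[1]);
		int fd = open(path, O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* one 8-byte entry per virtual page: seek to vaddr's slot */
		unsigned long vaddr = strtoul(argv[2], NULL, 16);
		off_t slot = (off_t)(vaddr / sysconf(_SC_PAGESIZE)) * sizeof(uint64_t);

		uint64_t entry;
		if (pread(fd, &entry, sizeof(entry), slot) != (ssize_t)sizeof(entry)) {
			perror("pread");
			close(fd);
			return 1;
		}
		close(fd);

		printf("present=%d swapped=%d pshift=%u pfn=0x%llx\n",
		       (int)(entry >> 63) & 1,			/* PM_PRESENT */
		       (int)(entry >> 62) & 1,			/* PM_SWAP */
		       (unsigned)((entry >> 55) & 0x3f),	/* PM_PSHIFT field */
		       (unsigned long long)(entry & ((1ULL << 55) - 1))); /* PM_PFRAME */
		return 0;
	}

With the fix below, a page inside a hugetlb VMA simply reads back as not
present rather than having its pmd clobbered by the walker.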
-rw-r--r--	fs/proc/task_mmu.c	39
1 file changed, 30 insertions(+), 9 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f0df3109343d..ab8ccc9d14ff 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -553,24 +553,45 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte)
 	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
 }
 
+static unsigned long pte_to_pagemap_entry(pte_t pte)
+{
+	unsigned long pme = 0;
+	if (is_swap_pte(pte))
+		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
+			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
+	else if (pte_present(pte))
+		pme = PM_PFRAME(pte_pfn(pte))
+			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
+	return pme;
+}
+
 static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			     struct mm_walk *walk)
 {
+	struct vm_area_struct *vma;
 	struct pagemapread *pm = walk->private;
 	pte_t *pte;
 	int err = 0;
 
+	/* find the first VMA at or above 'addr' */
+	vma = find_vma(walk->mm, addr);
 	for (; addr != end; addr += PAGE_SIZE) {
 		u64 pfn = PM_NOT_PRESENT;
-		pte = pte_offset_map(pmd, addr);
-		if (is_swap_pte(*pte))
-			pfn = PM_PFRAME(swap_pte_to_pagemap_entry(*pte))
-				| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
-		else if (pte_present(*pte))
-			pfn = PM_PFRAME(pte_pfn(*pte))
-				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
-		/* unmap so we're not in atomic when we copy to userspace */
-		pte_unmap(pte);
+
+		/* check to see if we've left 'vma' behind
+		 * and need a new, higher one */
+		if (vma && (addr >= vma->vm_end))
+			vma = find_vma(walk->mm, addr);
+
+		/* check that 'vma' actually covers this address,
+		 * and that it isn't a huge page vma */
+		if (vma && (vma->vm_start <= addr) &&
+		    !is_vm_hugetlb_page(vma)) {
+			pte = pte_offset_map(pmd, addr);
+			pfn = pte_to_pagemap_entry(*pte);
+			/* unmap before userspace copy */
+			pte_unmap(pte);
+		}
 		err = add_to_pagemap(addr, pfn, pm);
 		if (err)
 			return err;
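A note on the design choice above: a VMA lookup is comparatively costly, so
the walker calls find_vma() once before the loop and refreshes the cached
VMA only when addr crosses its vm_end, rather than once per page.  Here is
a hedged, standalone illustration of that caching pattern (not kernel code),
with a hypothetical sorted range table standing in for the mm's VMA list:

	#include <stdio.h>

	/* hypothetical stand-in for a VMA: a half-open [start, end) range */
	struct range { unsigned long start, end; };

	static const struct range table[] = {
		{ 0x1000, 0x3000 },
		{ 0x5000, 0x6000 },
	};

	/* analogue of find_vma(): first range whose end lies above addr */
	static const struct range *range_lookup(unsigned long addr)
	{
		for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			if (addr < table[i].end)
				return &table[i];
		return NULL;
	}

	int main(void)
	{
		unsigned long addr = 0x0, end = 0x7000, step = 0x1000;
		const struct range *r = range_lookup(addr);	/* look up once */

		for (; addr != end; addr += step) {
			/* left the cached range behind? fetch a higher one */
			if (r && addr >= r->end)
				r = range_lookup(addr);
			/* the cached range may start above addr: check coverage */
			if (r && r->start <= addr)
				printf("0x%05lx: covered by [0x%05lx, 0x%05lx)\n",
				       addr, r->start, r->end);
			else
				printf("0x%05lx: hole\n", addr);
		}
		return 0;
	}

This is also why the patch tests vma->vm_start <= addr before trusting the
cached VMA: find_vma() returns the first VMA ending above addr even when
addr itself falls in a hole between VMAs.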