author     Linus Torvalds <torvalds@linux-foundation.org>  2011-01-19 11:47:44 -0500
committer  Scott Sweeny <scott.sweeny@timesys.com>         2011-01-19 11:47:44 -0500
commit     ec454f99ac7eb50692d8be7dcdebfe30bc11f684 (patch)
tree       4cc03c452f6357a4a694165d28cdd5b44d9aefb2 /mm
parent     9fe6206f400646a2322096b56c59891d530e8d51 (diff)
Linux 2.6.35.3
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c    32
-rw-r--r--  mm/mlock.c      8
-rw-r--r--  mm/swapfile.c   6
3 files changed, 40 insertions, 6 deletions
diff --git a/mm/memory.c b/mm/memory.c
index bde42c6d3633..307bf77fc441 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2760,6 +2760,26 @@ out_release:
}
/*
+ * This is like a special single-page "expand_downwards()",
+ * except we must first make sure that 'address-PAGE_SIZE'
+ * doesn't hit another vma.
+ *
+ * The "find_vma()" will do the right thing even if we wrap
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+ address &= PAGE_MASK;
+ if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+ address -= PAGE_SIZE;
+ if (find_vma(vma->vm_mm, address) != vma)
+ return -ENOMEM;
+
+ expand_stack(vma, address);
+ }
+ return 0;
+}
+
+/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2772,19 +2792,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
spinlock_t *ptl;
pte_t entry;
+ pte_unmap(page_table);
+
+ /* Check if we need to add a guard page to the stack */
+ if (check_stack_guard_page(vma, address) < 0)
+ return VM_FAULT_SIGBUS;
+
+ /* Use the zero-page for reads */
if (!(flags & FAULT_FLAG_WRITE)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
vma->vm_page_prot));
- ptl = pte_lockptr(mm, pmd);
- spin_lock(ptl);
+ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!pte_none(*page_table))
goto unlock;
goto setpte;
}
/* Allocate our own private page. */
- pte_unmap(page_table);
-
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
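The new check_stack_guard_page() above is the heart of the memory.c change: a fault on the lowest page of a grow-down stack is only allowed to proceed when the page below it is unmapped, which keeps one guard page between the stack and whatever sits beneath it. The following userspace sketch models that decision in isolation; vma_t, VMA_GROWSDOWN, lookup_vma() and check_guard() are invented stand-ins for vm_area_struct, VM_GROWSDOWN, find_vma() and check_stack_guard_page(), and expand_stack() is elided to a comment.

/* Userspace model of the guard-page decision; compiles with any C99
 * compiler.  All names here are hypothetical stand-ins, not kernel API. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define VMA_GROWSDOWN 0x1UL

typedef struct {
	unsigned long start, end;	/* [start, end), page aligned */
	unsigned long flags;
} vma_t;

/* Like find_vma(): first vma (in an address-sorted table) whose end
 * lies above addr, even when addr itself falls in a hole. */
static vma_t *lookup_vma(vma_t *v, size_t n, unsigned long addr)
{
	for (size_t i = 0; i < n; i++)
		if (addr < v[i].end)
			return &v[i];
	return NULL;
}

/* Mirrors check_stack_guard_page(): a fault on the lowest page of a
 * grow-down vma fails unless the page below it is a hole (in which
 * case the kernel would expand_stack() into it). */
static int check_guard(vma_t *v, size_t n, vma_t *vma, unsigned long addr)
{
	addr &= PAGE_MASK;
	if ((vma->flags & VMA_GROWSDOWN) && addr == vma->start) {
		addr -= PAGE_SIZE;
		if (lookup_vma(v, n, addr) != vma)
			return -1;	/* the kernel returns -ENOMEM */
		/* expand_stack(vma, addr) would grow the vma here */
	}
	return 0;
}

int main(void)
{
	vma_t v[] = {
		{ 0x001000, 0x002000, 0 },		/* unrelated mapping */
		{ 0x100000, 0x200000, VMA_GROWSDOWN },	/* the stack */
	};
	printf("hole below stack:    %d\n", check_guard(v, 2, &v[1], 0x100000));
	v[0].end = 0x100000;	/* another mapping now abuts the stack */
	printf("mapping below stack: %d\n", check_guard(v, 2, &v[1], 0x100000));
	return 0;
}

This also shows why find_vma() alone suffices, and what the "even if we wrap" comment means: when the page below is a hole, the lookup still lands on the stack vma itself, so pointer equality is exactly the "nothing else is there" test; and if the faulting address were in page zero, address - PAGE_SIZE would wrap to the very top of the address space, where the lookup never finds the stack vma, so the fault is refused.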
diff --git a/mm/mlock.c b/mm/mlock.c
index 3f82720e0515..49e5e4cb8232 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -167,6 +167,14 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
if (vma->vm_flags & VM_WRITE)
gup_flags |= FOLL_WRITE;
+ /* We don't try to access the guard page of a stack vma */
+ if (vma->vm_flags & VM_GROWSDOWN) {
+ if (start == vma->vm_start) {
+ start += PAGE_SIZE;
+ nr_pages--;
+ }
+ }
+
while (nr_pages > 0) {
int i;
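The mlock.c hunk is the companion to the guard page: __mlock_vma_pages_range() faults in every page of a vma, and doing that for the bottom page of a stack vma would populate the guard page itself. A minimal sketch of the adjustment, with vma_start and vma_flags standing in for the vm_area_struct fields:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define VMA_GROWSDOWN 0x1UL

/* Mirrors the hunk above: when the range to fault in starts at the
 * bottom of a grow-down vma, step past the guard page so it stays
 * unpopulated. */
static void skip_guard_page(unsigned long vma_start, unsigned long vma_flags,
			    unsigned long *start, long *nr_pages)
{
	if ((vma_flags & VMA_GROWSDOWN) && *start == vma_start) {
		*start += PAGE_SIZE;
		(*nr_pages)--;
	}
}

int main(void)
{
	unsigned long start = 0x100000;
	long nr_pages = 16;

	skip_guard_page(0x100000, VMA_GROWSDOWN, &start, &nr_pages);
	printf("start=%#lx nr_pages=%ld\n", start, nr_pages); /* 0x101000, 15 */
	return 0;
}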
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 03aa2d55f1a2..f08d165871b3 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -318,8 +318,10 @@ checks:
if (offset > si->highest_bit)
scan_base = offset = si->lowest_bit;
- /* reuse swap entry of cache-only swap if not busy. */
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ /* reuse swap entry of cache-only swap if not hibernation. */
+ if (vm_swap_full()
+ && usage == SWAP_HAS_CACHE
+ && si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
spin_unlock(&swap_lock);
swap_was_freed = __try_to_reclaim_swap(si, offset);
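The swapfile.c change is independent of the stack-guard work: scan_swap_map() used to reclaim any cache-only swap slot once swap was nearly full, but hibernation also allocates swap slots, and it must not steal a slot the page cache still references. The added usage == SWAP_HAS_CACHE test restricts the reclaim to callers that are themselves adding a cache reference. A sketch of the rewritten condition; the SWAP_HAS_CACHE value and the hibernation usage constant below are stand-ins for this sketch, not copied from swap.h:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the kernel's swap_map usage encoding. */
#define SWAP_HAS_CACHE     0x40	/* slot holds only a page-cache reference */
#define SWAP_MAP_HIBERNATE 0x01	/* hypothetical: hibernation-style usage */

/* Mirrors the rewritten test in scan_swap_map(): a cache-only slot
 * may be reclaimed and reused only when swap is nearly full AND the
 * caller itself wants a cache reference (i.e. not hibernation). */
static bool may_reclaim_cache_only_slot(bool swap_full,
					unsigned char usage,
					unsigned char map_entry)
{
	return swap_full
		&& usage == SWAP_HAS_CACHE
		&& map_entry == SWAP_HAS_CACHE;
}

int main(void)
{
	/* A page-cache allocation may reclaim a cache-only slot... */
	printf("%d\n", may_reclaim_cache_only_slot(true, SWAP_HAS_CACHE,
						   SWAP_HAS_CACHE));	/* 1 */
	/* ...but hibernation (usage != SWAP_HAS_CACHE) must not. */
	printf("%d\n", may_reclaim_cache_only_slot(true, SWAP_MAP_HIBERNATE,
						   SWAP_HAS_CACHE));	/* 0 */
	return 0;
}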