author		Hugh Dickins <hughd@google.com>	2020-08-06 23:26:25 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2020-08-26 10:40:48 +0200
commit		9a05b774af30ec11913c770a2df2522409fb7447 (patch)
tree		bc56464f3f2adf43366836aae012982693ce40b4 /mm
parent		8043d5ee916885a0d6d8974a486882c6b66b3f82 (diff)
khugepaged: khugepaged_test_exit() check mmget_still_valid()
[ Upstream commit bbe98f9cadff58cdd6a4acaeba0efa8565dabe65 ]

Move collapse_huge_page()'s mmget_still_valid() check into
khugepaged_test_exit() itself.  collapse_huge_page() is used for anon THP
only, and earned its mmget_still_valid() check because it inserts a huge
pmd entry in place of the page table's pmd entry; whereas collapse_file()'s
retract_page_tables() or collapse_pte_mapped_thp() merely clears the page
table's pmd entry.  But core dumping without mmap lock must have been as
open to mistaking a racily cleared pmd entry for a page table at physical
page 0, as exit_mmap() was.  And we certainly have no interest in mapping
as a THP once dumping core.

Fixes: 59ea6d06cfa9 ("coredump: fix race condition between collapse_huge_page() and core dumping")
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: <stable@vger.kernel.org> [4.8+]
Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2008021217020.27773@eggly.anvils
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
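For reference (not part of this patch): in kernels of this era, mmget_still_valid()
is a one-line helper in include/linux/sched/mm.h that reports whether a core dump
has claimed the mm.  A minimal sketch, assuming the 5.x-era definition of that
helper, of how it combines with the strengthened exit test this patch introduces:

/* Reference sketch only -- approximate, not taken from this diff */
static inline bool mmget_still_valid(struct mm_struct *mm)
{
	/* becomes false once the core dumper has set mm->core_state */
	return likely(!mm->core_state);
}

/*
 * After this patch the exit test covers both "all users gone" and
 * "core dump in progress", so every caller of khugepaged_test_exit()
 * backs off in either case.
 */
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
}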
Diffstat (limited to 'mm')
-rw-r--r--	mm/khugepaged.c	5
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 719f49d1fba2..76e3e90dbc16 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -401,7 +401,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
 
 static inline int khugepaged_test_exit(struct mm_struct *mm)
 {
-	return atomic_read(&mm->mm_users) == 0;
+	return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
 }
 
 static bool hugepage_vma_check(struct vm_area_struct *vma,
@@ -1019,9 +1019,6 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * handled by the anon_vma lock + PG_lock.
 	 */
 	down_write(&mm->mmap_sem);
-	result = SCAN_ANY_PROCESS;
-	if (!mmget_still_valid(mm))
-		goto out;
 	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result)
 		goto out;
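The explicit check removed from collapse_huge_page() is not lost: hugepage_vma_revalidate(),
called immediately after down_write(&mm->mmap_sem), opens with khugepaged_test_exit(), which
now also fails while a core dump is in progress.  A simplified sketch of that entry check
(illustrative fragment, not part of this diff; the rest of the function is elided):

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   struct vm_area_struct **vmap)
{
	/* Now covers mm_users == 0 as well as an in-progress core dump */
	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	/* ... VMA lookup and hugepage suitability checks continue here ... */
}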