author    Vandana Salve <vsalve@nvidia.com>    2014-06-10 15:10:04 +0530
committer Riham Haidar <rhaidar@nvidia.com>    2014-06-11 17:09:55 -0700
commit    e032a2e8d2604e150cb610562c3c457b91050d56 (patch)
tree      7999fa5a48802e055fc09893e814001725144fcb /mm
parent    0e7370ef6597150986099c837b26ccb526f1ac4a (diff)
mm: get_user_pages: migrate out CMA pages when FOLL_DURABLE flag is set
When __get_user_pages() is called with the FOLL_DURABLE flag, ensure
that no page in a CMA pageblock gets pinned. This works around the
permanent migration failures caused by get_user_pages() holding page
references for a long period of time.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>

bug 1517584

Change-Id: I11b7c87e78f1022d6fded85a1ed6bac73c5f0a7c
Signed-off-by: Vandana Salve <vsalve@nvidia.com>
Reviewed-on: http://git-master/r/421678
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
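For context, a minimal sketch of how a caller might use the new flag, assuming the FOLL_DURABLE definition introduced elsewhere in this patch series; pin_user_buffer() and its parameters are hypothetical, not part of the patch:

	/*
	 * Hypothetical caller sketch: pin user pages for long-lived DMA.
	 * With FOLL_DURABLE set, __get_user_pages() migrates any page found
	 * in a CMA pageblock to a non-movable one before taking a reference.
	 * The caller must hold mm->mmap_sem for read.
	 */
	static long pin_user_buffer(struct task_struct *tsk, struct mm_struct *mm,
				    unsigned long start, unsigned long nr_pages,
				    struct page **pages)
	{
		return __get_user_pages(tsk, mm, start, nr_pages,
					FOLL_TOUCH | FOLL_GET | FOLL_DURABLE,
					pages, NULL, NULL);
	}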
Diffstat (limited to 'mm')
-rw-r--r--  mm/internal.h  12
-rw-r--r--  mm/memory.c    43
2 files changed, 55 insertions(+), 0 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 8562de0a5197..a290d04fb084 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -105,6 +105,18 @@ extern void prep_compound_page(struct page *page, unsigned long order);
extern bool is_free_buddy_page(struct page *page);
#endif
+#ifdef CONFIG_CMA
+static inline bool is_cma_page(struct page *page)
+{
+	unsigned int mt = get_pageblock_migratetype(page);
+	if (mt == MIGRATE_ISOLATE || mt == MIGRATE_CMA)
+		return true;
+	return false;
+}
+#else
+#define is_cma_page(page) false
+#endif
+
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
/*
diff --git a/mm/memory.c b/mm/memory.c
index 4d1e120536eb..8e101b74df71 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1651,6 +1651,45 @@ static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
}
/**
+ * migrate_replace_cma_page() - migrate a page out of a CMA pageblock
+ * @page: source page to be migrated
+ *
+ * Returns either the old page (if migration was not possible) or a pointer
+ * to the newly allocated page (with an additional reference taken).
+ *
+ * get_user_pages() might take a reference to a page for a long period of
+ * time, which prevents such a page from being migrated. This is fatal to
+ * the preferred usage pattern of CMA pageblocks. This function replaces
+ * the given user page with a new one allocated from a NON-MOVABLE
+ * pageblock, so pinning a CMA page can be avoided.
+ */
+static inline struct page *migrate_replace_cma_page(struct page *page)
+{
+	struct page *newpage = alloc_page(GFP_HIGHUSER);
+
+	if (!newpage)
+		goto out;
+
+	/*
+	 * Take an additional reference to the new page to ensure it won't
+	 * get freed when the migration procedure ends.
+	 */
+	get_page_foll(newpage);
+
+	if (migrate_replace_page(page, newpage) == 0)
+		return newpage;
+
+	put_page(newpage);
+	__free_page(newpage);
+out:
+	/*
+	 * Migration errors in the get_user_pages() path are not fatal to
+	 * CMA itself, so fall back to the original page rather than fail.
+	 */
+	return page;
+}
+
+/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
* @mm: mm_struct of target mm
@@ -1885,6 +1924,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
}
if (IS_ERR(page))
return i ? i : PTR_ERR(page);
+
+		if ((gup_flags & FOLL_DURABLE) && is_cma_page(page))
+			page = migrate_replace_cma_page(page);
+
if (pages) {
pages[i] = page;
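A usage note on the hook above: pages pinned with FOLL_DURABLE are released like any other get_user_pages() result, since after replacement they live in non-movable pageblocks. A hedged sketch continuing the hypothetical pin_user_buffer() example from the commit message (error handling elided):

	down_read(&mm->mmap_sem);
	nr = pin_user_buffer(tsk, mm, start, nr_pages, pages);
	up_read(&mm->mmap_sem);

	/* ... long-lived DMA to or from the pinned pages ... */

	/* No CMA-specific teardown is needed on release. */
	for (i = 0; i < nr; i++)
		put_page(pages[i]);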