author		Henry Burns <henryburns@google.com>	2019-08-24 17:55:03 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-09-06 10:19:40 +0200
commit		2929516c3f391d6396dd8b11146fbb94d174db6a
tree		1ea67823b832c92f7b9648647101bd831710a4a9
parent		ba37a9401e76bbb47bcad1c3421e834874e1c1b4
mm/zsmalloc.c: migration can leave pages in ZS_EMPTY indefinitely
commit 1a87aa03597efa9641e92875b883c94c7f872ccb upstream.

In zs_page_migrate() we call putback_zspage() after we have finished
migrating all pages in this zspage. However, the return value is ignored.
If a zs_free() races in between zs_page_isolate() and zs_page_migrate(),
freeing the last object in the zspage, putback_zspage() will leave the
page in ZS_EMPTY for potentially an unbounded amount of time.

To fix this, we need to do the same thing as zs_page_putback() does:
schedule free_work to occur.

To avoid duplicated code, move the sequence to a new
putback_zspage_deferred() function which both zs_page_migrate() and
zs_page_putback() call.

Link: http://lkml.kernel.org/r/20190809181751.219326-1-henryburns@google.com
Fixes: 48b4800a1c6a ("zsmalloc: page migration support")
Signed-off-by: Henry Burns <henryburns@google.com>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Henry Burns <henrywolfeburns@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Jonathan Adams <jwadams@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
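For readers outside the kernel tree, here is a minimal, self-contained
userspace sketch of the deferred-putback pattern this patch introduces.
All types and the putback_zspage()/schedule_free_work() helpers below are
simplified stand-ins, not the kernel definitions.

/*
 * The race being fixed, in timeline form:
 *
 *   CPU0 (migration)                  CPU1
 *   zs_page_isolate()
 *                                     zs_free()  <- frees the last object;
 *                                                   the zspage is now empty
 *   zs_page_migrate()
 *     putback_zspage() == ZS_EMPTY   <- return value ignored before the
 *                                       patch, so nothing ever frees the
 *                                       zspage; it sits on the ZS_EMPTY
 *                                       list indefinitely
 */
#include <stdio.h>

enum fullness_group { ZS_EMPTY, ZS_ALMOST_EMPTY, ZS_ALMOST_FULL, ZS_FULL };

struct zspage { int inuse; };                 /* objects still allocated */
struct size_class { int unused; };
struct zs_pool { int free_work_scheduled; };

/* Stand-in: reinsert the zspage into its class and report its group. */
static enum fullness_group putback_zspage(struct size_class *class,
					  struct zspage *zspage)
{
	(void)class;
	return zspage->inuse == 0 ? ZS_EMPTY : ZS_ALMOST_EMPTY;
}

/* Stand-in for schedule_work(&pool->free_work). */
static void schedule_free_work(struct zs_pool *pool)
{
	pool->free_work_scheduled = 1;
}

/*
 * The shared helper: put the zspage back and, if it turned out to be
 * empty, defer its destruction to free_work.  This is what both
 * zs_page_migrate() and zs_page_putback() call after the patch.
 */
static void putback_zspage_deferred(struct zs_pool *pool,
				    struct size_class *class,
				    struct zspage *zspage)
{
	if (putback_zspage(class, zspage) == ZS_EMPTY)
		schedule_free_work(pool);
}

int main(void)
{
	struct zs_pool pool = { 0 };
	struct size_class class = { 0 };
	struct zspage zspage = { 0 };         /* last object already freed */

	putback_zspage_deferred(&pool, &class, &zspage);
	printf("free_work scheduled: %d\n", pool.free_work_scheduled);
	return 0;
}

In the kernel, both callers hold class->lock at this point; that is safe
because schedule_work() never sleeps, and the actual freeing happens later
from the pool's free_work handler.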
Diffstat (limited to 'mm')
-rw-r--r--	mm/zsmalloc.c | 19 +++++++++++++++----
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index cf15851a7d2f..f624cc2d91d9 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1939,6 +1939,18 @@ static void dec_zspage_isolation(struct zspage *zspage)
 	zspage->isolated--;
 }
 
+static void putback_zspage_deferred(struct zs_pool *pool,
+				    struct size_class *class,
+				    struct zspage *zspage)
+{
+	enum fullness_group fg;
+
+	fg = putback_zspage(class, zspage);
+	if (fg == ZS_EMPTY)
+		schedule_work(&pool->free_work);
+
+}
+
 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 				struct page *newpage, struct page *oldpage)
 {
@@ -2097,7 +2109,7 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 	 * the list if @page is final isolated subpage in the zspage.
 	 */
 	if (!is_zspage_isolated(zspage))
-		putback_zspage(class, zspage);
+		putback_zspage_deferred(pool, class, zspage);
 
 	reset_page(page);
 	put_page(page);
@@ -2144,14 +2156,13 @@ void zs_page_putback(struct page *page)
 	spin_lock(&class->lock);
 	dec_zspage_isolation(zspage);
 	if (!is_zspage_isolated(zspage)) {
-		fg = putback_zspage(class, zspage);
 		/*
 		 * Due to page_lock, we cannot free zspage immediately
 		 * so let's defer.
 		 */
-		if (fg == ZS_EMPTY)
-			schedule_work(&pool->free_work);
+		putback_zspage_deferred(pool, class, zspage);
 	}
+
 	spin_unlock(&class->lock);
 }