author    Linus Torvalds <torvalds@linux-foundation.org>    2020-02-26 01:46:08 +0530
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2020-03-11 07:51:15 +0100
commit    4057527a5e0428892f6bce5e0e78540e053bc136 (patch)
tree      b4724db4de5ac113541da65d3b744f674d85e45c /include
parent    4b2e97b1409adc49bc6db169f8066d01e79ac4fb (diff)
mm: make page ref count overflow check tighter and more explicit
commit f958d7b528b1b40c44cfda5eabe2d82760d868c3 upstream.

We have a VM_BUG_ON() to check that the page reference count doesn't underflow (or get close to overflow) by checking the sign of the count.

That's all fine, but we actually want to allow people to use a "get page ref unless it's already very high" helper function, and we want that one to use the sign of the page ref (without triggering this VM_BUG_ON).

Change the VM_BUG_ON to only check for small underflows (or _very_ close to overflowing), and ignore overflows which have strayed into negative territory.

Acked-by: Matthew Wilcox <willy@infradead.org>
Cc: Jann Horn <jannh@google.com>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

[ 4.4.y backport notes:
  Ajay: Open-coded atomic refcount access due to missing
        page_ref_count() helper in 4.4.y
  Srivatsa: Added overflow check to get_page_foll() and related code. ]
Signed-off-by: Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
Signed-off-by: Ajay Kaher <akaher@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
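[ Editorial note, not part of the commit: the new check relies on unsigned
  wraparound, so it fires only for reference counts in the range [-127, 0],
  i.e. zero, a small underflow, or a count that has wrapped all the way
  around; a count that merely overflowed past INT_MAX into large negative
  values is deliberately ignored. A minimal userspace sketch of the same
  arithmetic, with atomic_read(&page->_count) replaced by a plain int for
  illustration: ]

/* Userspace illustration only -- not kernel code. */
#include <stdio.h>
#include <limits.h>

/* Same arithmetic as the macro added by this patch. */
static int zero_or_close_to_overflow(int count)
{
	return (unsigned int)count + 127u <= 127u;
}

int main(void)
{
	int samples[] = { 1, 1000, INT_MAX, 0, -1, -127, -128, INT_MIN };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("count=%11d -> %s\n", samples[i],
		       zero_or_close_to_overflow(samples[i]) ? "BUG" : "ok");
	return 0;
}

[ Running this prints "BUG" only for 0, -1 and -127; positive counts and
  large negative counts (such as INT_MIN, the result of a refcount that
  overflowed past INT_MAX) pass, which is exactly the looser behaviour the
  commit message describes. ]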
Diffstat (limited to 'include')
-rw-r--r--   include/linux/mm.h   6
1 files changed, 5 insertions, 1 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 15f81b2b87ed..8e4eaaaa3b9f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -488,6 +488,10 @@ static inline void get_huge_page_tail(struct page *page)
extern bool __get_page_tail(struct page *page);
+/* 127: arbitrary random number, small enough to assemble well */
+#define page_ref_zero_or_close_to_overflow(page) \
+ ((unsigned int) atomic_read(&page->_count) + 127u <= 127u)
+
static inline void get_page(struct page *page)
{
if (unlikely(PageTail(page)))
@@ -497,7 +501,7 @@ static inline void get_page(struct page *page)
* Getting a normal page or the head of a compound page
* requires to already have an elevated page->_count.
*/
- VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
+ VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
atomic_inc(&page->_count);
}
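[ Editorial note, not part of this diff: the commit message alludes to a
  "get page ref unless it's already very high" helper that keys off the
  sign of the refcount. As a hedged sketch only, in the spirit of the
  try_get_page() helper introduced later in the same upstream series and
  open-coded here in 4.4.y style (the name and placement are illustrative,
  not what this patch adds): ]

/*
 * Sketch only: take a reference unless the count is already zero,
 * underflowed, or so high it has wrapped negative.  Checking the sign
 * here is what the relaxed VM_BUG_ON above makes possible without
 * tripping get_page().
 */
static inline bool try_get_page(struct page *page)
{
	if (atomic_read(&page->_count) <= 0)
		return false;	/* freed, or wrapped past INT_MAX */
	atomic_inc(&page->_count);
	return true;
}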