From 607bf324ab3d780e1ec20b1b1a3bfaa3be58a957 Mon Sep 17 00:00:00 2001
From: Li Zefan
Date: Tue, 12 Apr 2011 15:22:26 +0800
Subject: slub: Fix a typo in config name

There's no config named SLAB_DEBUG; it is a typo of SLUB_DEBUG.

Acked-by: Christoph Lameter
Signed-off-by: Li Zefan
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index f881874843a5..129f10cdfc59 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3203,7 +3203,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
 		list_for_each_entry(p, &n->partial, lru)
 			p->slab = s;
 
-#ifdef CONFIG_SLAB_DEBUG
+#ifdef CONFIG_SLUB_DEBUG
 		list_for_each_entry(p, &n->full, lru)
 			p->slab = s;
 #endif
--
cgit v1.2.3


From 33de04ec4cb80b6bd0782e88a64954e60bc15dc1 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 15 Apr 2011 14:48:12 -0500
Subject: slub: Use NUMA_NO_NODE in get_partial

A -1 was left over during the conversion.

Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 129f10cdfc59..5284fb779670 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1487,7 +1487,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
 	page = get_partial_node(get_node(s, searchnode));
-	if (page || node != -1)
+	if (page || node != NUMA_NO_NODE)
 		return page;
 
 	return get_any_partial(s, flags);
--
cgit v1.2.3


From 5f80b13ae45df7da6646d1881da186318e70b6b6 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 15 Apr 2011 14:48:13 -0500
Subject: slub: get_map() function to establish map of free objects in a slab

The bitmap of free objects in a slab page is determined in various
functions if debugging is enabled. Provide a common function for that
purpose.

Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 34 ++++++++++++++++++++++------------
 1 file changed, 22 insertions(+), 12 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 5284fb779670..837f932671a1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -271,10 +271,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
 			__p += (__s)->size)
 
-/* Scan freelist */
-#define for_each_free_object(__p, __s, __free) \
-	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
-
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
 {
@@ -330,6 +326,21 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
 	return x.x & OO_MASK;
 }
 
+/*
+ * Determine a map of object in use on a page.
+ *
+ * Slab lock or node listlock must be held to guarantee that the page does
+ * not vanish from under us.
+ */
+static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
+{
+	void *p;
+	void *addr = page_address(page);
+
+	for (p = page->freelist; p; p = get_freepointer(s, p))
+		set_bit(slab_index(p, s, addr), map);
+}
+
 #ifdef CONFIG_SLUB_DEBUG
 /*
  * Debug settings:
  */
@@ -2673,9 +2684,8 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 		return;
 	slab_err(s, page, "%s", text);
 	slab_lock(page);
-	for_each_free_object(p, s, page->freelist)
-		set_bit(slab_index(p, s, addr), map);
 
+	get_map(s, page, map);
 	for_each_object(p, s, addr, page->objects) {
 		if (!test_bit(slab_index(p, s, addr), map)) {
@@ -3610,10 +3620,11 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
 	/* Now we know that a valid freelist exists */
 	bitmap_zero(map, page->objects);
 
-	for_each_free_object(p, s, page->freelist) {
-		set_bit(slab_index(p, s, addr), map);
-		if (!check_object(s, page, p, SLUB_RED_INACTIVE))
-			return 0;
+	get_map(s, page, map);
+	for_each_object(p, s, addr, page->objects) {
+		if (test_bit(slab_index(p, s, addr), map))
+			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
+				return 0;
 	}
 
 	for_each_object(p, s, addr, page->objects)
@@ -3821,8 +3832,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
 	void *p;
 
 	bitmap_zero(map, page->objects);
-	for_each_free_object(p, s, page->freelist)
-		set_bit(slab_index(p, s, addr), map);
+	get_map(s, page, map);
 
 	for_each_object(p, s, addr, page->objects)
 		if (!test_bit(slab_index(p, s, addr), map))
--
cgit v1.2.3


From 01ad8a7bc226ddbbf90e4c15167d9e31a8d02930 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 15 Apr 2011 14:48:14 -0500
Subject: slub: Eliminate repeated use of c->page through a new page variable

__slab_alloc() is full of "c->page" repeats. Let's just use one local
variable named "page" for this. This also avoids the need to have
another variable called "new".
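For illustration, the shape of the change (a simplified sketch, not the
complete diff below):

	/* Before: the per-cpu slab page is re-read at every use */
	object = c->page->freelist;
	c->page->inuse = c->page->objects;
	c->page->freelist = NULL;

	/* After: load it once into a local variable */
	struct page *page = c->page;

	object = page->freelist;
	page->inuse = page->objects;
	page->freelist = NULL;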
Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 41 ++++++++++++++++++++++-------------------
 1 file changed, 22 insertions(+), 19 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 837f932671a1..ab44368ed692 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1790,7 +1790,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void **object;
-	struct page *new;
+	struct page *page;
 #ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long flags;
 
@@ -1808,28 +1808,30 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	/* We handle __GFP_ZERO in the caller */
 	gfpflags &= ~__GFP_ZERO;
 
-	if (!c->page)
+	page = c->page;
+	if (!page)
 		goto new_slab;
 
-	slab_lock(c->page);
+	slab_lock(page);
 	if (unlikely(!node_match(c, node)))
 		goto another_slab;
 
 	stat(s, ALLOC_REFILL);
 
 load_freelist:
-	object = c->page->freelist;
+	object = page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
 	if (kmem_cache_debug(s))
 		goto debug;
 
 	c->freelist = get_freepointer(s, object);
-	c->page->inuse = c->page->objects;
-	c->page->freelist = NULL;
-	c->node = page_to_nid(c->page);
+	page->inuse = page->objects;
+	page->freelist = NULL;
+	c->node = page_to_nid(page);
+
 unlock_out:
-	slab_unlock(c->page);
+	slab_unlock(page);
 #ifdef CONFIG_CMPXCHG_LOCAL
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
@@ -1841,9 +1843,9 @@ another_slab:
 	deactivate_slab(s, c);
 
 new_slab:
-	new = get_partial(s, gfpflags, node);
-	if (new) {
-		c->page = new;
+	page = get_partial(s, gfpflags, node);
+	if (page) {
+		c->page = page;
 		stat(s, ALLOC_FROM_PARTIAL);
 		goto load_freelist;
 	}
@@ -1852,19 +1854,20 @@ new_slab:
 	if (gfpflags & __GFP_WAIT)
 		local_irq_enable();
 
-	new = new_slab(s, gfpflags, node);
+	page = new_slab(s, gfpflags, node);
 
 	if (gfpflags & __GFP_WAIT)
 		local_irq_disable();
 
-	if (new) {
+	if (page) {
 		c = __this_cpu_ptr(s->cpu_slab);
 		stat(s, ALLOC_SLAB);
 		if (c->page)
 			flush_slab(s, c);
-		slab_lock(new);
-		__SetPageSlubFrozen(new);
-		c->page = new;
+
+		slab_lock(page);
+		__SetPageSlubFrozen(page);
+		c->page = page;
 		goto load_freelist;
 	}
 	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
@@ -1874,11 +1877,11 @@ new_slab:
 #endif
 	return NULL;
 debug:
-	if (!alloc_debug_processing(s, c->page, object, addr))
+	if (!alloc_debug_processing(s, page, object, addr))
 		goto another_slab;
 
-	c->page->inuse++;
-	c->page->freelist = get_freepointer(s, object);
+	page->inuse++;
+	page->freelist = get_freepointer(s, object);
 	c->node = NUMA_NO_NODE;
 	goto unlock_out;
 }
--
cgit v1.2.3


From dc1fb7f43636754a4d06f7bdb8ea3269a7d71d6d Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 15 Apr 2011 14:48:15 -0500
Subject: slub: Move node determination out of hotpath

If the node does not change then there is no need to recalculate the
node from the page struct. So move the node determination into the
places where we acquire a new slab page.
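A sketch of where the node assignment ends up (simplified from the diff
below; load_freelist is the hot path, new_slab the slow path):

	load_freelist:
		/* c->node is already cached, no page_to_nid() here */
		object = page->freelist;
		...

	new_slab:
		page = get_partial(s, gfpflags, node);
		if (page) {
			stat(s, ALLOC_FROM_PARTIAL);
	load_from_page:
			/* recalculated only when a new page is attached */
			c->node = page_to_nid(page);
			c->page = page;
			goto load_freelist;
		}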
Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index ab44368ed692..301360bc00c6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1828,7 +1828,6 @@ load_freelist:
 	c->freelist = get_freepointer(s, object);
 	page->inuse = page->objects;
 	page->freelist = NULL;
-	c->node = page_to_nid(page);
 
 unlock_out:
 	slab_unlock(page);
@@ -1845,8 +1844,10 @@ another_slab:
 
 new_slab:
 	page = get_partial(s, gfpflags, node);
 	if (page) {
-		c->page = page;
 		stat(s, ALLOC_FROM_PARTIAL);
+load_from_page:
+		c->node = page_to_nid(page);
+		c->page = page;
 		goto load_freelist;
 	}
@@ -1867,8 +1868,8 @@ new_slab:
 
 		slab_lock(page);
 		__SetPageSlubFrozen(page);
-		c->page = page;
-		goto load_freelist;
+
+		goto load_from_page;
 	}
 	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
 		slab_out_of_memory(s, gfpflags, node);
--
cgit v1.2.3


From 8dc16c6c04b1a82d00a8464ccc08e1fe17d0ff82 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 15 Apr 2011 14:48:16 -0500
Subject: slub: Move debug handling in __slab_free

It's easier to read if it's together with the check for the debugging
flags.

Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 301360bc00c6..c952fac112e8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2057,10 +2057,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	slab_lock(page);
 	stat(s, FREE_SLOWPATH);
 
-	if (kmem_cache_debug(s))
-		goto debug;
+	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
+		goto out_unlock;
 
-checks_ok:
 	prior = page->freelist;
 	set_freepointer(s, object, prior);
 	page->freelist = object;
@@ -2104,12 +2103,6 @@ slab_empty:
 #endif
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
-	return;
-
-debug:
-	if (!free_debug_processing(s, page, x, addr))
-		goto out_unlock;
-	goto checks_ok;
 }
 
 /*
--
cgit v1.2.3


From 1759415e630e5db0dd2390df9f94892cbfb9a8a2 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Thu, 5 May 2011 15:23:54 -0500
Subject: slub: Remove CONFIG_CMPXCHG_LOCAL ifdeffery

Remove the #ifdefs. This means that irqsafe_cpu_cmpxchg_double() is
used everywhere.

There may be performance implications since:

A. We now have to manage a transaction ID for all arches.

B. The interrupt holdoff for arches not supporting CONFIG_CMPXCHG_LOCAL
   is reduced to a very short irqoff section.

There are no multiple irqoff/irqon sequences as a result of this change.
Even in the fallback case we only have to do one disable and enable,
just as before.
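With the #ifdefs gone, the allocation fastpath has the same shape on
all arches (a simplified sketch of the code in the diffs below):

	redo:
		c = __this_cpu_ptr(s->cpu_slab);
		tid = c->tid;		/* snapshot the transaction ID */
		barrier();

		object = c->freelist;
		...
		/*
		 * Succeeds only if freelist and tid are both unchanged,
		 * i.e. no other operation ran on this cpu's queue in the
		 * meantime and we are still on the same cpu.
		 */
		if (unlikely(!this_cpu_cmpxchg_double(
				s->cpu_slab->freelist, s->cpu_slab->tid,
				object, tid,
				get_freepointer(s, object), next_tid(tid))))
			goto redo;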
Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 56 --------------------------------------------------------
 1 file changed, 56 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index c952fac112e8..461199f019d6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1551,7 +1551,6 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	}
 }
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 #ifdef CONFIG_PREEMPT
 /*
  * Calculate the next globally unique transaction for disambiguiation
@@ -1611,17 +1610,12 @@ static inline void note_cmpxchg_failure(const char *n,
 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
 }
 
-#endif
-
 void init_kmem_cache_cpus(struct kmem_cache *s)
 {
-#ifdef CONFIG_CMPXCHG_LOCAL
 	int cpu;
 
 	for_each_possible_cpu(cpu)
 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
-#endif
-
 }
 
 /*
  * Remove the cpu slab
@@ -1654,9 +1648,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 		page->inuse--;
 	}
 	c->page = NULL;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	c->tid = next_tid(c->tid);
-#endif
 	unfreeze_slab(s, page, tail);
 }
 
@@ -1791,7 +1783,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 {
 	void **object;
 	struct page *page;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1802,7 +1793,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	 * pointer.
 	 */
 	c = this_cpu_ptr(s->cpu_slab);
-#endif
 #endif
 
 	/* We handle __GFP_ZERO in the caller */
@@ -1831,10 +1821,8 @@ load_freelist:
 
 unlock_out:
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
-#endif
 	stat(s, ALLOC_SLOWPATH);
 	return object;
 
@@ -1873,9 +1861,7 @@ load_from_page:
 	}
 	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
 		slab_out_of_memory(s, gfpflags, node);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, page, object, addr))
@@ -1902,20 +1888,12 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long tid;
-#else
-	unsigned long flags;
-#endif
 
 	if (slab_pre_alloc_hook(s, gfpflags))
 		return NULL;
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_save(flags);
-#else
 redo:
-#endif
 
 	/*
 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
@@ -1925,7 +1903,6 @@ redo:
 	 */
 	c = __this_cpu_ptr(s->cpu_slab);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	/*
 	 * The transaction ids are globally unique per cpu and per operation on
 	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
@@ -1934,7 +1911,6 @@ redo:
 	 */
 	tid = c->tid;
 	barrier();
-#endif
 
 	object = c->freelist;
 	if (unlikely(!object || !node_match(c, node)))
@@ -1942,7 +1918,6 @@ redo:
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
-#ifdef CONFIG_CMPXCHG_LOCAL
 		/*
 		 * The cmpxchg will only match if there was no additonal
 		 * operation and if we are on the right processor.
@@ -1963,16 +1938,9 @@
 			note_cmpxchg_failure("slab_alloc", s, tid);
 			goto redo;
 		}
-#else
-		c->freelist = get_freepointer(s, object);
-#endif
 		stat(s, ALLOC_FASTPATH);
 	}
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_restore(flags);
-#endif
-
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->objsize);
@@ -2049,11 +2017,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 {
 	void *prior;
 	void **object = (void *)x;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long flags;
 
 	local_irq_save(flags);
-#endif
 
 	slab_lock(page);
 	stat(s, FREE_SLOWPATH);
@@ -2084,9 +2050,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 out_unlock:
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	return;
 
 slab_empty:
@@ -2098,9 +2062,7 @@ slab_empty:
 		stat(s, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
@@ -2121,20 +2083,11 @@ static __always_inline void slab_free(struct kmem_cache *s,
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long tid;
-#else
-	unsigned long flags;
-#endif
 
 	slab_free_hook(s, x);
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_save(flags);
-
-#else
 redo:
-#endif
 
 	/*
	 * Determine the currently cpus per cpu slab.
@@ -2144,15 +2097,12 @@ redo:
 	 */
 	c = __this_cpu_ptr(s->cpu_slab);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	tid = c->tid;
 	barrier();
-#endif
 
 	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
 		set_freepointer(s, object, c->freelist);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				c->freelist, tid,
@@ -2161,16 +2111,10 @@ redo:
 			note_cmpxchg_failure("slab_free", s, tid);
 			goto redo;
 		}
-#else
-		c->freelist = object;
-#endif
 		stat(s, FREE_FASTPATH);
 	} else
 		__slab_free(s, page, x, addr);
-
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_restore(flags);
-#endif
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
--
cgit v1.2.3


From 6332aa9d25e911cc97aa9cc09acee21afda07ea6 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Mon, 16 May 2011 15:26:06 -0500
Subject: slub: Avoid warning for !CONFIG_SLUB_DEBUG

Move the #ifdef so that get_map() is only defined if CONFIG_SLUB_DEBUG
is defined.

Reported-by: David Rientjes
Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 461199f019d6..8657ab838b82 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -326,6 +326,7 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
 	return x.x & OO_MASK;
 }
 
+#ifdef CONFIG_SLUB_DEBUG
 /*
  * Determine a map of object in use on a page.
 *
@@ -341,7 +342,6 @@ static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
 		set_bit(slab_index(p, s, addr), map);
 }
 
-#ifdef CONFIG_SLUB_DEBUG
 /*
  * Debug settings:
  */
--
cgit v1.2.3


From 1393d9a1857471f816d0be1ccc1d6433a86050f6 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Mon, 16 May 2011 15:26:08 -0500
Subject: slub: Make CONFIG_DEBUG_PAGE_ALLOC work with new fastpath

The fastpath can do a speculative access to a page that
CONFIG_DEBUG_PAGE_ALLOC may have marked as invalid in order to retrieve
the pointer to the next free object. Use probe_kernel_read() in that
case so that the speculative access does not cause a page fault.
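Roughly, the race being guarded against (a sketch, not verbatim kernel
code):

	object = c->freelist;		/* speculative: no lock held */
	...
	/*
	 * The object may have been freed and its page released (and,
	 * with CONFIG_DEBUG_PAGEALLOC, unmapped) in the meantime, so
	 * this dereference can fault:
	 */
	next = get_freepointer(s, object);

probe_kernel_read() performs the access via a fault-safe copy; a stale
result is harmless because the subsequent cmpxchg_double() will fail
and the fastpath simply retries.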
Cc: # 38.x
Reported-by: Eric Dumazet
Signed-off-by: Christoph Lameter
Signed-off-by: Eric Dumazet
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 8657ab838b82..97bb5b8d935f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -261,6 +261,18 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
 	return *(void **)(object + s->offset);
 }
 
+static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
+{
+	void *p;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
+#else
+	p = get_freepointer(s, object);
+#endif
+	return p;
+}
+
 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 {
 	*(void **)(object + s->offset) = fp;
@@ -1933,7 +1945,7 @@ redo:
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				object, tid,
-				get_freepointer(s, object), next_tid(tid)))) {
+				get_freepointer_safe(s, object), next_tid(tid)))) {
 
 			note_cmpxchg_failure("slab_alloc", s, tid);
 			goto redo;
--
cgit v1.2.3


From bd07d87fd4b8a6af9820544b6bc6c37215d6f8ad Mon Sep 17 00:00:00 2001
From: David Rientjes
Date: Thu, 12 May 2011 13:10:49 -0700
Subject: slub: avoid label inside conditional

Jumping to a label inside a conditional is considered poor style,
especially considering the current organization of __slab_alloc().

This removes the 'load_from_page' label and just duplicates the three
lines of code that it uses:

	c->node = page_to_nid(page);
	c->page = page;
	goto load_freelist;

since it's probably not worth making this a separate helper function.

Acked-by: Christoph Lameter
Signed-off-by: David Rientjes
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 97bb5b8d935f..9f7bb93e17ec 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1845,7 +1845,6 @@ new_slab:
 	page = get_partial(s, gfpflags, node);
 	if (page) {
 		stat(s, ALLOC_FROM_PARTIAL);
-load_from_page:
 		c->node = page_to_nid(page);
 		c->page = page;
 		goto load_freelist;
@@ -1868,8 +1867,9 @@ load_from_page:
 
 		slab_lock(page);
 		__SetPageSlubFrozen(page);
-
-		goto load_from_page;
+		c->node = page_to_nid(page);
+		c->page = page;
+		goto load_freelist;
 	}
 	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
 		slab_out_of_memory(s, gfpflags, node);
--
cgit v1.2.3


From 442b06bcea23a01934d3da7ec5898fa154a6cafb Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Tue, 17 May 2011 16:29:31 -0500
Subject: slub: Remove node check in slab_free

We can set the page pointer in the per-cpu structure to NULL to get the
same effect as setting c->node to NUMA_NO_NODE. This gets rid of one
check in slab_free() that was only used to force slab_free() onto the
slowpath for debugging.

We still need to set c->node to NUMA_NO_NODE to force the slab_alloc()
fastpath onto the slowpath in case of debugging.
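The slab_free() fastpath check then reduces to a single comparison (a
sketch of the resulting code; see the diff below):

	if (likely(page == c->page)) {
		/*
		 * Lockless free; debugging sets c->page == NULL, so debug
		 * frees always fall through to the __slab_free() slowpath.
		 */
		set_freepointer(s, object, c->freelist);
		...
	} else
		__slab_free(s, page, x, addr);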
Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 9f7bb93e17ec..9ee6f515736d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1881,6 +1881,8 @@ debug:
 	page->inuse++;
 	page->freelist = get_freepointer(s, object);
+	deactivate_slab(s, c);
+	c->page = NULL;
 	c->node = NUMA_NO_NODE;
 	goto unlock_out;
 }
@@ -2112,7 +2114,7 @@ redo:
 	tid = c->tid;
 	barrier();
 
-	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
+	if (likely(page == c->page)) {
 		set_freepointer(s, object, c->freelist);
--
cgit v1.2.3