From 6cb8f91320d3e720351c21741da795fed580b21b Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Tue, 17 Jul 2007 04:03:22 -0700
Subject: Slab allocators: consistent ZERO_SIZE_PTR support and NULL result semantics

Define ZERO_OR_NULL_PTR macro to be able to remove the checks from the
allocators.  Move ZERO_SIZE_PTR related stuff into slab.h.

Make ZERO_SIZE_PTR work for all slab allocators and get rid of the
WARN_ON_ONCE(size == 0) that is still remaining in SLAB.

Make slub return NULL like the other allocators if a too large memory
segment is requested via __kmalloc.

Signed-off-by: Christoph Lameter
Acked-by: Pekka Enberg
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slub.c | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 1b0a95d75dbb..548d78df81e1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2270,10 +2270,11 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 	int index = kmalloc_index(size);
 
 	if (!index)
-		return NULL;
+		return ZERO_SIZE_PTR;
 
 	/* Allocation too large? */
-	BUG_ON(index < 0);
+	if (index < 0)
+		return NULL;
 
 #ifdef CONFIG_ZONE_DMA
 	if ((flags & SLUB_DMA)) {
@@ -2314,9 +2315,10 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, -1, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2325,9 +2327,10 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, node, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2378,7 +2381,7 @@ void kfree(const void *x)
 	 * this comparison would be true for all "negative" pointers
 	 * (which would cover the whole upper half of the address space).
 	 */
-	if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
+	if (ZERO_OR_NULL_PTR(x))
 		return;
 
 	page = virt_to_head_page(x);
@@ -2687,8 +2690,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, -1, caller);
 }
@@ -2698,8 +2701,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, node, caller);
 }
-- 
cgit v1.2.3
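
The ZERO_SIZE_PTR and ZERO_OR_NULL_PTR definitions themselves live in
include/linux/slab.h and are therefore not visible in this diff, which is
limited to mm/slub.c. Below is a minimal userspace sketch of the semantics
the patch relies on: the two macros are written as this series is believed
to define them in slab.h, while toy_kmalloc(), toy_kfree() and
TOY_KMALLOC_MAX are purely hypothetical stand-ins for the kernel allocator
entry points, not kernel code.

	/*
	 * Userspace sketch of the zero-size/NULL semantics this patch
	 * establishes.  The macros mirror include/linux/slab.h; the toy_*
	 * functions are illustrative only.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	/* Distinguished non-NULL pointer returned for zero-byte allocations. */
	#define ZERO_SIZE_PTR ((void *)16)

	/* True for NULL and for ZERO_SIZE_PTR: "nothing was really allocated". */
	#define ZERO_OR_NULL_PTR(x) \
		((unsigned long)(x) <= (unsigned long)ZERO_SIZE_PTR)

	#define TOY_KMALLOC_MAX (1UL << 20)	/* arbitrary "too large" cutoff */

	static void *toy_kmalloc(size_t size)
	{
		if (!size)
			return ZERO_SIZE_PTR;	/* success, but nothing to touch */
		if (size > TOY_KMALLOC_MAX)
			return NULL;		/* failure, like __kmalloc after this patch */
		return malloc(size);
	}

	static void toy_kfree(const void *x)
	{
		/* Covers both NULL and ZERO_SIZE_PTR, as kfree() now does in slub.c. */
		if (ZERO_OR_NULL_PTR(x))
			return;
		free((void *)x);
	}

	int main(void)
	{
		void *p = toy_kmalloc(0);
		void *q = toy_kmalloc(TOY_KMALLOC_MAX + 1);

		printf("zero-size alloc: %p (zero-or-null: %d)\n", p, ZERO_OR_NULL_PTR(p));
		printf("oversized alloc: %p (zero-or-null: %d)\n", q, ZERO_OR_NULL_PTR(q));

		toy_kfree(p);
		toy_kfree(q);
		return 0;
	}

The reason ZERO_SIZE_PTR is a small non-NULL constant is that a zero-byte
allocation can still be distinguished from allocation failure, faults
immediately if dereferenced, and can be handed back to kfree() without a
special case in every caller.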