path: root/mm/slub.c
author	Christoph Lameter <cl@linux.com>	2010-09-28 08:10:26 -0500
committer	Pekka Enberg <penberg@kernel.org>	2010-10-02 10:44:10 +0300
commit	7340cc84141d5236c5dd003359ee921513cd9b84 (patch)
tree	fec0ddb018a948773df32cf109c15f4bd596c97d /mm/slub.c
parent	ed59ecbf8904a40cf0a1ee5d6f100d76d2f44e5f (diff)
slub: reduce differences between SMP and NUMA
Reduce the #ifdefs and simplify bootstrap by making SMP and NUMA as much alike as possible. This means that there will be an additional indirection to get to the kmem_cache_node field under SMP.

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
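For illustration, a minimal compilable sketch of the lookup path this patch unifies. The struct members below are abbreviated stand-ins, not the real definitions from include/linux/slub_def.h; only the get_node() body is taken verbatim from the patch:

/* Abbreviated stand-in; the real struct also carries list heads
 * and debug counters. */
struct kmem_cache_node {
	unsigned long nr_partial;
};

struct kmem_cache {
	/* cpu slabs, object sizes, flags, ... elided */
	struct kmem_cache_node *node[1];	/* MAX_NUMNODES entries under CONFIG_NUMA */
};

/* After the patch this is the only variant: one pointer chase on
 * every configuration. A !NUMA build simply has a single entry at
 * node 0 instead of the old embedded local_node. */
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

This is the trade-off named in the log: SMP now pays one extra dereference that the embedded local_node used to avoid, in exchange for a single bootstrap and lookup path.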
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	39
1 file changed, 1 insertion(+), 38 deletions(-)
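Nearly all of the 38 deleted lines are !NUMA-only fallbacks removed in the hunks below. As a rough outline of the bootstrap that remains (simplified and hedged; the real kmem_cache_init() also sizes the kmalloc caches and registers notifiers), every build now follows the same two steps:

/*
 * 1. Create the kmem_cache_node cache first, so that
 *    init_kmem_cache_nodes() can allocate per-node structures for
 *    every later cache, on NUMA and !NUMA kernels alike.
 * 2. Re-allocate kmem_cache and kmem_cache_node from the now-working
 *    allocator and repair their internal list pointers via
 *    kmem_cache_bootstrap_fixup().
 */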
diff --git a/mm/slub.c b/mm/slub.c
index 7e1fe663795a..064bda294af2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -233,11 +233,7 @@ int slab_is_available(void)
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
-#ifdef CONFIG_NUMA
return s->node[node];
-#else
- return &s->local_node;
-#endif
}
/* Verify that a pointer has an address that is valid within a slab page */
@@ -871,7 +867,7 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
* dilemma by deferring the increment of the count during
* bootstrap (see early_kmem_cache_node_alloc).
*/
- if (!NUMA_BUILD || n) {
+ if (n) {
atomic_long_inc(&n->nr_slabs);
atomic_long_add(objects, &n->total_objects);
}
@@ -2112,7 +2108,6 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
return s->cpu_slab != NULL;
}
-#ifdef CONFIG_NUMA
static struct kmem_cache *kmem_cache_node;
/*
@@ -2202,17 +2197,6 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
}
return 1;
}
-#else
-static void free_kmem_cache_nodes(struct kmem_cache *s)
-{
-}
-
-static int init_kmem_cache_nodes(struct kmem_cache *s)
-{
- init_kmem_cache_node(&s->local_node, s);
- return 1;
-}
-#endif
static void set_min_partial(struct kmem_cache *s, unsigned long min)
{
@@ -3023,8 +3007,6 @@ void __init kmem_cache_init(void)
int caches = 0;
struct kmem_cache *temp_kmem_cache;
int order;
-
-#ifdef CONFIG_NUMA
struct kmem_cache *temp_kmem_cache_node;
unsigned long kmalloc_size;
@@ -3048,12 +3030,6 @@ void __init kmem_cache_init(void)
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
-#else
- /* Allocate a single kmem_cache from the page allocator */
- kmem_size = sizeof(struct kmem_cache);
- order = get_order(kmem_size);
- kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
-#endif
/* Able to allocate the per node structures */
slab_state = PARTIAL;
@@ -3064,7 +3040,6 @@ void __init kmem_cache_init(void)
kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
memcpy(kmem_cache, temp_kmem_cache, kmem_size);
-#ifdef CONFIG_NUMA
/*
* Allocate kmem_cache_node properly from the kmem_cache slab.
* kmem_cache_node is separately allocated so no need to
@@ -3078,18 +3053,6 @@ void __init kmem_cache_init(void)
kmem_cache_bootstrap_fixup(kmem_cache_node);
caches++;
-#else
- /*
- * kmem_cache has kmem_cache_node embedded and we moved it!
- * Update the list heads
- */
- INIT_LIST_HEAD(&kmem_cache->local_node.partial);
- list_splice(&temp_kmem_cache->local_node.partial, &kmem_cache->local_node.partial);
-#ifdef CONFIG_SLUB_DEBUG
- INIT_LIST_HEAD(&kmem_cache->local_node.full);
- list_splice(&temp_kmem_cache->local_node.full, &kmem_cache->local_node.full);
-#endif
-#endif
kmem_cache_bootstrap_fixup(kmem_cache);
caches++;
/* Free temporary boot structure */