author	Christoph Lameter <clameter@sgi.com>	2008-04-14 19:11:40 +0300
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2008-04-27 18:28:18 +0300
commit	65c3376aaca96c66aa76014aaf430398964b68cb (patch)
tree	9cd8d64a2cf211e76afbad0de5ec1484af6bfafb /mm/slub.c
parent	205ab99dd103e3dd5b0964dad8a16dfe2db69b2e (diff)
slub: Fallback to minimal order during slab page allocation
If any higher order allocation fails, fall back to the smallest order necessary to contain at least one object. This enables fallback for all allocations to order-0 pages. The fallback will waste more memory (objects will not fit as neatly) and fallback slabs will not be as efficient as larger slabs, since they contain fewer objects.

Note that SLAB also depends on order-1 allocations for some slabs that would waste too much memory if forced into PAGE_SIZE'd pages. SLUB can now deal with failing order-1 allocations, which SLAB cannot do.

Add a new field, min, that holds the order and object count for the smallest possible order of a slab cache.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
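The arithmetic behind s->oo and s->min can be checked with a small userspace sketch. This is a model of the computation only, not kernel code; PAGE_SHIFT and the 1500-byte object size are assumptions chosen for illustration:

/*
 * Minimal userspace sketch of the order fallback described above.
 * Models the math of oo_make()/get_order(); not the kernel code.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* smallest order whose pages hold at least one object of `size' bytes */
static int min_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 1500;	/* hypothetical object size */
	int preferred = 1;		/* preferred slab order, as in s->oo */
	int fallback = min_order(size);	/* smallest usable order, as in s->min */

	printf("preferred: order %d, %lu objects/slab\n",
	       preferred, (PAGE_SIZE << preferred) / size);
	printf("fallback:  order %d, %lu objects/slab\n",
	       fallback, (PAGE_SIZE << fallback) / size);
	return 0;
}

With 4 KiB pages, the preferred order-1 slab packs five 1500-byte objects into 8 KiB, while the order-0 fallback still fits two into 4 KiB at the cost of more wasted space per page; that is the trade-off described above.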
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	39
1 file changed, 28 insertions(+), 11 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index c8514e93ffdf..35c22d940ba7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1113,28 +1113,43 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
/*
* Slab allocation and freeing
*/
+static inline struct page *alloc_slab_page(gfp_t flags, int node,
+ struct kmem_cache_order_objects oo)
+{
+ int order = oo_order(oo);
+
+ if (node == -1)
+ return alloc_pages(flags, order);
+ else
+ return alloc_pages_node(node, flags, order);
+}
+
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
- int order = oo_order(oo);
- int pages = 1 << order;
flags |= s->allocflags;
- if (node == -1)
- page = alloc_pages(flags, order);
- else
- page = alloc_pages_node(node, flags, order);
-
- if (!page)
- return NULL;
+ page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
+ oo);
+ if (unlikely(!page)) {
+ oo = s->min;
+ /*
+ * Allocation may have failed due to fragmentation.
+ * Try a lower order alloc if possible
+ */
+ page = alloc_slab_page(flags, node, oo);
+ if (!page)
+ return NULL;
+ stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
+ }
page->objects = oo_objects(oo);
mod_zone_page_state(page_zone(page),
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
- pages);
+ 1 << oo_order(oo));
return page;
}
@@ -2347,6 +2362,7 @@ static int calculate_sizes(struct kmem_cache *s)
* Determine the number of objects per slab
*/
s->oo = oo_make(order, size);
+ s->min = oo_make(get_order(size), size);
if (oo_objects(s->oo) > oo_objects(s->max))
s->max = s->oo;
@@ -4163,7 +4179,7 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
-
+STAT_ATTR(ORDER_FALLBACK, order_fallback);
#endif
static struct attribute *slab_attrs[] = {
@@ -4216,6 +4232,7 @@ static struct attribute *slab_attrs[] = {
&deactivate_to_head_attr.attr,
&deactivate_to_tail_attr.attr,
&deactivate_remote_frees_attr.attr,
+ &order_fallback_attr.attr,
#endif
NULL
};
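Usage note (an assumption based on how SLUB exposes its statistics, not part of this patch): with CONFIG_SLUB_STATS enabled, the new counter should surface as a per-cache sysfs file, e.g.

cat /sys/kernel/slab/kmalloc-4096/order_fallback

A nonzero count indicates how often that cache had to drop from its preferred order (s->oo) to the minimal order (s->min).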