author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-07 18:22:29 -0800
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-07 18:22:29 -0800
commit		c00f08d705e149fbfaf7a252b4d4fbb7affdcc96 (patch)
tree		8c916856376d0d400ddda239d5be386f9b9516d7 /include
parent		c8b6de16d9434405e5832b8772e4f986ddd5118e (diff)
parent		3adbefee6fd58a061b2bf1df4f3769701860fc62 (diff)
Merge branch 'slub-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm
* 'slub-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm:
  SLUB: fix checkpatch warnings
  Use non atomic unlock
  SLUB: Support for performance statistics
  SLUB: Alternate fast paths using cmpxchg_local
  SLUB: Use unique end pointer for each slab page.
  SLUB: Deal with annoying gcc warning on kfree()
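Two of these commits touch the include/ headers shown below: the unique end pointer and the performance statistics. The cmpxchg_local fast path they support replaces the irq-disable/enable pair in the allocation and free hot paths with a single compare-and-exchange on the per-cpu freelist, which only has to be atomic against this cpu, not against others. A minimal userspace sketch of that freelist pop, with hypothetical names throughout; the GCC builtin __sync_val_compare_and_swap stands in for the kernel's cmpxchg_local, and NULL stands in for the per-page end marker:

/*
 * Sketch only, not the kernel code: pop the first object off a
 * per-cpu freelist without taking a lock or disabling interrupts.
 * The free pointer to the next object is stored inside each free
 * object itself, at a word offset recorded in the cpu structure.
 */
#include <stddef.h>

struct cpu_cache {
	void **freelist;	/* first free object, or NULL when empty */
	size_t offset;		/* free-pointer offset within an object, in words */
};

void *alloc_fastpath(struct cpu_cache *c)
{
	void **object;

	do {
		object = c->freelist;
		if (!object)
			return NULL;	/* empty: a real allocator takes the slow path */
		/*
		 * Swing the list head to the next object. Retry if an
		 * interrupt-context alloc/free on this cpu changed the
		 * head between the load and the exchange.
		 */
	} while (__sync_val_compare_and_swap(&c->freelist, object,
			(void **)object[c->offset]) != object);

	return object;
}

On x86, cmpxchg_local compiles to a cmpxchg without the LOCK prefix, which is markedly cheaper than the locked variant precisely because it makes no cross-cpu guarantees.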
Diffstat (limited to 'include')
-rw-r--r--	include/linux/mm_types.h	5
-rw-r--r--	include/linux/slub_def.h	23
2 files changed, 27 insertions, 1 deletion
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 34023c65d466..bfee0bd1d435 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -64,7 +64,10 @@ struct page {
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
 		spinlock_t ptl;
 #endif
-		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+		struct {
+			struct kmem_cache *slab; /* SLUB: Pointer to slab */
+			void *end;		 /* SLUB: end marker */
+		};
 		struct page *first_page;	/* Compound tail pages */
 	};
 	union {
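The anonymous struct keeps `slab` at its old offset inside the union while overlaying a second word, `end`, on the storage the other union members already share. The "unique end pointer" commit in this merge uses it so each slab page's freelist can terminate in a sentinel that differs from page to page, rather than a shared NULL, which is what the cmpxchg_local fast path compares against. A compilable layout sketch (C11 anonymous structs), with the unrelated union members reduced to a single stand-in and all names hypothetical:

/*
 * Sketch of the union layout in struct page after this change.
 * Only the slab/end pair mirrors the patch; everything else is
 * a stand-in. Requires C11 (or GNU C) anonymous structs.
 */
#include <stddef.h>
#include <stdio.h>

struct kmem_cache;			/* opaque, as in the kernel */

struct page_like {
	union {
		struct {		/* SLUB fields from the patch */
			struct kmem_cache *slab;
			void *end;
		};
		struct page_like *first_page;	/* stand-in for the other members */
	};
};

int main(void)
{
	struct page_like pg = { .slab = NULL };

	/* slab aliases first_page; end occupies the word after it. */
	printf("slab@%zu end@%zu first_page@%zu size=%zu\n",
	       offsetof(struct page_like, slab),
	       offsetof(struct page_like, end),
	       offsetof(struct page_like, first_page),
	       sizeof pg);
	return 0;
}

This is why the hunk only grows struct page when SLUB is the active allocator's user of the union arm: the new struct widens that arm by one pointer.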
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index ddb1a706b144..5e6d3d634d5b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -11,12 +11,35 @@
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
 
+enum stat_item {
+	ALLOC_FASTPATH,		/* Allocation from cpu slab */
+	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
+	FREE_FASTPATH,		/* Free to cpu slab */
+	FREE_SLOWPATH,		/* Freeing not to cpu slab */
+	FREE_FROZEN,		/* Freeing to frozen slab */
+	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
+	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
+	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
+	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
+	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
+	FREE_SLAB,		/* Slab freed to the page allocator */
+	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
+	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
+	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
+	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of the partial list */
+	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of the partial list */
+	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	NR_SLUB_STAT_ITEMS };
+
 struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to first free per cpu object */
 	struct page *page;	/* The slab from which we are allocating */
 	int node;		/* The node of the page (or -1 for debug) */
 	unsigned int offset;	/* Freepointer offset (in word units) */
 	unsigned int objsize;	/* Size of an object (from kmem_cache) */
+#ifdef CONFIG_SLUB_STATS
+	unsigned stat[NR_SLUB_STAT_ITEMS];
+#endif
 };
 
 struct kmem_cache_node {
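Because the stat[] array only exists under CONFIG_SLUB_STATS, every call site has to compile away when the option is off. The statistics commit in this merge does that with a small inline helper in mm/slub.c; a sketch of the pattern (consult mm/slub.c in the tree for the authoritative version):

/*
 * Record one event on this cpu's kmem_cache_cpu. When
 * CONFIG_SLUB_STATS is not set the body is empty and the call
 * vanishes, so the allocator fast paths carry no overhead.
 */
static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	c->stat[si]++;
#endif
}

A fast-path allocation hit is then recorded as stat(c, ALLOC_FASTPATH), and the per-cache totals are exported read-only through the cache's sysfs directory under /sys/kernel/slab/.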