author     Justin Waters <justin.waters@timesys.com>  2008-02-26 13:07:02 -0500
committer  Justin Waters <justin.waters@timesys.com>  2008-02-26 13:07:02 -0500
commit     b80a32b9cc634adfa8eaef33ec981e7febf2ade2 (patch)
tree       f256bce13ba11f514a388160df84e1410bedbe2b  /include/linux/slub_def.h
parent     594133ef22fae0d737bd1b57352cf3f48a192c63 (diff)
Update the i.MX31 Kernel to 2.6.23 (tags: 2.6.23-mx31ads-200802261807, 2.6.23-mx31-200802261807)
This is the result of a brute-force attempt to update the kernel to 2.6.23. Now that we have a git tree, our effort will be a little nicer in the future.

Signed-off-by: Justin Waters <justin.waters@timesys.com>
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--  include/linux/slub_def.h  43
1 file changed, 13 insertions, 30 deletions
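
The hunks below switch kmalloc_index(), kmalloc_slab(), kmalloc() and kmalloc_node() from inline to __always_inline so that GCC reliably folds the size-to-cache lookup when the requested size is a compile-time constant. For context, here is a minimal standalone sketch of that dispatch pattern; it is not the kernel code, and the demo_* names are purely illustrative.

#include <stdio.h>
#include <stdlib.h>

#define __always_inline inline __attribute__((__always_inline__))

/* Stand-in for the out-of-line __kmalloc() slow path. */
static void *demo_generic_alloc(size_t size)
{
	return malloc(size);
}

/* Stand-in for kmalloc_index(): an if-chain the compiler can fold
 * down to a single constant when "size" is known at compile time. */
static __always_inline int demo_index(size_t size)
{
	if (size <= 8)
		return 3;
	if (size <= 16)
		return 4;
	if (size <= 32)
		return 5;
	if (size <= 64)
		return 6;
	return -1;	/* too big for this demo table */
}

/* Stand-in for kmalloc(): constant sizes resolve to a fixed cache
 * index at compile time, everything else takes the generic path. */
static __always_inline void *demo_alloc(size_t size)
{
	if (__builtin_constant_p(size)) {
		int index = demo_index(size);

		if (index >= 0)
			printf("constant size %zu -> cache index %d\n",
			       size, index);
	}
	return demo_generic_alloc(size);
}

int main(void)
{
	void *p = demo_alloc(24);	/* folds to index 5 at compile time */

	free(p);
	return 0;
}
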
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 6207a3d8da71..74962077f632 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -16,7 +16,9 @@ struct kmem_cache_node {
unsigned long nr_partial;
atomic_long_t nr_slabs;
struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
struct list_head full;
+#endif
};
/*
@@ -44,7 +46,9 @@ struct kmem_cache {
int align; /* Alignment */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
+#ifdef CONFIG_SLUB_DEBUG
struct kobject kobj; /* For sysfs */
+#endif
#ifdef CONFIG_NUMA
int defrag_ratio;
@@ -74,7 +78,7 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
* Sorry that the following has to be that ugly but some versions of GCC
* have trouble with constant propagation and loops.
*/
-static inline int kmalloc_index(size_t size)
+static __always_inline int kmalloc_index(size_t size)
{
if (!size)
return 0;
@@ -129,7 +133,7 @@ static inline int kmalloc_index(size_t size)
* This ought to end up with a global pointer to the right cache
* in kmalloc_caches.
*/
-static inline struct kmem_cache *kmalloc_slab(size_t size)
+static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
int index = kmalloc_index(size);
@@ -156,22 +160,13 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
-#define SLUB_DMA 0
+#define SLUB_DMA (__force gfp_t)0
#endif
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+void *__kmalloc(size_t size, gfp_t flags);
-/*
- * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
- *
- * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
- *
- * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
- * Both make kfree a no-op.
- */
-#define ZERO_SIZE_PTR ((void *)16)
-
-
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);
@@ -184,23 +179,11 @@ static inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc(size, flags);
}
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
- if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
- struct kmem_cache *s = kmalloc_slab(size);
-
- if (!s)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_zalloc(s, flags);
- } else
- return __kzalloc(size, flags);
-}
-
#ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);