Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig          |  17
-rw-r--r--  lib/Kconfig.debug    |  37
-rw-r--r--  lib/Makefile         |   8
-rw-r--r--  lib/bug.c            |   5
-rw-r--r--  lib/fault-inject.c   |   4
-rw-r--r--  lib/gen_crc32table.c |   4
-rw-r--r--  lib/genalloc.c       |   3
-rw-r--r--  lib/hexdump.c        |   6
-rw-r--r--  lib/idr.c            | 436
-rw-r--r--  lib/iomap.c          |  15
-rw-r--r--  lib/kobject.c        |  19
-rw-r--r--  lib/kobject_uevent.c |  34
-rw-r--r--  lib/percpu_counter.c |  68
-rw-r--r--  lib/radix-tree.c     |   3
-rw-r--r--  lib/swiotlb.c        |   5
-rw-r--r--  lib/vsprintf.c       | 208
16 files changed, 740 insertions, 132 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 2e7ae6b9215b..ba3d104994d9 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -41,6 +41,14 @@ config CRC32
kernel tree does. Such modules that use library CRC32 functions
require M here.
+config CRC7
+ tristate "CRC7 functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC7 functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC7
+ functions require M here.
+
config LIBCRC32C
tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
help
@@ -64,6 +72,12 @@ config ZLIB_INFLATE
config ZLIB_DEFLATE
tristate
+config LZO_COMPRESS
+ tristate
+
+config LZO_DECOMPRESS
+ tristate
+
#
# Generic allocator support is selected if needed
#
@@ -124,4 +138,7 @@ config HAS_DMA
depends on !NO_DMA
default y
+config CHECK_SIGNATURE
+ bool
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index da95e10cfd70..cdc9b099e620 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -105,6 +105,15 @@ config DETECT_SOFTLOCKUP
can be detected via the NMI-watchdog, on platforms that
support it.)
+config SCHED_DEBUG
+ bool "Collect scheduler debugging info"
+ depends on DEBUG_KERNEL && PROC_FS
+ default y
+ help
+ If you say Y here, the /proc/sched_debug file will be provided
+ that can help debug the scheduler. The runtime overhead of this
+ option is minimal.
+
config SCHEDSTATS
bool "Collect scheduler statistics"
depends on DEBUG_KERNEL && PROC_FS
@@ -143,6 +152,19 @@ config DEBUG_SLAB_LEAK
bool "Memory leak debugging"
depends on DEBUG_SLAB
+config SLUB_DEBUG_ON
+ bool "SLUB debugging on by default"
+ depends on SLUB && SLUB_DEBUG
+ default n
+ help
+ Boot with debugging on by default. SLUB boots by default with
+ the runtime debug capabilities switched off. Enabling this is
+ equivalent to specifying the "slub_debug" parameter on boot.
+ There is no support for the fine-grained debug control that
+ slub_debug=xxx provides. SLUB debugging may be switched
+ off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
+ "slub_debug=-".
+
config DEBUG_PREEMPT
bool "Debug preemptible kernel"
depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
@@ -261,6 +283,19 @@ config LOCKDEP
select KALLSYMS
select KALLSYMS_ALL
+config LOCK_STAT
+ bool "Lock usage statistics"
+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+ select LOCKDEP
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES
+ select DEBUG_LOCK_ALLOC
+ default n
+ help
+ This feature enables tracking lock contention points.
+
+ For more details, see Documentation/lockstat.txt
+
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
@@ -316,7 +351,7 @@ config DEBUG_HIGHMEM
config DEBUG_BUGVERBOSE
bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
depends on BUG
- depends on ARM || ARM26 || AVR32 || M32R || M68K || SPARC32 || SPARC64 || FRV || SUPERH || GENERIC_BUG || BFIN
+ depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || FRV || SUPERH || GENERIC_BUG || BFIN
default !EMBEDDED
help
Say Y here to make BUG() panics output the file name and line number
diff --git a/lib/Makefile b/lib/Makefile
index c8c8e20784ce..4f3f3e256501 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -5,7 +5,7 @@
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o \
idr.o int_sqrt.o bitmap.o extable.o prio_tree.o \
- sha1.o irq_regs.o reciprocal_div.o
+ sha1.o irq_regs.o reciprocal_div.o argv_split.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -13,7 +13,7 @@ lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o kref.o kobject_uevent.o klist.o
obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
- bust_spinlocks.o hexdump.o
+ bust_spinlocks.o hexdump.o kasprintf.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -22,6 +22,7 @@ endif
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
+obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
@@ -43,12 +44,15 @@ obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
+obj-$(CONFIG_CRC7) += crc7.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
+obj-$(CONFIG_LZO_COMPRESS) += lzo/
+obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
obj-$(CONFIG_TEXTSEARCH) += textsearch.o
obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
diff --git a/lib/bug.c b/lib/bug.c
index 014b582c5c4b..530f38f55787 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -38,6 +38,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/bug.h>
+#include <linux/sched.h>
extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
@@ -112,7 +113,7 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
return module_find_bug(bugaddr);
}
-enum bug_trap_type report_bug(unsigned long bugaddr)
+enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
const struct bug_entry *bug;
const char *file;
@@ -147,7 +148,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr)
"[verbose debug info unavailable]\n",
(void *)bugaddr);
- dump_stack();
+ show_regs(regs);
return BUG_TRAP_TYPE_WARN;
}
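
With regs threaded through, each architecture's trap handler is expected to
pass its pt_regs down so that a WARN_ON() dumps real register state. A minimal
sketch of such a caller (handle_bug_trap(), regs_advance_past_bug() and the
die() signature are illustrative arch-specific names, not part of this patch):

    static void handle_bug_trap(struct pt_regs *regs, unsigned long bugaddr)
    {
            switch (report_bug(bugaddr, regs)) {    /* regs reach show_regs() */
            case BUG_TRAP_TYPE_WARN:
                    /* WARN_ON(): state already printed, step over the trap */
                    regs_advance_past_bug(regs);    /* hypothetical helper */
                    break;
            case BUG_TRAP_TYPE_BUG:
            case BUG_TRAP_TYPE_NONE:
            default:
                    die("kernel BUG", regs, 0);     /* arch-specific die() */
            }
    }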
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index b18fc2ff9ffe..23985a278bbb 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -139,12 +139,14 @@ static void debugfs_ul_set(void *data, u64 val)
*(unsigned long *)data = val;
}
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
static void debugfs_ul_set_MAX_STACK_TRACE_DEPTH(void *data, u64 val)
{
*(unsigned long *)data =
val < MAX_STACK_TRACE_DEPTH ?
val : MAX_STACK_TRACE_DEPTH;
}
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
static u64 debugfs_ul_get(void *data)
{
@@ -159,6 +161,7 @@ static struct dentry *debugfs_create_ul(const char *name, mode_t mode,
return debugfs_create_file(name, mode, parent, value, &fops_ul);
}
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
DEFINE_SIMPLE_ATTRIBUTE(fops_ul_MAX_STACK_TRACE_DEPTH, debugfs_ul_get,
debugfs_ul_set_MAX_STACK_TRACE_DEPTH, "%llu\n");
@@ -169,6 +172,7 @@ static struct dentry *debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
return debugfs_create_file(name, mode, parent, value,
&fops_ul_MAX_STACK_TRACE_DEPTH);
}
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
static void debugfs_atomic_t_set(void *data, u64 val)
{
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index bea5d97df991..0413a483cbbb 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -1,6 +1,10 @@
#include <stdio.h>
#include "crc32defs.h"
+#if defined(__CYGWIN__)
+#include <sys/types.h>
+#else
#include <inttypes.h>
+#endif
#define ENTRIES_PER_LINE 4
diff --git a/lib/genalloc.c b/lib/genalloc.c
index eb7c2bab9ebf..f6d276db2d58 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -54,11 +54,10 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
int nbytes = sizeof(struct gen_pool_chunk) +
(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
- chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+ chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
if (unlikely(chunk == NULL))
return -1;
- memset(chunk, 0, nbytes);
spin_lock_init(&chunk->lock);
chunk->start_addr = addr;
chunk->end_addr = addr + size;
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 473f5aed6cae..bd5edaeaa80b 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -145,9 +145,9 @@ EXPORT_SYMBOL(hex_dump_to_buffer);
*/
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize,
- void *buf, size_t len, bool ascii)
+ const void *buf, size_t len, bool ascii)
{
- u8 *ptr = buf;
+ const u8 *ptr = buf;
int i, linelen, remaining = len;
unsigned char linebuf[200];
@@ -189,7 +189,7 @@ EXPORT_SYMBOL(print_hex_dump);
* rowsize of 16, groupsize of 1, and ASCII output included.
*/
void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
- void *buf, size_t len)
+ const void *buf, size_t len)
{
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
buf, len, 1);
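
With @buf now const-qualified, read-only data can be dumped without a cast.
A minimal usage sketch (the buffer contents are made up for illustration):

    static const u8 frame[] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

    /* 16 bytes per row, 1-byte groups, with an ASCII column */
    print_hex_dump(KERN_DEBUG, "frame: ", DUMP_PREFIX_OFFSET,
                   16, 1, frame, sizeof(frame), true);

    /* shorthand wrapper using exactly those defaults */
    print_hex_dump_bytes("frame: ", DUMP_PREFIX_OFFSET, frame, sizeof(frame));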
diff --git a/lib/idr.c b/lib/idr.c
index 305117ca2d41..d0f1acdbfa3a 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -70,6 +70,26 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
spin_unlock_irqrestore(&idp->lock, flags);
}
+static void idr_mark_full(struct idr_layer **pa, int id)
+{
+ struct idr_layer *p = pa[0];
+ int l = 0;
+
+ __set_bit(id & IDR_MASK, &p->bitmap);
+ /*
+ * If this layer is full mark the bit in the layer above to
+ * show that this part of the radix tree is full. This may
+ * complete the layer above and require walking up the radix
+ * tree.
+ */
+ while (p->bitmap == IDR_FULL) {
+ if (!(p = pa[++l]))
+ break;
+ id = id >> IDR_BITS;
+ __set_bit((id & IDR_MASK), &p->bitmap);
+ }
+}
+
/**
* idr_pre_get - reserve resources for idr allocation
* @idp: idr handle
@@ -95,15 +115,15 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
}
EXPORT_SYMBOL(idr_pre_get);
-static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
+static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
int n, m, sh;
struct idr_layer *p, *new;
- struct idr_layer *pa[MAX_LEVEL];
- int l, id;
+ int l, id, oid;
long bm;
id = *starting_id;
+ restart:
p = idp->top;
l = idp->layers;
pa[l--] = NULL;
@@ -117,12 +137,23 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
if (m == IDR_SIZE) {
/* no space available go back to previous layer. */
l++;
+ oid = id;
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
+
+ /* if already at the top layer, we need to grow */
if (!(p = pa[l])) {
*starting_id = id;
return -2;
}
- continue;
+
+ /* If we need to go up one layer, continue the
+ * loop; otherwise, restart from the top.
+ */
+ sh = IDR_BITS * (l + 1);
+ if (oid >> sh == id >> sh)
+ continue;
+ else
+ goto restart;
}
if (m != n) {
sh = IDR_BITS*l;
@@ -144,30 +175,13 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
pa[l--] = p;
p = p->ary[m];
}
- /*
- * We have reached the leaf node, plant the
- * users pointer and return the raw id.
- */
- p->ary[m] = (struct idr_layer *)ptr;
- __set_bit(m, &p->bitmap);
- p->count++;
- /*
- * If this layer is full mark the bit in the layer above
- * to show that this part of the radix tree is full.
- * This may complete the layer above and require walking
- * up the radix tree.
- */
- n = id;
- while (p->bitmap == IDR_FULL) {
- if (!(p = pa[++l]))
- break;
- n = n >> IDR_BITS;
- __set_bit((n & IDR_MASK), &p->bitmap);
- }
- return(id);
+
+ pa[l] = p;
+ return id;
}
-static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
+static int idr_get_empty_slot(struct idr *idp, int starting_id,
+ struct idr_layer **pa)
{
struct idr_layer *p, *new;
int layers, v, id;
@@ -213,12 +227,31 @@ build_up:
}
idp->top = p;
idp->layers = layers;
- v = sub_alloc(idp, ptr, &id);
+ v = sub_alloc(idp, &id, pa);
if (v == -2)
goto build_up;
return(v);
}
+static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
+{
+ struct idr_layer *pa[MAX_LEVEL];
+ int id;
+
+ id = idr_get_empty_slot(idp, starting_id, pa);
+ if (id >= 0) {
+ /*
+ * Successfully found an empty slot. Install the user
+ * pointer and mark the slot full.
+ */
+ pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
+ pa[0]->count++;
+ idr_mark_full(pa, id);
+ }
+
+ return id;
+}
+
/**
* idr_get_new_above - allocate new idr entry above or equal to a start id
* @idp: idr handle
@@ -358,6 +391,53 @@ void idr_remove(struct idr *idp, int id)
EXPORT_SYMBOL(idr_remove);
/**
+ * idr_remove_all - remove all ids from the given idr tree
+ * @idp: idr handle
+ *
+ * idr_destroy() only frees up unused, cached idr_layers, but this
+ * function will remove all id mappings and leave all idr_layers
+ * unused.
+ *
+ * A typical clean-up sequence for objects stored in an idr tree will
+ * use idr_for_each() to free all objects, if necessary, then
+ * idr_remove_all() to remove all ids, and idr_destroy() to free
+ * up the cached idr_layers.
+ */
+void idr_remove_all(struct idr *idp)
+{
+ int n, id, max;
+ struct idr_layer *p;
+ struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer **paa = &pa[0];
+
+ n = idp->layers * IDR_BITS;
+ p = idp->top;
+ max = 1 << n;
+
+ id = 0;
+ while (id < max) {
+ while (n > IDR_BITS && p) {
+ n -= IDR_BITS;
+ *paa++ = p;
+ p = p->ary[(id >> n) & IDR_MASK];
+ }
+
+ id += 1 << n;
+ while (n < fls(id)) {
+ if (p) {
+ memset(p, 0, sizeof *p);
+ free_layer(idp, p);
+ }
+ n += IDR_BITS;
+ p = *--paa;
+ }
+ }
+ idp->top = NULL;
+ idp->layers = 0;
+}
+EXPORT_SYMBOL(idr_remove_all);
+
+/**
* idr_destroy - release all cached layers within an idr tree
* @idp: idr handle
*/
@@ -404,6 +484,61 @@ void *idr_find(struct idr *idp, int id)
EXPORT_SYMBOL(idr_find);
/**
+ * idr_for_each - iterate through all stored pointers
+ * @idp: idr handle
+ * @fn: function to be called for each pointer
+ * @data: data passed back to callback function
+ *
+ * Iterate over the pointers registered with the given idr. The
+ * callback function will be called for each pointer currently
+ * registered, passing the id, the pointer and the data pointer passed
+ * to this function. It is not safe to modify the idr tree while in
+ * the callback, so functions such as idr_get_new and idr_remove are
+ * not allowed.
+ *
+ * We check the return of @fn each time. If it returns anything other
+ * than 0, we break out and return that value.
+ *
+ * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
+ */
+int idr_for_each(struct idr *idp,
+ int (*fn)(int id, void *p, void *data), void *data)
+{
+ int n, id, max, error = 0;
+ struct idr_layer *p;
+ struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer **paa = &pa[0];
+
+ n = idp->layers * IDR_BITS;
+ p = idp->top;
+ max = 1 << n;
+
+ id = 0;
+ while (id < max) {
+ while (n > 0 && p) {
+ n -= IDR_BITS;
+ *paa++ = p;
+ p = p->ary[(id >> n) & IDR_MASK];
+ }
+
+ if (p) {
+ error = fn(id, (void *)p, data);
+ if (error)
+ break;
+ }
+
+ id += 1 << n;
+ while (n < fls(id)) {
+ n += IDR_BITS;
+ p = *--paa;
+ }
+ }
+
+ return error;
+}
+EXPORT_SYMBOL(idr_for_each);
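
Put together with idr_remove_all() and idr_destroy() above, the documented
clean-up sequence looks like this (struct foo objects with kfree()-able
payloads are an assumption for illustration):

    static int free_foo(int id, void *p, void *data)
    {
            kfree(p);       /* p is the pointer stored under this id */
            return 0;       /* non-zero would abort the iteration */
    }

    static void destroy_foo_tree(struct idr *idp)
    {
            /* caller serializes against idr_get_new()/idr_remove() */
            idr_for_each(idp, free_foo, NULL);  /* free the objects */
            idr_remove_all(idp);                /* drop all id mappings */
            idr_destroy(idp);                   /* free cached idr_layers */
    }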
+
+/**
* idr_replace - replace pointer for given id
* @idp: idr handle
* @ptr: pointer you want associated with the id
@@ -455,7 +590,7 @@ static int init_id_cache(void)
{
if (!idr_layer_cache)
idr_layer_cache = kmem_cache_create("idr_layer_cache",
- sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
+ sizeof(struct idr_layer), 0, 0, idr_cache_ctor);
return 0;
}
@@ -473,3 +608,248 @@ void idr_init(struct idr *idp)
spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
+
+
+/*
+ * IDA - IDR based ID allocator
+ *
+ * This is an id allocator without id -> pointer translation. Memory
+ * usage is much lower than with a full-blown idr because each id only
+ * occupies a bit. ida uses a custom leaf node which contains
+ * IDA_BITMAP_BITS slots.
+ *
+ * 2007-04-25 written by Tejun Heo <htejun@gmail.com>
+ */
+
+static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
+{
+ unsigned long flags;
+
+ if (!ida->free_bitmap) {
+ spin_lock_irqsave(&ida->idr.lock, flags);
+ if (!ida->free_bitmap) {
+ ida->free_bitmap = bitmap;
+ bitmap = NULL;
+ }
+ spin_unlock_irqrestore(&ida->idr.lock, flags);
+ }
+
+ kfree(bitmap);
+}
+
+/**
+ * ida_pre_get - reserve resources for ida allocation
+ * @ida: ida handle
+ * @gfp_mask: memory allocation flag
+ *
+ * This function should be called prior to locking and calling the
+ * following function. It preallocates enough memory to satisfy the
+ * worst possible allocation.
+ *
+ * If the system is REALLY out of memory this function returns 0,
+ * otherwise 1.
+ */
+int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
+{
+ /* allocate idr_layers */
+ if (!idr_pre_get(&ida->idr, gfp_mask))
+ return 0;
+
+ /* allocate free_bitmap */
+ if (!ida->free_bitmap) {
+ struct ida_bitmap *bitmap;
+
+ bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
+ if (!bitmap)
+ return 0;
+
+ free_bitmap(ida, bitmap);
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(ida_pre_get);
+
+/**
+ * ida_get_new_above - allocate new ID above or equal to a start id
+ * @ida: ida handle
+ * @starting_id: id to start search at
+ * @p_id: pointer to the allocated handle
+ *
+ * Allocate new ID above or equal to @starting_id. It should be called with
+ * any required locks.
+ *
+ * If memory is required, it will return -EAGAIN, you should unlock
+ * and go back to the ida_pre_get() call. If the ida is full, it will
+ * return -ENOSPC.
+ *
+ * @p_id returns a value in the range 0 ... 0x7fffffff.
+ */
+int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
+{
+ struct idr_layer *pa[MAX_LEVEL];
+ struct ida_bitmap *bitmap;
+ unsigned long flags;
+ int idr_id = starting_id / IDA_BITMAP_BITS;
+ int offset = starting_id % IDA_BITMAP_BITS;
+ int t, id;
+
+ restart:
+ /* get vacant slot */
+ t = idr_get_empty_slot(&ida->idr, idr_id, pa);
+ if (t < 0) {
+ if (t == -1)
+ return -EAGAIN;
+ else /* will be -3 */
+ return -ENOSPC;
+ }
+
+ if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
+ return -ENOSPC;
+
+ if (t != idr_id)
+ offset = 0;
+ idr_id = t;
+
+ /* if bitmap isn't there, create a new one */
+ bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
+ if (!bitmap) {
+ spin_lock_irqsave(&ida->idr.lock, flags);
+ bitmap = ida->free_bitmap;
+ ida->free_bitmap = NULL;
+ spin_unlock_irqrestore(&ida->idr.lock, flags);
+
+ if (!bitmap)
+ return -EAGAIN;
+
+ memset(bitmap, 0, sizeof(struct ida_bitmap));
+ pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
+ pa[0]->count++;
+ }
+
+ /* look for an empty slot */
+ t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
+ if (t == IDA_BITMAP_BITS) {
+ /* no empty slot after offset, continue to the next chunk */
+ idr_id++;
+ offset = 0;
+ goto restart;
+ }
+
+ id = idr_id * IDA_BITMAP_BITS + t;
+ if (id >= MAX_ID_BIT)
+ return -ENOSPC;
+
+ __set_bit(t, bitmap->bitmap);
+ if (++bitmap->nr_busy == IDA_BITMAP_BITS)
+ idr_mark_full(pa, idr_id);
+
+ *p_id = id;
+
+ /* Each leaf node can handle nearly a thousand slots and the
+ * whole idea of ida is to have a small memory footprint.
+ * Throw away extra resources one by one after each successful
+ * allocation.
+ */
+ if (ida->idr.id_free_cnt || ida->free_bitmap) {
+ struct idr_layer *p = alloc_layer(&ida->idr);
+ if (p)
+ kmem_cache_free(idr_layer_cache, p);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ida_get_new_above);
+
+/**
+ * ida_get_new - allocate new ID
+ * @ida: idr handle
+ * @p_id: pointer to the allocated handle
+ *
+ * Allocate new ID. It should be called with any required locks.
+ *
+ * If memory is required, it will return -EAGAIN, you should unlock
+ * and go back to the ida_pre_get() call. If the ida is full, it will
+ * return -ENOSPC.
+ *
+ * @p_id returns a value in the range 0 ... 0x7fffffff.
+ */
+int ida_get_new(struct ida *ida, int *p_id)
+{
+ return ida_get_new_above(ida, 0, p_id);
+}
+EXPORT_SYMBOL(ida_get_new);
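
The -EAGAIN protocol above leads to the usual pre-get/retry loop. A minimal
sketch, assuming my_ida has been set up with ida_init() and the caller's
my_lock serializes access to it:

    static struct ida my_ida;           /* ida_init(&my_ida) at init time */
    static DEFINE_SPINLOCK(my_lock);

    static int alloc_my_id(void)
    {
            int id, err;

            do {
                    if (!ida_pre_get(&my_ida, GFP_KERNEL))
                            return -ENOMEM;
                    spin_lock(&my_lock);
                    err = ida_get_new(&my_ida, &id);
                    spin_unlock(&my_lock);
            } while (err == -EAGAIN);

            return err ? err : id;      /* -ENOSPC when the space is full */
    }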
+
+/**
+ * ida_remove - remove the given ID
+ * @ida: ida handle
+ * @id: ID to free
+ */
+void ida_remove(struct ida *ida, int id)
+{
+ struct idr_layer *p = ida->idr.top;
+ int shift = (ida->idr.layers - 1) * IDR_BITS;
+ int idr_id = id / IDA_BITMAP_BITS;
+ int offset = id % IDA_BITMAP_BITS;
+ int n;
+ struct ida_bitmap *bitmap;
+
+ /* clear full bits while looking up the leaf idr_layer */
+ while ((shift > 0) && p) {
+ n = (idr_id >> shift) & IDR_MASK;
+ __clear_bit(n, &p->bitmap);
+ p = p->ary[n];
+ shift -= IDR_BITS;
+ }
+
+ if (p == NULL)
+ goto err;
+
+ n = idr_id & IDR_MASK;
+ __clear_bit(n, &p->bitmap);
+
+ bitmap = (void *)p->ary[n];
+ if (!test_bit(offset, bitmap->bitmap))
+ goto err;
+
+ /* update bitmap and remove it if empty */
+ __clear_bit(offset, bitmap->bitmap);
+ if (--bitmap->nr_busy == 0) {
+ __set_bit(n, &p->bitmap); /* to please idr_remove() */
+ idr_remove(&ida->idr, idr_id);
+ free_bitmap(ida, bitmap);
+ }
+
+ return;
+
+ err:
+ printk(KERN_WARNING
+ "ida_remove called for id=%d which is not allocated.\n", id);
+}
+EXPORT_SYMBOL(ida_remove);
+
+/**
+ * ida_destroy - release all cached layers within an ida tree
+ * @ida: ida handle
+ */
+void ida_destroy(struct ida *ida)
+{
+ idr_destroy(&ida->idr);
+ kfree(ida->free_bitmap);
+}
+EXPORT_SYMBOL(ida_destroy);
+
+/**
+ * ida_init - initialize ida handle
+ * @ida: ida handle
+ *
+ * This function is used to set up the handle (@ida) that you will pass
+ * to the rest of the functions.
+ */
+void ida_init(struct ida *ida)
+{
+ memset(ida, 0, sizeof(struct ida));
+ idr_init(&ida->idr);
+}
+EXPORT_SYMBOL(ida_init);
diff --git a/lib/iomap.c b/lib/iomap.c
index a57d262a5ed9..864f2ec1966e 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -240,7 +240,20 @@ void ioport_unmap(void __iomem *addr)
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
-/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+/**
+ * pci_iomap - create a virtual mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and just do
+ * the right thing in either case.
+ *
+ * @maxlen specifies the maximum length to map. If you want access to
+ * the complete BAR without checking its length first, pass %0 here.
+ */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
unsigned long start = pci_resource_start(dev, bar);
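
A minimal sketch of the usage the new kernel-doc describes (the BAR number
and register offsets are made up for illustration):

    static int my_pci_probe_mmio(struct pci_dev *pdev)
    {
            void __iomem *regs;
            u32 status;

            regs = pci_iomap(pdev, 0, 0);   /* BAR 0; 0 = map the whole BAR */
            if (!regs)
                    return -ENOMEM;

            iowrite32(0x1, regs + 0x10);    /* same code for MMIO and PIO */
            status = ioread32(regs + 0x14);

            pci_iounmap(pdev, regs);
            return status == ~0u ? -EIO : 0; /* all-ones often means dead device */
    }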
diff --git a/lib/kobject.c b/lib/kobject.c
index ac1520651b9b..4b08e0ff95c8 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -44,7 +44,7 @@ static int populate_dir(struct kobject * kobj)
return error;
}
-static int create_dir(struct kobject * kobj, struct dentry *shadow_parent)
+static int create_dir(struct kobject *kobj, struct sysfs_dirent *shadow_parent)
{
int error = 0;
if (kobject_name(kobj)) {
@@ -162,7 +162,7 @@ static void unlink(struct kobject * kobj)
* @shadow_parent: sysfs directory to add to.
*/
-int kobject_shadow_add(struct kobject * kobj, struct dentry *shadow_parent)
+int kobject_shadow_add(struct kobject *kobj, struct sysfs_dirent *shadow_parent)
{
int error = 0;
struct kobject * parent;
@@ -338,7 +338,7 @@ int kobject_rename(struct kobject * kobj, const char *new_name)
/* Note : if we want to send the new name alone, not the full path,
* we could probably use kobject_name(kobj); */
- error = sysfs_rename_dir(kobj, kobj->parent->dentry, new_name);
+ error = sysfs_rename_dir(kobj, kobj->parent->sd, new_name);
/* This function is mostly/only used for network interfaces.
* Some hotplug packages track interfaces by their name and
@@ -361,8 +361,8 @@ out:
* @new_name: object's new name
*/
-int kobject_shadow_rename(struct kobject * kobj, struct dentry *new_parent,
- const char *new_name)
+int kobject_shadow_rename(struct kobject *kobj,
+ struct sysfs_dirent *new_parent, const char *new_name)
{
int error = 0;
@@ -597,10 +597,17 @@ int kset_add(struct kset * k)
int kset_register(struct kset * k)
{
+ int err;
+
if (!k)
return -EINVAL;
+
kset_init(k);
- return kset_add(k);
+ err = kset_add(k);
+ if (err)
+ return err;
+ kobject_uevent(&k->kobj, KOBJ_ADD);
+ return 0;
}
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 12e311dc664c..df02814699d7 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -25,6 +25,16 @@
#define BUFFER_SIZE 2048 /* buffer for the variables */
#define NUM_ENVP 32 /* number of env pointers */
+/* the strings here must match the enum in include/linux/kobject.h */
+const char *kobject_actions[] = {
+ "add",
+ "remove",
+ "change",
+ "move",
+ "online",
+ "offline",
+};
+
#if defined(CONFIG_HOTPLUG)
u64 uevent_seqnum;
char uevent_helper[UEVENT_HELPER_PATH_LEN] = "/sbin/hotplug";
@@ -33,26 +43,6 @@ static DEFINE_SPINLOCK(sequence_lock);
static struct sock *uevent_sock;
#endif
-static char *action_to_string(enum kobject_action action)
-{
- switch (action) {
- case KOBJ_ADD:
- return "add";
- case KOBJ_REMOVE:
- return "remove";
- case KOBJ_CHANGE:
- return "change";
- case KOBJ_OFFLINE:
- return "offline";
- case KOBJ_ONLINE:
- return "online";
- case KOBJ_MOVE:
- return "move";
- default:
- return NULL;
- }
-}
-
/**
* kobject_uevent_env - send an uevent with environmental data
*
@@ -83,7 +73,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
pr_debug("%s\n", __FUNCTION__);
- action_string = action_to_string(action);
+ action_string = kobject_actions[action];
if (!action_string) {
pr_debug("kobject attempted to send uevent without action_string!\n");
return -EINVAL;
@@ -208,7 +198,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
argv [0] = uevent_helper;
argv [1] = (char *)subsystem;
argv [2] = NULL;
- call_usermodehelper (argv[0], argv, envp, 0);
+ call_usermodehelper (argv[0], argv, envp, UMH_WAIT_EXEC);
}
exit:
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 850449080e1c..cf22c617baa4 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -3,8 +3,17 @@
*/
#include <linux/percpu_counter.h>
+#include <linux/notifier.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
#include <linux/module.h>
+#ifdef CONFIG_HOTPLUG_CPU
+static LIST_HEAD(percpu_counters);
+static DEFINE_MUTEX(percpu_counters_lock);
+#endif
+
void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
{
long count;
@@ -36,7 +45,7 @@ s64 percpu_counter_sum(struct percpu_counter *fbc)
spin_lock(&fbc->lock);
ret = fbc->count;
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
@@ -44,3 +53,60 @@ s64 percpu_counter_sum(struct percpu_counter *fbc)
return ret < 0 ? 0 : ret;
}
EXPORT_SYMBOL(percpu_counter_sum);
+
+void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+{
+ spin_lock_init(&fbc->lock);
+ fbc->count = amount;
+ fbc->counters = alloc_percpu(s32);
+#ifdef CONFIG_HOTPLUG_CPU
+ mutex_lock(&percpu_counters_lock);
+ list_add(&fbc->list, &percpu_counters);
+ mutex_unlock(&percpu_counters_lock);
+#endif
+}
+EXPORT_SYMBOL(percpu_counter_init);
+
+void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+ free_percpu(fbc->counters);
+#ifdef CONFIG_HOTPLUG_CPU
+ mutex_lock(&percpu_counters_lock);
+ list_del(&fbc->list);
+ mutex_unlock(&percpu_counters_lock);
+#endif
+}
+EXPORT_SYMBOL(percpu_counter_destroy);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu;
+ struct percpu_counter *fbc;
+
+ if (action != CPU_DEAD)
+ return NOTIFY_OK;
+
+ cpu = (unsigned long)hcpu;
+ mutex_lock(&percpu_counters_lock);
+ list_for_each_entry(fbc, &percpu_counters, list) {
+ s32 *pcount;
+
+ spin_lock(&fbc->lock);
+ pcount = per_cpu_ptr(fbc->counters, cpu);
+ fbc->count += *pcount;
+ *pcount = 0;
+ spin_unlock(&fbc->lock);
+ }
+ mutex_unlock(&percpu_counters_lock);
+ return NOTIFY_OK;
+}
+
+static int __init percpu_counter_startup(void)
+{
+ hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
+ return 0;
+}
+module_init(percpu_counter_startup);
+#endif
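
With init/destroy out of line and each counter registered for CPU hotplug,
usage stays the same on the fast path. A sketch (the counter name and module
hooks are illustrative):

    static struct percpu_counter nr_widgets;

    static int __init widgets_init(void)
    {
            percpu_counter_init(&nr_widgets, 0);    /* joins percpu_counters list */
            return 0;
    }

    static void widget_created(void)
    {
            percpu_counter_mod(&nr_widgets, 1);     /* cheap per-cpu fast path */
    }

    static void __exit widgets_exit(void)
    {
            printk(KERN_INFO "widgets left: %lld\n",
                   (long long)percpu_counter_sum(&nr_widgets));
            percpu_counter_destroy(&nr_widgets);    /* leaves the hotplug list */
    }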
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 402eb4eb6b23..514efb200be6 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -151,6 +151,7 @@ int radix_tree_preload(gfp_t gfp_mask)
out:
return ret;
}
+EXPORT_SYMBOL(radix_tree_preload);
static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
int offset)
@@ -1020,7 +1021,7 @@ void __init radix_tree_init(void)
{
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
- SLAB_PANIC, radix_tree_node_ctor, NULL);
+ SLAB_PANIC, radix_tree_node_ctor);
radix_tree_init_maxindex();
hotcpu_notifier(radix_tree_callback, 0);
}
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 10c13ad0d82d..a7381d55663a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -357,7 +357,8 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
* This is needed when we sync the memory. Then we sync the buffer if
* needed.
*/
- io_tlb_orig_addr[index] = buffer;
+ for (i = 0; i < nslots; i++)
+ io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT);
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
memcpy(dma_addr, buffer, size);
@@ -418,6 +419,8 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
char *buffer = io_tlb_orig_addr[index];
+ buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
+
switch (target) {
case SYNC_FOR_CPU:
if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 017290241261..7b481cea54ae 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -135,6 +135,103 @@ static int skip_atoi(const char **s)
return i;
}
+/* Decimal conversion is by far the most typical, and is used
+ * for /proc and /sys data. This directly impacts e.g. top performance
+ * with many processes running. We optimize it for speed
+ * using code from
+ * http://www.cs.uiowa.edu/~jones/bcd/decimal.html
+ * (with permission from the author, Douglas W. Jones). */
+
+/* Correctly formats any integer in [0,99999].
+ * Outputs from one to five digits depending on input.
+ * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
+static char* put_dec_trunc(char *buf, unsigned q)
+{
+ unsigned d3, d2, d1, d0;
+ d1 = (q>>4) & 0xf;
+ d2 = (q>>8) & 0xf;
+ d3 = (q>>12);
+
+ d0 = 6*(d3 + d2 + d1) + (q & 0xf);
+ q = (d0 * 0xcd) >> 11;
+ d0 = d0 - 10*q;
+ *buf++ = d0 + '0'; /* least significant digit */
+ d1 = q + 9*d3 + 5*d2 + d1;
+ if (d1 != 0) {
+ q = (d1 * 0xcd) >> 11;
+ d1 = d1 - 10*q;
+ *buf++ = d1 + '0'; /* next digit */
+
+ d2 = q + 2*d2;
+ if ((d2 != 0) || (d3 != 0)) {
+ q = (d2 * 0xd) >> 7;
+ d2 = d2 - 10*q;
+ *buf++ = d2 + '0'; /* next digit */
+
+ d3 = q + 4*d3;
+ if (d3 != 0) {
+ q = (d3 * 0xcd) >> 11;
+ d3 = d3 - 10*q;
+ *buf++ = d3 + '0'; /* next digit */
+ if (q != 0)
+ *buf++ = q + '0'; /* most sign. digit */
+ }
+ }
+ }
+ return buf;
+}
+/* Same with if's removed. Always emits five digits */
+static char* put_dec_full(char *buf, unsigned q)
+{
+ /* BTW, if q is in [0,9999], 8-bit ints will be enough, */
+ /* but anyway, gcc produces better code with full-sized ints */
+ unsigned d3, d2, d1, d0;
+ d1 = (q>>4) & 0xf;
+ d2 = (q>>8) & 0xf;
+ d3 = (q>>12);
+
+ /* Possible ways to approx. divide by 10 */
+ /* gcc -O2 replaces multiply with shifts and adds */
+ // (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
+ // (x * 0x67) >> 10: 1100111
+ // (x * 0x34) >> 9: 110100 - same
+ // (x * 0x1a) >> 8: 11010 - same
+ // (x * 0x0d) >> 7: 1101 - same, shortest code (on i386)
+
+ d0 = 6*(d3 + d2 + d1) + (q & 0xf);
+ q = (d0 * 0xcd) >> 11;
+ d0 = d0 - 10*q;
+ *buf++ = d0 + '0';
+ d1 = q + 9*d3 + 5*d2 + d1;
+ q = (d1 * 0xcd) >> 11;
+ d1 = d1 - 10*q;
+ *buf++ = d1 + '0';
+
+ d2 = q + 2*d2;
+ q = (d2 * 0xd) >> 7;
+ d2 = d2 - 10*q;
+ *buf++ = d2 + '0';
+
+ d3 = q + 4*d3;
+ q = (d3 * 0xcd) >> 11; /* - shorter code */
+ /* q = (d3 * 0x67) >> 10; - would also work */
+ d3 = d3 - 10*q;
+ *buf++ = d3 + '0';
+ *buf++ = q + '0';
+ return buf;
+}
+/* Not inlining helps gcc to use registers better */
+static noinline char* put_dec(char *buf, unsigned long long num)
+{
+ while (1) {
+ unsigned rem;
+ if (num < 100000)
+ return put_dec_trunc(buf, num);
+ rem = do_div(num, 100000);
+ buf = put_dec_full(buf, rem);
+ }
+}
+
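
Because the helpers emit digits least-significant first via shift-and-add
approximations of division by 10, they are easy to sanity-check in user space
against sprintf(). A throwaway harness (paste put_dec_trunc() from above into
the same file) might look like:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[8], out[8], ref[8];
            unsigned q;
            int n, i;

            for (q = 0; q <= 99999; q++) {
                    n = put_dec_trunc(buf, q) - buf;
                    for (i = 0; i < n; i++) /* digits arrive reversed */
                            out[i] = buf[n - 1 - i];
                    out[n] = '\0';
                    sprintf(ref, "%u", q);
                    if (strcmp(out, ref))
                            printf("mismatch at %u: got %s\n", q, out);
            }
            return 0;
    }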
#define ZEROPAD 1 /* pad with zero */
#define SIGN 2 /* unsigned/signed long */
#define PLUS 4 /* show plus */
@@ -143,12 +240,14 @@ static int skip_atoi(const char **s)
#define SPECIAL 32 /* 0x */
#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
-static char * number(char * buf, char * end, unsigned long long num, int base, int size, int precision, int type)
+static char *number(char *buf, char *end, unsigned long long num, int base, int size, int precision, int type)
{
- char c,sign,tmp[66];
+ char sign,tmp[66];
const char *digits;
- static const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
- static const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ /* we are called with base 8, 10 or 16 only, so we don't need "g..." */
+ static const char small_digits[] = "0123456789abcdefx"; /* "ghijklmnopqrstuvwxyz"; */
+ static const char large_digits[] = "0123456789ABCDEFX"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
+ int need_pfx = ((type & SPECIAL) && base != 10);
int i;
digits = (type & LARGE) ? large_digits : small_digits;
@@ -156,7 +255,6 @@ static char * number(char * buf, char * end, unsigned long long num, int base, i
type &= ~ZEROPAD;
if (base < 2 || base > 36)
return NULL;
- c = (type & ZEROPAD) ? '0' : ' ';
sign = 0;
if (type & SIGN) {
if ((signed long long) num < 0) {
@@ -171,64 +269,85 @@ static char * number(char * buf, char * end, unsigned long long num, int base, i
size--;
}
}
- if (type & SPECIAL) {
+ if (need_pfx) {
+ size--;
if (base == 16)
- size -= 2;
- else if (base == 8)
size--;
}
+
+ /* generate full string in tmp[], in reverse order */
i = 0;
if (num == 0)
- tmp[i++]='0';
- else while (num != 0)
+ tmp[i++] = '0';
+ /* Generic code, for any base:
+ else do {
tmp[i++] = digits[do_div(num,base)];
+ } while (num != 0);
+ */
+ else if (base != 10) { /* 8 or 16 */
+ int mask = base - 1;
+ int shift = 3;
+ if (base == 16) shift = 4;
+ do {
+ tmp[i++] = digits[((unsigned char)num) & mask];
+ num >>= shift;
+ } while (num);
+ } else { /* base 10 */
+ i = put_dec(tmp, num) - tmp;
+ }
+
+ /* printing 100 using %2d gives "100", not "00" */
if (i > precision)
precision = i;
+ /* leading space padding */
size -= precision;
- if (!(type&(ZEROPAD+LEFT))) {
- while(size-->0) {
+ if (!(type & (ZEROPAD+LEFT))) {
+ while(--size >= 0) {
if (buf < end)
*buf = ' ';
++buf;
}
}
+ /* sign */
if (sign) {
if (buf < end)
*buf = sign;
++buf;
}
- if (type & SPECIAL) {
- if (base==8) {
- if (buf < end)
- *buf = '0';
- ++buf;
- } else if (base==16) {
- if (buf < end)
- *buf = '0';
- ++buf;
+ /* "0x" / "0" prefix */
+ if (need_pfx) {
+ if (buf < end)
+ *buf = '0';
+ ++buf;
+ if (base == 16) {
if (buf < end)
- *buf = digits[33];
+ *buf = digits[16]; /* for arbitrary base: digits[33]; */
++buf;
}
}
+ /* zero or space padding */
if (!(type & LEFT)) {
- while (size-- > 0) {
+ char c = (type & ZEROPAD) ? '0' : ' ';
+ while (--size >= 0) {
if (buf < end)
*buf = c;
++buf;
}
}
- while (i < precision--) {
+ /* hmm even more zero padding? */
+ while (i <= --precision) {
if (buf < end)
*buf = '0';
++buf;
}
- while (i-- > 0) {
+ /* actual digits of result */
+ while (--i >= 0) {
if (buf < end)
*buf = tmp[i];
++buf;
}
- while (size-- > 0) {
+ /* trailing space padding */
+ while (--size >= 0) {
if (buf < end)
*buf = ' ';
++buf;
@@ -276,7 +395,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
used for unknown buffer sizes. */
if (unlikely((int) size < 0)) {
/* There can be only one.. */
- static int warn = 1;
+ static char warn = 1;
WARN_ON(warn);
warn = 0;
return 0;
@@ -859,38 +978,3 @@ int sscanf(const char * buf, const char * fmt, ...)
}
EXPORT_SYMBOL(sscanf);
-
-
-/* Simplified asprintf. */
-char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
-{
- unsigned int len;
- char *p;
- va_list aq;
-
- va_copy(aq, ap);
- len = vsnprintf(NULL, 0, fmt, aq);
- va_end(aq);
-
- p = kmalloc(len+1, gfp);
- if (!p)
- return NULL;
-
- vsnprintf(p, len+1, fmt, ap);
-
- return p;
-}
-EXPORT_SYMBOL(kvasprintf);
-
-char *kasprintf(gfp_t gfp, const char *fmt, ...)
-{
- va_list ap;
- char *p;
-
- va_start(ap, fmt);
- p = kvasprintf(gfp, fmt, ap);
- va_end(ap);
-
- return p;
-}
-EXPORT_SYMBOL(kasprintf);