Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      |   3
-rw-r--r--  lib/Kconfig.debug                |  32
-rw-r--r--  lib/Makefile                     |   4
-rw-r--r--  lib/assoc_array.c                |   6
-rw-r--r--  lib/atomic64.c                   |  83
-rw-r--r--  lib/bitmap.c                     |   4
-rw-r--r--  lib/dma-debug.c                  |   2
-rw-r--r--  lib/dynamic_debug.c              |  67
-rw-r--r--  lib/flex_proportions.c           |   8
-rw-r--r--  lib/genalloc.c                   |  50
-rw-r--r--  lib/hexdump.c                    |  16
-rw-r--r--  lib/hweight.c                    |   4
-rw-r--r--  lib/idr.c                        |   2
-rw-r--r--  lib/libcrc32c.c                  |  16
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c  | 103
-rw-r--r--  lib/percpu-refcount.c            | 299
-rw-r--r--  lib/percpu_counter.c             |  20
-rw-r--r--  lib/prio_heap.c                  |  70
-rw-r--r--  lib/proportions.c                |  10
-rw-r--r--  lib/raid6/algos.c                |  12
-rw-r--r--  lib/random32.c                   |  39
-rw-r--r--  lib/rhashtable.c                 |  25
-rw-r--r--  lib/string.c                     |  32
-rw-r--r--  lib/string_helpers.c             | 312
-rw-r--r--  lib/test-string_helpers.c        | 277
-rw-r--r--  lib/test_bpf.c                   |  63
-rw-r--r--  lib/textsearch.c                 |   4
-rw-r--r--  lib/vsprintf.c                   |  73
28 files changed, 1232 insertions, 404 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index a5ce0c7f6c30..54cf309a92a5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -51,6 +51,9 @@ config PERCPU_RWSEM
config ARCH_USE_CMPXCHG_LOCKREF
bool
+config ARCH_HAS_FAST_MULTIPLIER
+ bool
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 07c28323f88f..4e35a5d767ed 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -824,6 +824,18 @@ config SCHEDSTATS
application, you can say N to avoid the very slight overhead
this adds.
+config SCHED_STACK_END_CHECK
+ bool "Detect stack corruption on calls to schedule()"
+ depends on DEBUG_KERNEL
+ default n
+ help
+ This option checks for a stack overrun on calls to schedule().
+ If the stack end location is found to be over written always panic as
+ the content of the corrupted region can no longer be trusted.
+ This is to ensure no erroneous behaviour occurs which could result in
+ data corruption or a sporadic crash at a later stage once the region
+ is examined. The runtime overhead introduced is minimal.
+
config TIMER_STATS
bool "Collect kernel timers statistics"
depends on DEBUG_KERNEL && PROC_FS
@@ -892,6 +904,10 @@ config DEBUG_WW_MUTEX_SLOWPATH
the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
will test all possible w/w mutex interface abuse with the
exception of simply not acquiring all the required locks.
+ Note that this feature can introduce significant overhead, so
+ it really should not be enabled in a production or distro kernel,
+ even a debug kernel. If you are a driver writer, enable it. If
+ you are a distro, do not.
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
@@ -948,7 +964,7 @@ config PROVE_LOCKING
the proof of observed correctness is also maintained for an
arbitrary combination of these separate locking variants.
- For more details, see Documentation/lockdep-design.txt.
+ For more details, see Documentation/locking/lockdep-design.txt.
config LOCKDEP
bool
@@ -969,7 +985,7 @@ config LOCK_STAT
help
This feature enables tracking lock contention points
- For more details, see Documentation/lockstat.txt
+ For more details, see Documentation/locking/lockstat.txt
This also enables lock events required by "perf lock",
subcommand of perf.
@@ -1032,8 +1048,13 @@ config TRACE_IRQFLAGS
either tracing or lock debugging.
config STACKTRACE
- bool
+ bool "Stack backtrace support"
depends on STACKTRACE_SUPPORT
+ help
+ This option causes the kernel to create a /proc/pid/stack for
+ every process, showing its current stack trace.
+ It is also used by various kernel debugging features that require
+ stack trace generation.
config DEBUG_KOBJECT
bool "kobject debugging"
@@ -1627,7 +1648,7 @@ config DMA_API_DEBUG
If unsure, say N.
-config TEST_MODULE
+config TEST_LKM
tristate "Test module loading with 'hello world' module"
default n
depends on m
@@ -1663,7 +1684,8 @@ config TEST_BPF
against the BPF interpreter or BPF JIT compiler depending on the
current setting. This is in particular useful for BPF JIT compiler
development, but also to run regression tests against changes in
- the interpreter code.
+ the interpreter code. It also enables test stubs for eBPF maps and
+ verifier used by user space verifier testsuite.
If unsure, say N.
diff --git a/lib/Makefile b/lib/Makefile
index d6b4bc496408..7512dc978f18 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,7 +11,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o timerqueue.o\
idr.o int_sqrt.o extable.o \
sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
- proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
+ proportions.o flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o
@@ -31,7 +31,7 @@ obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
-obj-$(CONFIG_TEST_MODULE) += test_module.o
+obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index c0b1007011e1..2404d03e251a 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1723,11 +1723,13 @@ ascend_old_tree:
shortcut = assoc_array_ptr_to_shortcut(ptr);
slot = shortcut->parent_slot;
cursor = shortcut->back_pointer;
+ if (!cursor)
+ goto gc_complete;
} else {
slot = node->parent_slot;
cursor = ptr;
}
- BUG_ON(!ptr);
+ BUG_ON(!cursor);
node = assoc_array_ptr_to_node(cursor);
slot++;
goto continue_node;
@@ -1735,7 +1737,7 @@ ascend_old_tree:
gc_complete:
edit->set[0].to = new_root;
assoc_array_apply_edit(edit);
- edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
+ array->nr_leaves_on_tree = nr_leaves_on_tree;
return 0;
enomem:
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 08a4f068e61e..1298c05ef528 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -70,53 +70,42 @@ void atomic64_set(atomic64_t *v, long long i)
}
EXPORT_SYMBOL(atomic64_set);
-void atomic64_add(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
-
- raw_spin_lock_irqsave(lock, flags);
- v->counter += a;
- raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_add);
-
-long long atomic64_add_return(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
- long long val;
-
- raw_spin_lock_irqsave(lock, flags);
- val = v->counter += a;
- raw_spin_unlock_irqrestore(lock, flags);
- return val;
-}
-EXPORT_SYMBOL(atomic64_add_return);
-
-void atomic64_sub(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
-
- raw_spin_lock_irqsave(lock, flags);
- v->counter -= a;
- raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_sub);
-
-long long atomic64_sub_return(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
- long long val;
-
- raw_spin_lock_irqsave(lock, flags);
- val = v->counter -= a;
- raw_spin_unlock_irqrestore(lock, flags);
- return val;
-}
-EXPORT_SYMBOL(atomic64_sub_return);
+#define ATOMIC64_OP(op, c_op) \
+void atomic64_##op(long long a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ raw_spinlock_t *lock = lock_addr(v); \
+ \
+ raw_spin_lock_irqsave(lock, flags); \
+ v->counter c_op a; \
+ raw_spin_unlock_irqrestore(lock, flags); \
+} \
+EXPORT_SYMBOL(atomic64_##op);
+
+#define ATOMIC64_OP_RETURN(op, c_op) \
+long long atomic64_##op##_return(long long a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ raw_spinlock_t *lock = lock_addr(v); \
+ long long val; \
+ \
+ raw_spin_lock_irqsave(lock, flags); \
+ val = (v->counter c_op a); \
+ raw_spin_unlock_irqrestore(lock, flags); \
+ return val; \
+} \
+EXPORT_SYMBOL(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_OP_RETURN(op, c_op)
+
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
long long atomic64_dec_if_positive(atomic64_t *v)
{
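For reference, the new ATOMIC64_OP()/ATOMIC64_OP_RETURN() macros generate the same spinlock-protected bodies as the open-coded functions they replace; a sketch of what ATOMIC64_OPS(add, +=) expands to, reconstructed from the deleted code above:

void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;			/* "c_op" pasted in as += */
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);

long long atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = (v->counter += a);
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_add_return);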
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 1e031f2c9aba..cd250a2e14cb 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -884,7 +884,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
* read it, you're overqualified for your current job.)
*
* In other words, @orig is mapped onto (surjectively) @dst,
- * using the the map { <n, m> | the n-th bit of @relmap is the
+ * using the map { <n, m> | the n-th bit of @relmap is the
* m-th set bit of @relmap }.
*
* Any set bits in @orig above bit number W, where W is the
@@ -932,7 +932,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
*
* Further lets say we use the following code, invoking
* bitmap_fold() then bitmap_onto, as suggested above to
- * avoid the possitility of an empty @dst result:
+ * avoid the possibility of an empty @dst result:
*
* unsigned long *tmp; // a temporary bitmap's bits
*
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 98f2d7e91a91..add80cc02dbe 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1149,7 +1149,7 @@ static void check_unmap(struct dma_debug_entry *ref)
static void check_for_stack(struct device *dev, void *addr)
{
if (object_is_on_stack(addr))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from"
+ err_printk(dev, NULL, "DMA-API: device driver maps memory from "
"stack [addr=%p]\n", addr);
}
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c9afbe2c445a..dfba05521748 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -537,10 +537,9 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
return buf;
}
-int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
+void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
{
va_list args;
- int res;
struct va_format vaf;
char buf[PREFIX_SIZE];
@@ -552,21 +551,17 @@ int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
- res = printk(KERN_DEBUG "%s%pV",
- dynamic_emit_prefix(descriptor, buf), &vaf);
+ printk(KERN_DEBUG "%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_pr_debug);
-int __dynamic_dev_dbg(struct _ddebug *descriptor,
+void __dynamic_dev_dbg(struct _ddebug *descriptor,
const struct device *dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int res;
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -577,30 +572,27 @@ int __dynamic_dev_dbg(struct _ddebug *descriptor,
vaf.va = &args;
if (!dev) {
- res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
+ printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
} else {
char buf[PREFIX_SIZE];
- res = dev_printk_emit(7, dev, "%s%s %s: %pV",
- dynamic_emit_prefix(descriptor, buf),
- dev_driver_string(dev), dev_name(dev),
- &vaf);
+ dev_printk_emit(7, dev, "%s%s %s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev), dev_name(dev),
+ &vaf);
}
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_dev_dbg);
#ifdef CONFIG_NET
-int __dynamic_netdev_dbg(struct _ddebug *descriptor,
- const struct net_device *dev, const char *fmt, ...)
+void __dynamic_netdev_dbg(struct _ddebug *descriptor,
+ const struct net_device *dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int res;
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -613,23 +605,21 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
if (dev && dev->dev.parent) {
char buf[PREFIX_SIZE];
- res = dev_printk_emit(7, dev->dev.parent,
- "%s%s %s %s%s: %pV",
- dynamic_emit_prefix(descriptor, buf),
- dev_driver_string(dev->dev.parent),
- dev_name(dev->dev.parent),
- netdev_name(dev), netdev_reg_state(dev),
- &vaf);
+ dev_printk_emit(7, dev->dev.parent,
+ "%s%s %s %s%s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev->dev.parent),
+ dev_name(dev->dev.parent),
+ netdev_name(dev), netdev_reg_state(dev),
+ &vaf);
} else if (dev) {
- res = printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
- netdev_reg_state(dev), &vaf);
+ printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
+ netdev_reg_state(dev), &vaf);
} else {
- res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
+ printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
}
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_netdev_dbg);
@@ -829,22 +819,9 @@ static const struct seq_operations ddebug_proc_seqops = {
*/
static int ddebug_proc_open(struct inode *inode, struct file *file)
{
- struct ddebug_iter *iter;
- int err;
-
vpr_info("called\n");
-
- iter = kzalloc(sizeof(*iter), GFP_KERNEL);
- if (iter == NULL)
- return -ENOMEM;
-
- err = seq_open(file, &ddebug_proc_seqops);
- if (err) {
- kfree(iter);
- return err;
- }
- ((struct seq_file *)file->private_data)->private = iter;
- return 0;
+ return seq_open_private(file, &ddebug_proc_seqops,
+ sizeof(struct ddebug_iter));
}
static const struct file_operations ddebug_proc_fops = {
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index ebf3bac460b0..8f25652f40d4 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -34,13 +34,13 @@
*/
#include <linux/flex_proportions.h>
-int fprop_global_init(struct fprop_global *p)
+int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
int err;
p->period = 0;
/* Use 1 to avoid dealing with periods with 0 events... */
- err = percpu_counter_init(&p->events, 1);
+ err = percpu_counter_init(&p->events, 1, gfp);
if (err)
return err;
seqcount_init(&p->sequence);
@@ -168,11 +168,11 @@ void fprop_fraction_single(struct fprop_global *p,
*/
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
-int fprop_local_init_percpu(struct fprop_local_percpu *pl)
+int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
int err;
- err = percpu_counter_init(&pl->events, 0);
+ err = percpu_counter_init(&pl->events, 0, gfp);
if (err)
return err;
pl->period = 0;
diff --git a/lib/genalloc.c b/lib/genalloc.c
index bdb9a456bcbb..cce4dd68c40d 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -403,6 +403,35 @@ void gen_pool_for_each_chunk(struct gen_pool *pool,
EXPORT_SYMBOL(gen_pool_for_each_chunk);
/**
+ * addr_in_gen_pool - checks if an address falls within the range of a pool
+ * @pool: the generic memory pool
+ * @start: start address
+ * @size: size of the region
+ *
+ * Check if the range of addresses falls within the specified pool. Returns
+ * true if the entire range is contained in the pool and false otherwise.
+ */
+bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+ size_t size)
+{
+ bool found = false;
+ unsigned long end = start + size;
+ struct gen_pool_chunk *chunk;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
+ if (start >= chunk->start_addr && start <= chunk->end_addr) {
+ if (end <= chunk->end_addr) {
+ found = true;
+ break;
+ }
+ }
+ }
+ rcu_read_unlock();
+ return found;
+}
+
+/**
* gen_pool_avail - get available free space of the pool
* @pool: pool to get available free space
*
@@ -481,6 +510,26 @@ unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
EXPORT_SYMBOL(gen_pool_first_fit);
/**
+ * gen_pool_first_fit_order_align - find the first available region
+ * of memory matching the size requirement. The region will be aligned
+ * to the order of the size specified.
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ */
+unsigned long gen_pool_first_fit_order_align(unsigned long *map,
+ unsigned long size, unsigned long start,
+ unsigned int nr, void *data)
+{
+ unsigned long align_mask = roundup_pow_of_two(nr) - 1;
+
+ return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+}
+EXPORT_SYMBOL(gen_pool_first_fit_order_align);
+
+/**
* gen_pool_best_fit - find the best fitting region of memory
* macthing the size requirement (no alignment constraint)
* @map: The address to base the search on
@@ -588,6 +637,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np,
if (!np_pool)
return NULL;
pdev = of_find_device_by_node(np_pool);
+ of_node_put(np_pool);
if (!pdev)
return NULL;
return dev_get_gen_pool(&pdev->dev);
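genalloc gains an order-aligned first-fit algorithm and a range-containment check; a minimal caller sketch, assuming an SRAM pool whose base address (sram_base) and size are invented for illustration and using the existing gen_pool_create/add/set_algo/alloc APIs:

	struct gen_pool *pool = gen_pool_create(5, -1);	/* 32-byte granules */
	unsigned long va;

	gen_pool_add(pool, sram_base, 64 * 1024, -1);

	/* allocations get aligned to the order of their own size */
	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	va = gen_pool_alloc(pool, 4096);	/* offset in the chunk is 4 KiB aligned */

	/* new helper: is [va, va + 4096) entirely inside the pool? */
	if (!addr_in_gen_pool(pool, va, 4096))
		pr_warn("range escapes the pool\n");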
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 8499c810909a..270773b91923 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -59,6 +59,22 @@ int hex2bin(u8 *dst, const char *src, size_t count)
EXPORT_SYMBOL(hex2bin);
/**
+ * bin2hex - convert binary data to an ascii hexadecimal string
+ * @dst: ascii hexadecimal result
+ * @src: binary data
+ * @count: binary data length
+ */
+char *bin2hex(char *dst, const void *src, size_t count)
+{
+ const unsigned char *_src = src;
+
+ while (count--)
+ dst = hex_byte_pack(dst, *_src++);
+ return dst;
+}
+EXPORT_SYMBOL(bin2hex);
+
+/**
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
* @buf: data blob to dump
* @len: number of bytes in the @buf
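The new bin2hex() is the inverse of hex2bin(): it emits two lowercase hex digits per input byte and returns a pointer just past the last digit, without NUL-terminating. A minimal caller sketch (the byte values are made up):

	const u8 id[3] = { 0xde, 0xad, 0x2b };
	char buf[2 * sizeof(id) + 1];
	char *p;

	p = bin2hex(buf, id, sizeof(id));
	*p = '\0';			/* caller adds the terminator */
	/* buf now holds "dead2b" */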
diff --git a/lib/hweight.c b/lib/hweight.c
index b7d81ba143d1..9a5c1f221558 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -11,7 +11,7 @@
unsigned int __sw_hweight32(unsigned int w)
{
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x55555555;
w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
w = (w + (w >> 4)) & 0x0f0f0f0f;
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w)
return __sw_hweight32((unsigned int)(w >> 32)) +
__sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x5555555555555555ul;
w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
diff --git a/lib/idr.c b/lib/idr.c
index 50be3fa9b657..e654aebd5f80 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -626,7 +626,7 @@ static void __idr_remove_all(struct idr *idp)
* idr_destroy().
*
* A typical clean-up sequence for objects stored in an idr tree will use
- * idr_for_each() to free all objects, if necessay, then idr_destroy() to
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
* free up the id mappings and cached idr_layers.
*/
void idr_destroy(struct idr *idp)
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index b3131f5cf8a2..6a08ce7d6adc 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -41,20 +41,18 @@ static struct crypto_shash *tfm;
u32 crc32c(u32 crc, const void *address, unsigned int length)
{
- struct {
- struct shash_desc shash;
- char ctx[crypto_shash_descsize(tfm)];
- } desc;
+ SHASH_DESC_ON_STACK(shash, tfm);
+ u32 *ctx = (u32 *)shash_desc_ctx(shash);
int err;
- desc.shash.tfm = tfm;
- desc.shash.flags = 0;
- *(u32 *)desc.ctx = crc;
+ shash->tfm = tfm;
+ shash->flags = 0;
+ *ctx = crc;
- err = crypto_shash_update(&desc.shash, address, length);
+ err = crypto_shash_update(shash, address, length);
BUG_ON(err);
- return *(u32 *)desc.ctx;
+ return *ctx;
}
EXPORT_SYMBOL(crc32c);
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 8563081e8da3..a1c387f6afba 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -19,31 +19,21 @@
#include <linux/lzo.h>
#include "lzodefs.h"
-#define HAVE_IP(t, x) \
- (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
- (((t + x) >= t) && ((t + x) >= x)))
+#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
+#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
+#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
+#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
+#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
-#define HAVE_OP(t, x) \
- (((size_t)(op_end - op) >= (size_t)(t + x)) && \
- (((t + x) >= t) && ((t + x) >= x)))
-
-#define NEED_IP(t, x) \
- do { \
- if (!HAVE_IP(t, x)) \
- goto input_overrun; \
- } while (0)
-
-#define NEED_OP(t, x) \
- do { \
- if (!HAVE_OP(t, x)) \
- goto output_overrun; \
- } while (0)
-
-#define TEST_LB(m_pos) \
- do { \
- if ((m_pos) < out) \
- goto lookbehind_overrun; \
- } while (0)
+/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
+ * count without overflowing an integer. The multiply will overflow when
+ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
+ * depending on the base count. Since the base count is taken from a u8
+ * and a few bits, it is safe to assume that it will always be lower than
+ * or equal to 2*255, thus we can always prevent any overflow by accepting
+ * two less 255 steps. See Documentation/lzo.txt for more information.
+ */
+#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len)
@@ -75,17 +65,24 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
if (t < 16) {
if (likely(state == 0)) {
if (unlikely(t == 0)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
- t += 15 + *ip++;
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 15 + *ip++;
}
t += 3;
copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
+ if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
const unsigned char *ie = ip + t;
unsigned char *oe = op + t;
do {
@@ -101,8 +98,8 @@ copy_literal_run:
} else
#endif
{
- NEED_OP(t, 0);
- NEED_IP(t, 3);
+ NEED_OP(t);
+ NEED_IP(t + 3);
do {
*op++ = *ip++;
} while (--t > 0);
@@ -115,7 +112,7 @@ copy_literal_run:
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
TEST_LB(m_pos);
- NEED_OP(2, 0);
+ NEED_OP(2);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -136,13 +133,20 @@ copy_literal_run:
} else if (t >= 32) {
t = (t & 31) + (3 - 1);
if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
- t += 31 + *ip++;
- NEED_IP(2, 0);
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 31 + *ip++;
+ NEED_IP(2);
}
m_pos = op - 1;
next = get_unaligned_le16(ip);
@@ -154,13 +158,20 @@ copy_literal_run:
m_pos -= (t & 8) << 11;
t = (t & 7) + (3 - 1);
if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
- t += 7 + *ip++;
- NEED_IP(2, 0);
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 7 + *ip++;
+ NEED_IP(2);
}
next = get_unaligned_le16(ip);
ip += 2;
@@ -174,7 +185,7 @@ copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (op - m_pos >= 8) {
unsigned char *oe = op + t;
- if (likely(HAVE_OP(t, 15))) {
+ if (likely(HAVE_OP(t + 15))) {
do {
COPY8(op, m_pos);
op += 8;
@@ -184,7 +195,7 @@ copy_literal_run:
m_pos += 8;
} while (op < oe);
op = oe;
- if (HAVE_IP(6, 0)) {
+ if (HAVE_IP(6)) {
state = next;
COPY4(op, ip);
op += next;
@@ -192,7 +203,7 @@ copy_literal_run:
continue;
}
} else {
- NEED_OP(t, 0);
+ NEED_OP(t);
do {
*op++ = *m_pos++;
} while (op < oe);
@@ -201,7 +212,7 @@ copy_literal_run:
#endif
{
unsigned char *oe = op + t;
- NEED_OP(t, 0);
+ NEED_OP(t);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -214,15 +225,15 @@ match_next:
state = next;
t = next;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
+ if (likely(HAVE_IP(6) && HAVE_OP(4))) {
COPY4(op, ip);
op += t;
ip += t;
} else
#endif
{
- NEED_IP(t, 3);
- NEED_OP(t, 0);
+ NEED_IP(t + 3);
+ NEED_OP(t);
while (t > 0) {
*op++ = *ip++;
t--;
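In the rewritten run-length handling, (offset << 8) - offset is simply offset * 255 written without a multiply, and MAX_255_COUNT bounds offset so that the product plus the small base count (at most 2 * 255) cannot wrap a size_t. A standalone worked example with assumed values:

	size_t offset = 3;			/* three 0x00 count bytes were skipped */
	size_t run = (offset << 8) - offset;	/* 3 * 256 - 3 == 765 == 3 * 255 */

	/* the old loop reached the same total by executing "t += 255" once per
	 * skipped byte; the single "offset > MAX_255_COUNT" check replaces the
	 * old per-step (t + x) >= t wraparound tests in HAVE_IP()/HAVE_OP(). */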
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index fe5a3342e960..6111bcb28376 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -1,6 +1,8 @@
#define pr_fmt(fmt) "%s: " fmt "\n", __func__
#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
#include <linux/percpu-refcount.h>
/*
@@ -11,8 +13,8 @@
* percpu counters will all sum to the correct value
*
* (More precisely: because moduler arithmatic is commutative the sum of all the
- * pcpu_count vars will be equal to what it would have been if all the gets and
- * puts were done to a single integer, even if some of the percpu integers
+ * percpu_count vars will be equal to what it would have been if all the gets
+ * and puts were done to a single integer, even if some of the percpu integers
* overflow or underflow).
*
* The real trick to implementing percpu refcounts is shutdown. We can't detect
@@ -25,75 +27,64 @@
* works.
*
* Converting to non percpu mode is done with some RCUish stuff in
- * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t
- * can't hit 0 before we've added up all the percpu refs.
+ * percpu_ref_kill. Additionally, we need a bias value so that the
+ * atomic_long_t can't hit 0 before we've added up all the percpu refs.
*/
-#define PCPU_COUNT_BIAS (1U << 31)
+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
-static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
+
+static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
- return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+ return (unsigned long __percpu *)
+ (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}
/**
* percpu_ref_init - initialize a percpu refcount
* @ref: percpu_ref to initialize
* @release: function which will be called when refcount hits 0
+ * @flags: PERCPU_REF_INIT_* flags
+ * @gfp: allocation mask to use
*
- * Initializes the refcount in single atomic counter mode with a refcount of 1;
- * analagous to atomic_set(ref, 1).
+ * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
+ * refcount of 1; analagous to atomic_long_set(ref, 1). See the
+ * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
*
* Note that @release must not sleep - it may potentially be called from RCU
* callback context by percpu_ref_kill().
*/
-int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
+int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
+ unsigned int flags, gfp_t gfp)
{
- atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+ size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
+ __alignof__(unsigned long));
+ unsigned long start_count = 0;
- ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
- if (!ref->pcpu_count_ptr)
+ ref->percpu_count_ptr = (unsigned long)
+ __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
+ if (!ref->percpu_count_ptr)
return -ENOMEM;
- ref->release = release;
- return 0;
-}
-EXPORT_SYMBOL_GPL(percpu_ref_init);
+ ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
-/**
- * percpu_ref_reinit - re-initialize a percpu refcount
- * @ref: perpcu_ref to re-initialize
- *
- * Re-initialize @ref so that it's in the same state as when it finished
- * percpu_ref_init(). @ref must have been initialized successfully, killed
- * and reached 0 but not exited.
- *
- * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
- * this function is in progress.
- */
-void percpu_ref_reinit(struct percpu_ref *ref)
-{
- unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
- int cpu;
-
- BUG_ON(!pcpu_count);
- WARN_ON(!percpu_ref_is_zero(ref));
+ if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
+ ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+ else
+ start_count += PERCPU_COUNT_BIAS;
- atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+ if (flags & PERCPU_REF_INIT_DEAD)
+ ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
+ else
+ start_count++;
- /*
- * Restore per-cpu operation. smp_store_release() is paired with
- * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
- * that the zeroing is visible to all percpu accesses which can see
- * the following PCPU_REF_DEAD clearing.
- */
- for_each_possible_cpu(cpu)
- *per_cpu_ptr(pcpu_count, cpu) = 0;
+ atomic_long_set(&ref->count, start_count);
- smp_store_release(&ref->pcpu_count_ptr,
- ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+ ref->release = release;
+ return 0;
}
-EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+EXPORT_SYMBOL_GPL(percpu_ref_init);
/**
* percpu_ref_exit - undo percpu_ref_init()
@@ -107,26 +98,39 @@ EXPORT_SYMBOL_GPL(percpu_ref_reinit);
*/
void percpu_ref_exit(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
- if (pcpu_count) {
- free_percpu(pcpu_count);
- ref->pcpu_count_ptr = PCPU_REF_DEAD;
+ if (percpu_count) {
+ free_percpu(percpu_count);
+ ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
-static void percpu_ref_kill_rcu(struct rcu_head *rcu)
+static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
+{
+ struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
+
+ ref->confirm_switch(ref);
+ ref->confirm_switch = NULL;
+ wake_up_all(&percpu_ref_switch_waitq);
+
+ /* drop ref from percpu_ref_switch_to_atomic() */
+ percpu_ref_put(ref);
+}
+
+static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
- unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
- unsigned count = 0;
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+ unsigned long count = 0;
int cpu;
for_each_possible_cpu(cpu)
- count += *per_cpu_ptr(pcpu_count, cpu);
+ count += *per_cpu_ptr(percpu_count, cpu);
- pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
+ pr_debug("global %ld percpu %ld",
+ atomic_long_read(&ref->count), (long)count);
/*
* It's crucial that we sum the percpu counters _before_ adding the sum
@@ -140,21 +144,137 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
* reaching 0 before we add the percpu counts. But doing it at the same
* time is equivalent and saves us atomic operations:
*/
+ atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
+
+ WARN_ONCE(atomic_long_read(&ref->count) <= 0,
+ "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
+ ref->release, atomic_long_read(&ref->count));
+
+ /* @ref is viewed as dead on all CPUs, send out switch confirmation */
+ percpu_ref_call_confirm_rcu(rcu);
+}
+
+static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
+{
+}
+
+static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) {
+ /* switching from percpu to atomic */
+ ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+
+ /*
+ * Non-NULL ->confirm_switch is used to indicate that
+ * switching is in progress. Use noop one if unspecified.
+ */
+ WARN_ON_ONCE(ref->confirm_switch);
+ ref->confirm_switch =
+ confirm_switch ?: percpu_ref_noop_confirm_switch;
+
+ percpu_ref_get(ref); /* put after confirmation */
+ call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
+ } else if (confirm_switch) {
+ /*
+ * Somebody already set ATOMIC. Switching may still be in
+ * progress. @confirm_switch must be invoked after the
+ * switching is complete and a full sched RCU grace period
+ * has passed. Wait synchronously for the previous
+ * switching and schedule @confirm_switch invocation.
+ */
+ wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+ ref->confirm_switch = confirm_switch;
+
+ percpu_ref_get(ref); /* put after confirmation */
+ call_rcu_sched(&ref->rcu, percpu_ref_call_confirm_rcu);
+ }
+}
+
+/**
+ * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
+ * @ref: percpu_ref to switch to atomic mode
+ * @confirm_switch: optional confirmation callback
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * Use percpu_ref_kill[_and_confirm]().
+ *
+ * Schedule switching of @ref to atomic mode. All its percpu counts will
+ * be collected to the main atomic counter. On completion, when all CPUs
+ * are guaranteed to be in atomic mode, @confirm_switch, which may not
+ * block, is invoked. This function may be invoked concurrently with all
+ * the get/put operations and can safely be mixed with kill and reinit
+ * operations. Note that @ref will stay in atomic mode across kill/reinit
+ * cycles until percpu_ref_switch_to_percpu() is called.
+ *
+ * This function normally doesn't block and can be called from any context
+ * but it may block if @confirm_kill is specified and @ref is already in
+ * the process of switching to atomic mode. In such cases, @confirm_switch
+ * will be invoked after the switching is complete.
+ *
+ * Due to the way percpu_ref is implemented, @confirm_switch will be called
+ * after at least one full sched RCU grace period has passed but this is an
+ * implementation detail and must not be depended upon.
+ */
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ ref->force_atomic = true;
+ __percpu_ref_switch_to_atomic(ref, confirm_switch);
+}
- atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);
+static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+ int cpu;
- WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)",
- atomic_read(&ref->count));
+ BUG_ON(!percpu_count);
- /* @ref is viewed as dead on all CPUs, send out kill confirmation */
- if (ref->confirm_kill)
- ref->confirm_kill(ref);
+ if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
+ return;
+
+ wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+
+ atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
/*
- * Now we're in single atomic_t mode with a consistent refcount, so it's
- * safe to drop our initial ref:
+ * Restore per-cpu operation. smp_store_release() is paired with
+ * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
+ * that the zeroing is visible to all percpu accesses which can see
+ * the following __PERCPU_REF_ATOMIC clearing.
*/
- percpu_ref_put(ref);
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(percpu_count, cpu) = 0;
+
+ smp_store_release(&ref->percpu_count_ptr,
+ ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
+}
+
+/**
+ * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
+ * @ref: percpu_ref to switch to percpu mode
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * To re-use an expired ref, use percpu_ref_reinit().
+ *
+ * Switch @ref to percpu mode. This function may be invoked concurrently
+ * with all the get/put operations and can safely be mixed with kill and
+ * reinit operations. This function reverses the sticky atomic state set
+ * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
+ * dying or dead, the actual switching takes place on the following
+ * percpu_ref_reinit().
+ *
+ * This function normally doesn't block and can be called from any context
+ * but it may block if @ref is in the process of switching to atomic mode
+ * by percpu_ref_switch_atomic().
+ */
+void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
+{
+ ref->force_atomic = false;
+
+ /* a dying or dead ref can't be switched to percpu mode w/o reinit */
+ if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+ __percpu_ref_switch_to_percpu(ref);
}
/**
@@ -164,23 +284,48 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
*
* Equivalent to percpu_ref_kill() but also schedules kill confirmation if
* @confirm_kill is not NULL. @confirm_kill, which may not block, will be
- * called after @ref is seen as dead from all CPUs - all further
- * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
- * for more details.
+ * called after @ref is seen as dead from all CPUs at which point all
+ * further invocations of percpu_ref_tryget_live() will fail. See
+ * percpu_ref_tryget_live() for details.
+ *
+ * This function normally doesn't block and can be called from any context
+ * but it may block if @confirm_kill is specified and @ref is in the
+ * process of switching to atomic mode by percpu_ref_switch_atomic().
*
- * Due to the way percpu_ref is implemented, @confirm_kill will be called
- * after at least one full RCU grace period has passed but this is an
- * implementation detail and callers must not depend on it.
+ * Due to the way percpu_ref is implemented, @confirm_switch will be called
+ * after at least one full sched RCU grace period has passed but this is an
+ * implementation detail and must not be depended upon.
*/
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill)
{
- WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
- "percpu_ref_kill() called more than once!\n");
+ WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
+ "%s called more than once on %pf!", __func__, ref->release);
- ref->pcpu_count_ptr |= PCPU_REF_DEAD;
- ref->confirm_kill = confirm_kill;
-
- call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
+ ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
+ __percpu_ref_switch_to_atomic(ref, confirm_kill);
+ percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
+
+/**
+ * percpu_ref_reinit - re-initialize a percpu refcount
+ * @ref: percpu_ref to re-initialize
+ *
+ * Re-initialize @ref so that it's in the same state as when it finished
+ * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
+ * initialized successfully and reached 0 but not exited.
+ *
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
+ */
+void percpu_ref_reinit(struct percpu_ref *ref)
+{
+ WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+
+ ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
+ percpu_ref_get(ref);
+ if (!ref->force_atomic)
+ __percpu_ref_switch_to_percpu(ref);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
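A minimal caller sketch against the new four-argument percpu_ref_init(); struct foo, foo_release() and the completion are invented for illustration:

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

struct foo {
	struct percpu_ref	ref;
	struct completion	freed;
};

static void foo_release(struct percpu_ref *ref)		/* may not sleep */
{
	struct foo *foo = container_of(ref, struct foo, ref);

	complete(&foo->freed);
}

static int foo_start(struct foo *foo)
{
	init_completion(&foo->freed);
	/* flags == 0: start live, in percpu mode, with a refcount of 1 */
	return percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
}

static void foo_stop(struct foo *foo)
{
	percpu_ref_kill(&foo->ref);		/* switch to atomic mode, drop base ref */
	wait_for_completion(&foo->freed);	/* foo_release() ran once the count hit 0 */
	percpu_ref_exit(&foo->ref);
}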
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 7dd33577b905..48144cdae819 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -112,13 +112,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
}
EXPORT_SYMBOL(__percpu_counter_sum);
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
struct lock_class_key *key)
{
+ unsigned long flags __maybe_unused;
+
raw_spin_lock_init(&fbc->lock);
lockdep_set_class(&fbc->lock, key);
fbc->count = amount;
- fbc->counters = alloc_percpu(s32);
+ fbc->counters = alloc_percpu_gfp(s32, gfp);
if (!fbc->counters)
return -ENOMEM;
@@ -126,9 +128,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
#ifdef CONFIG_HOTPLUG_CPU
INIT_LIST_HEAD(&fbc->list);
- spin_lock(&percpu_counters_lock);
+ spin_lock_irqsave(&percpu_counters_lock, flags);
list_add(&fbc->list, &percpu_counters);
- spin_unlock(&percpu_counters_lock);
+ spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
return 0;
}
@@ -136,15 +138,17 @@ EXPORT_SYMBOL(__percpu_counter_init);
void percpu_counter_destroy(struct percpu_counter *fbc)
{
+ unsigned long flags __maybe_unused;
+
if (!fbc->counters)
return;
debug_percpu_counter_deactivate(fbc);
#ifdef CONFIG_HOTPLUG_CPU
- spin_lock(&percpu_counters_lock);
+ spin_lock_irqsave(&percpu_counters_lock, flags);
list_del(&fbc->list);
- spin_unlock(&percpu_counters_lock);
+ spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
free_percpu(fbc->counters);
fbc->counters = NULL;
@@ -173,7 +177,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
return NOTIFY_OK;
cpu = (unsigned long)hcpu;
- spin_lock(&percpu_counters_lock);
+ spin_lock_irq(&percpu_counters_lock);
list_for_each_entry(fbc, &percpu_counters, list) {
s32 *pcount;
unsigned long flags;
@@ -184,7 +188,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
*pcount = 0;
raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
- spin_unlock(&percpu_counters_lock);
+ spin_unlock_irq(&percpu_counters_lock);
#endif
return NOTIFY_OK;
}
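percpu_counter_init() now takes the allocation mask from the caller instead of implicitly using GFP_KERNEL; a minimal sketch (the counter name is invented):

static struct percpu_counter nr_widgets;

static int widgets_init(void)
{
	int err;

	err = percpu_counter_init(&nr_widgets, 0, GFP_KERNEL);
	if (err)
		return err;

	percpu_counter_inc(&nr_widgets);
	return 0;
}

static void widgets_exit(void)
{
	percpu_counter_destroy(&nr_widgets);
}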
diff --git a/lib/prio_heap.c b/lib/prio_heap.c
deleted file mode 100644
index a7af6f85eca8..000000000000
--- a/lib/prio_heap.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Simple insertion-only static-sized priority heap containing
- * pointers, based on CLR, chapter 7
- */
-
-#include <linux/slab.h>
-#include <linux/prio_heap.h>
-
-int heap_init(struct ptr_heap *heap, size_t size, gfp_t gfp_mask,
- int (*gt)(void *, void *))
-{
- heap->ptrs = kmalloc(size, gfp_mask);
- if (!heap->ptrs)
- return -ENOMEM;
- heap->size = 0;
- heap->max = size / sizeof(void *);
- heap->gt = gt;
- return 0;
-}
-
-void heap_free(struct ptr_heap *heap)
-{
- kfree(heap->ptrs);
-}
-
-void *heap_insert(struct ptr_heap *heap, void *p)
-{
- void *res;
- void **ptrs = heap->ptrs;
- int pos;
-
- if (heap->size < heap->max) {
- /* Heap insertion */
- pos = heap->size++;
- while (pos > 0 && heap->gt(p, ptrs[(pos-1)/2])) {
- ptrs[pos] = ptrs[(pos-1)/2];
- pos = (pos-1)/2;
- }
- ptrs[pos] = p;
- return NULL;
- }
-
- /* The heap is full, so something will have to be dropped */
-
- /* If the new pointer is greater than the current max, drop it */
- if (heap->gt(p, ptrs[0]))
- return p;
-
- /* Replace the current max and heapify */
- res = ptrs[0];
- ptrs[0] = p;
- pos = 0;
-
- while (1) {
- int left = 2 * pos + 1;
- int right = 2 * pos + 2;
- int largest = pos;
- if (left < heap->size && heap->gt(ptrs[left], p))
- largest = left;
- if (right < heap->size && heap->gt(ptrs[right], ptrs[largest]))
- largest = right;
- if (largest == pos)
- break;
- /* Push p down the heap one level and bump one up */
- ptrs[pos] = ptrs[largest];
- ptrs[largest] = p;
- pos = largest;
- }
- return res;
-}
diff --git a/lib/proportions.c b/lib/proportions.c
index 05df84801b56..6f724298f67a 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -73,7 +73,7 @@
#include <linux/proportions.h>
#include <linux/rcupdate.h>
-int prop_descriptor_init(struct prop_descriptor *pd, int shift)
+int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp)
{
int err;
@@ -83,11 +83,11 @@ int prop_descriptor_init(struct prop_descriptor *pd, int shift)
pd->index = 0;
pd->pg[0].shift = shift;
mutex_init(&pd->mutex);
- err = percpu_counter_init(&pd->pg[0].events, 0);
+ err = percpu_counter_init(&pd->pg[0].events, 0, gfp);
if (err)
goto out;
- err = percpu_counter_init(&pd->pg[1].events, 0);
+ err = percpu_counter_init(&pd->pg[1].events, 0, gfp);
if (err)
percpu_counter_destroy(&pd->pg[0].events);
@@ -188,12 +188,12 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
-int prop_local_init_percpu(struct prop_local_percpu *pl)
+int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp)
{
raw_spin_lock_init(&pl->lock);
pl->shift = 0;
pl->period = 0;
- return percpu_counter_init(&pl->events, 0);
+ return percpu_counter_init(&pl->events, 0, gfp);
}
void prop_local_destroy_percpu(struct prop_local_percpu *pl)
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index f0b1aa3586d1..7d0e5cd7b570 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -121,9 +121,9 @@ static inline const struct raid6_recov_calls *raid6_choose_recov(void)
raid6_2data_recov = best->data2;
raid6_datap_recov = best->datap;
- printk("raid6: using %s recovery algorithm\n", best->name);
+ pr_info("raid6: using %s recovery algorithm\n", best->name);
} else
- printk("raid6: Yikes! No recovery algorithm found!\n");
+ pr_err("raid6: Yikes! No recovery algorithm found!\n");
return best;
}
@@ -157,18 +157,18 @@ static inline const struct raid6_calls *raid6_choose_gen(
bestperf = perf;
best = *algo;
}
- printk("raid6: %-8s %5ld MB/s\n", (*algo)->name,
+ pr_info("raid6: %-8s %5ld MB/s\n", (*algo)->name,
(perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
}
}
if (best) {
- printk("raid6: using algorithm %s (%ld MB/s)\n",
+ pr_info("raid6: using algorithm %s (%ld MB/s)\n",
best->name,
(bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
raid6_call = *best;
} else
- printk("raid6: Yikes! No algorithm found!\n");
+ pr_err("raid6: Yikes! No algorithm found!\n");
return best;
}
@@ -194,7 +194,7 @@ int __init raid6_select_algo(void)
syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);
if (!syndromes) {
- printk("raid6: Yikes! No memory available.\n");
+ pr_err("raid6: Yikes! No memory available.\n");
return -ENOMEM;
}
diff --git a/lib/random32.c b/lib/random32.c
index c9b6bf3afe0c..0bee183fa18f 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -37,6 +37,7 @@
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <asm/unaligned.h>
#ifdef CONFIG_RANDOM32_SELFTEST
static void __init prandom_state_selftest(void);
@@ -96,27 +97,23 @@ EXPORT_SYMBOL(prandom_u32);
* This is used for pseudo-randomness with no outside seeding.
* For more random results, use prandom_bytes().
*/
-void prandom_bytes_state(struct rnd_state *state, void *buf, int bytes)
+void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
{
- unsigned char *p = buf;
- int i;
-
- for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) {
- u32 random = prandom_u32_state(state);
- int j;
+ u8 *ptr = buf;
- for (j = 0; j < sizeof(u32); j++) {
- p[i + j] = random;
- random >>= BITS_PER_BYTE;
- }
+ while (bytes >= sizeof(u32)) {
+ put_unaligned(prandom_u32_state(state), (u32 *) ptr);
+ ptr += sizeof(u32);
+ bytes -= sizeof(u32);
}
- if (i < bytes) {
- u32 random = prandom_u32_state(state);
- for (; i < bytes; i++) {
- p[i] = random;
- random >>= BITS_PER_BYTE;
- }
+ if (bytes > 0) {
+ u32 rem = prandom_u32_state(state);
+ do {
+ *ptr++ = (u8) rem;
+ bytes--;
+ rem >>= BITS_PER_BYTE;
+ } while (bytes > 0);
}
}
EXPORT_SYMBOL(prandom_bytes_state);
@@ -126,7 +123,7 @@ EXPORT_SYMBOL(prandom_bytes_state);
* @buf: where to copy the pseudo-random bytes to
* @bytes: the requested number of bytes
*/
-void prandom_bytes(void *buf, int bytes)
+void prandom_bytes(void *buf, size_t bytes)
{
struct rnd_state *state = &get_cpu_var(net_rand_state);
@@ -137,7 +134,7 @@ EXPORT_SYMBOL(prandom_bytes);
static void prandom_warmup(struct rnd_state *state)
{
- /* Calling RNG ten times to satify recurrence condition */
+ /* Calling RNG ten times to satisfy recurrence condition */
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
@@ -152,7 +149,7 @@ static void prandom_warmup(struct rnd_state *state)
static u32 __extract_hwseed(void)
{
- u32 val = 0;
+ unsigned int val = 0;
(void)(arch_get_random_seed_int(&val) ||
arch_get_random_int(&val));
@@ -228,7 +225,7 @@ static void __prandom_timer(unsigned long dontcare)
prandom_seed(entropy);
/* reseed every ~60 seconds, in [40 .. 80) interval with slack */
- expires = 40 + (prandom_u32() % 40);
+ expires = 40 + prandom_u32_max(40);
seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
add_timer(&seed_timer);
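The reseed slack now comes from prandom_u32_max(40), which returns a value in [0, 40) so the interval above stays within [40, 80), and prandom_bytes()/prandom_bytes_state() take a size_t length. A small usage sketch (the buffer name is invented):

	u8 cookie[16];
	u32 slack;

	prandom_bytes(cookie, sizeof(cookie));	/* length is now size_t */
	slack = prandom_u32_max(40);		/* in [0, 40), so 40 + slack is in [40, 80) */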
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a2c78810ebc1..081be3ba9ea8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -23,7 +23,6 @@
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
-#include <linux/log2.h>
#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4UL
@@ -55,7 +54,7 @@ static u32 __hashfn(const struct rhashtable *ht, const void *key,
/**
* rhashtable_hashfn - compute hash for key of given length
- * @ht: hash table to compuate for
+ * @ht: hash table to compute for
* @key: pointer to key
* @len: length of key
*
@@ -86,7 +85,7 @@ static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
/**
* rhashtable_obj_hashfn - compute hash for hashed object
- * @ht: hash table to compuate for
+ * @ht: hash table to compute for
* @ptr: pointer to hashed object
*
* Computes the hash value using the hash function `hashfn` respectively
@@ -298,7 +297,7 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
ASSERT_RHT_MUTEX(ht);
- if (tbl->size <= HASH_MIN_SIZE)
+ if (ht->shift <= ht->p.min_shift)
return 0;
ntbl = bucket_table_alloc(tbl->size / 2, flags);
@@ -506,9 +505,10 @@ void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
-static size_t rounded_hashtable_size(unsigned int nelem)
+static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
- return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
+ return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+ 1UL << params->min_shift);
}
/**
@@ -566,8 +566,11 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
(!params->key_len && !params->obj_hashfn))
return -EINVAL;
+ params->min_shift = max_t(size_t, params->min_shift,
+ ilog2(HASH_MIN_SIZE));
+
if (params->nelem_hint)
- size = rounded_hashtable_size(params->nelem_hint);
+ size = rounded_hashtable_size(params);
tbl = bucket_table_alloc(size, GFP_KERNEL);
if (tbl == NULL)
@@ -589,13 +592,13 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
* rhashtable_destroy - destroy hash table
* @ht: the hash table to destroy
*
- * Frees the bucket array.
+ * Frees the bucket array. This function is not rcu safe, therefore the caller
+ * has to make sure that no resizing may happen by unpublishing the hashtable
+ * and waiting for the quiescent cycle before releasing the bucket array.
*/
void rhashtable_destroy(const struct rhashtable *ht)
{
- const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
-
- bucket_table_free(tbl);
+ bucket_table_free(ht->tbl);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
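Both the initial sizing and the shrink limit now honour params->min_shift, which is clamped to at least ilog2(HASH_MIN_SIZE); a worked example with assumed parameter values:

	size_t size;

	/* assumed: nelem_hint == 10, min_shift == 5 */
	size = max(roundup_pow_of_two(10 * 4 / 3), 1UL << 5);
	/* roundup_pow_of_two(13) == 16, 1UL << 5 == 32  ->  size == 32 buckets,
	 * and rhashtable_shrink() now stops once ht->shift <= min_shift. */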
diff --git a/lib/string.c b/lib/string.c
index 992bf30af759..2fc20aa06f84 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -27,14 +27,14 @@
#include <linux/bug.h>
#include <linux/errno.h>
-#ifndef __HAVE_ARCH_STRNICMP
+#ifndef __HAVE_ARCH_STRNCASECMP
/**
- * strnicmp - Case insensitive, length-limited string comparison
+ * strncasecmp - Case insensitive, length-limited string comparison
* @s1: One string
* @s2: The other string
* @len: the maximum number of characters to compare
*/
-int strnicmp(const char *s1, const char *s2, size_t len)
+int strncasecmp(const char *s1, const char *s2, size_t len)
{
/* Yes, Virginia, it had better be unsigned */
unsigned char c1, c2;
@@ -56,6 +56,14 @@ int strnicmp(const char *s1, const char *s2, size_t len)
} while (--len);
return (int)c1 - (int)c2;
}
+EXPORT_SYMBOL(strncasecmp);
+#endif
+#ifndef __HAVE_ARCH_STRNICMP
+#undef strnicmp
+int strnicmp(const char *s1, const char *s2, size_t len)
+{
+ return strncasecmp(s1, s2, len);
+}
EXPORT_SYMBOL(strnicmp);
#endif
@@ -73,20 +81,6 @@ int strcasecmp(const char *s1, const char *s2)
EXPORT_SYMBOL(strcasecmp);
#endif
-#ifndef __HAVE_ARCH_STRNCASECMP
-int strncasecmp(const char *s1, const char *s2, size_t n)
-{
- int c1, c2;
-
- do {
- c1 = tolower(*s1++);
- c2 = tolower(*s2++);
- } while ((--n > 0) && c1 == c2 && c1 != 0);
- return c1 - c2;
-}
-EXPORT_SYMBOL(strncasecmp);
-#endif
-
#ifndef __HAVE_ARCH_STRCPY
/**
* strcpy - Copy a %NUL terminated string
@@ -807,9 +801,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
return check_bytes8(start, value, bytes);
value64 = value;
-#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
value64 *= 0x0101010101010101;
-#elif defined(ARCH_HAS_FAST_MULTIPLIER)
+#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
value64 *= 0x01010101;
value64 |= value64 << 32;
#else
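After the rename there is a single case-insensitive, length-limited comparison; strnicmp() survives only as a wrapper so existing callers keep working. A small illustration (cmd and feature_on are invented):

	if (strncasecmp(cmd, "enable", 6) == 0)
		feature_on = true;

	/* legacy spelling, forwards to strncasecmp() */
	if (strnicmp(cmd, "disable", 7) == 0)
		feature_on = false;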
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 29033f319aea..58b78ba57439 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -8,6 +8,8 @@
#include <linux/math64.h>
#include <linux/export.h>
#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/string.h>
#include <linux/string_helpers.h>
/**
@@ -168,6 +170,44 @@ static bool unescape_special(char **src, char **dst)
return true;
}
+/**
+ * string_unescape - unquote characters in the given string
+ * @src: source buffer (escaped)
+ * @dst: destination buffer (unescaped)
+ * @size: size of the destination buffer (0 to unlimit)
+ * @flags: combination of the flags (bitwise OR):
+ * %UNESCAPE_SPACE:
+ * '\f' - form feed
+ * '\n' - new line
+ * '\r' - carriage return
+ * '\t' - horizontal tab
+ * '\v' - vertical tab
+ * %UNESCAPE_OCTAL:
+ * '\NNN' - byte with octal value NNN (1 to 3 digits)
+ * %UNESCAPE_HEX:
+ * '\xHH' - byte with hexadecimal value HH (1 to 2 digits)
+ * %UNESCAPE_SPECIAL:
+ * '\"' - double quote
+ * '\\' - backslash
+ * '\a' - alert (BEL)
+ * '\e' - escape
+ * %UNESCAPE_ANY:
+ * all previous together
+ *
+ * Description:
+ * The function unquotes characters in the given string.
+ *
+ * Because the size of the output will be the same as or less than the size of
+ * the input, the transformation may be performed in place.
+ *
+ * Caller must provide valid source and destination pointers. Be aware that
+ * destination buffer will always be NULL-terminated. Source string must be
+ * NULL-terminated as well.
+ *
+ * Return:
+ * The amount of the characters processed to the destination buffer excluding
+ * trailing '\0' is returned.
+ */
int string_unescape(char *src, char *dst, size_t size, unsigned int flags)
{
char *out = dst;
@@ -202,3 +242,275 @@ int string_unescape(char *src, char *dst, size_t size, unsigned int flags)
return out - dst;
}
EXPORT_SYMBOL(string_unescape);
+
+static int escape_passthrough(unsigned char c, char **dst, size_t *osz)
+{
+ char *out = *dst;
+
+ if (*osz < 1)
+ return -ENOMEM;
+
+ *out++ = c;
+
+ *dst = out;
+ *osz -= 1;
+
+ return 1;
+}
+
+static int escape_space(unsigned char c, char **dst, size_t *osz)
+{
+ char *out = *dst;
+ unsigned char to;
+
+ if (*osz < 2)
+ return -ENOMEM;
+
+ switch (c) {
+ case '\n':
+ to = 'n';
+ break;
+ case '\r':
+ to = 'r';
+ break;
+ case '\t':
+ to = 't';
+ break;
+ case '\v':
+ to = 'v';
+ break;
+ case '\f':
+ to = 'f';
+ break;
+ default:
+ return 0;
+ }
+
+ *out++ = '\\';
+ *out++ = to;
+
+ *dst = out;
+ *osz -= 2;
+
+ return 1;
+}
+
+static int escape_special(unsigned char c, char **dst, size_t *osz)
+{
+ char *out = *dst;
+ unsigned char to;
+
+ if (*osz < 2)
+ return -ENOMEM;
+
+ switch (c) {
+ case '\\':
+ to = '\\';
+ break;
+ case '\a':
+ to = 'a';
+ break;
+ case '\e':
+ to = 'e';
+ break;
+ default:
+ return 0;
+ }
+
+ *out++ = '\\';
+ *out++ = to;
+
+ *dst = out;
+ *osz -= 2;
+
+ return 1;
+}
+
+static int escape_null(unsigned char c, char **dst, size_t *osz)
+{
+ char *out = *dst;
+
+ if (*osz < 2)
+ return -ENOMEM;
+
+ if (c)
+ return 0;
+
+ *out++ = '\\';
+ *out++ = '0';
+
+ *dst = out;
+ *osz -= 2;
+
+ return 1;
+}
+
+static int escape_octal(unsigned char c, char **dst, size_t *osz)
+{
+ char *out = *dst;
+
+ if (*osz < 4)
+ return -ENOMEM;
+
+ *out++ = '\\';
+ *out++ = ((c >> 6) & 0x07) + '0';
+ *out++ = ((c >> 3) & 0x07) + '0';
+ *out++ = ((c >> 0) & 0x07) + '0';
+
+ *dst = out;
+ *osz -= 4;
+
+ return 1;
+}
+
+static int escape_hex(unsigned char c, char **dst, size_t *osz)
+{
+ char *out = *dst;
+
+ if (*osz < 4)
+ return -ENOMEM;
+
+ *out++ = '\\';
+ *out++ = 'x';
+ *out++ = hex_asc_hi(c);
+ *out++ = hex_asc_lo(c);
+
+ *dst = out;
+ *osz -= 4;
+
+ return 1;
+}
+
+/**
+ * string_escape_mem - quote characters in the given memory buffer
+ * @src: source buffer (unescaped)
+ * @isz: source buffer size
+ * @dst: destination buffer (escaped)
+ * @osz: destination buffer size
+ * @flags: combination of the flags (bitwise OR):
+ * %ESCAPE_SPACE:
+ * '\f' - form feed
+ * '\n' - new line
+ * '\r' - carriage return
+ * '\t' - horizontal tab
+ * '\v' - vertical tab
+ * %ESCAPE_SPECIAL:
+ * '\\' - backslash
+ * '\a' - alert (BEL)
+ * '\e' - escape
+ * %ESCAPE_NULL:
+ * '\0' - null
+ * %ESCAPE_OCTAL:
+ * '\NNN' - byte with octal value NNN (3 digits)
+ * %ESCAPE_ANY:
+ * all previous together
+ * %ESCAPE_NP:
+ * escape only non-printable characters (checked by isprint)
+ * %ESCAPE_ANY_NP:
+ * all previous together
+ * %ESCAPE_HEX:
+ * '\xHH' - byte with hexadecimal value HH (2 digits)
+ * @esc: NULL-terminated string of characters any of which, if found in
+ * the source, has to be escaped
+ *
+ * Description:
+ * The process of escaping byte buffer includes several parts. They are applied
+ * in the following sequence.
+ * 1. The character is matched to the printable class, if asked, and in
+ * case of match it passes through to the output.
+ * 2. The character is not matched to the one from @esc string and thus
+ * must go as is to the output.
+ * 3. The character is checked if it falls into the class given by @flags.
+ * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any
+ * character. Note that they actually can't go together, otherwise
+ * %ESCAPE_HEX will be ignored.
+ *
+ * Caller must provide valid source and destination pointers. Be aware that
+ * the destination buffer is not NULL-terminated, so the caller has to append
+ * the terminator if needed.
+ *
+ * Return:
+ * The number of characters written to the destination buffer, or %-ENOMEM
+ * if the destination buffer is too small to hold an escaped character.
+ *
+ * Even in the case of an error, the @dst pointer is updated to point to the
+ * byte after the last processed character.
+ */
+int string_escape_mem(const char *src, size_t isz, char **dst, size_t osz,
+ unsigned int flags, const char *esc)
+{
+ char *out = *dst, *p = out;
+ bool is_dict = esc && *esc;
+ int ret = 0;
+
+ while (isz--) {
+ unsigned char c = *src++;
+
+ /*
+ * Apply rules in the following sequence:
+ * - the character is printable and @flags has the
+ * %ESCAPE_NP bit set
+ * - the @esc string is supplied and does not contain
+ * the character in question
+ * - the character does not fall into any class of symbols
+ * selected by @flags
+ * In these cases the character just passes through to the
+ * output buffer.
+ */
+ if ((flags & ESCAPE_NP && isprint(c)) ||
+ (is_dict && !strchr(esc, c))) {
+ /* do nothing */
+ } else {
+ if (flags & ESCAPE_SPACE) {
+ ret = escape_space(c, &p, &osz);
+ if (ret < 0)
+ break;
+ if (ret > 0)
+ continue;
+ }
+
+ if (flags & ESCAPE_SPECIAL) {
+ ret = escape_special(c, &p, &osz);
+ if (ret < 0)
+ break;
+ if (ret > 0)
+ continue;
+ }
+
+ if (flags & ESCAPE_NULL) {
+ ret = escape_null(c, &p, &osz);
+ if (ret < 0)
+ break;
+ if (ret > 0)
+ continue;
+ }
+
+ /* ESCAPE_OCTAL and ESCAPE_HEX always go last */
+ if (flags & ESCAPE_OCTAL) {
+ ret = escape_octal(c, &p, &osz);
+ if (ret < 0)
+ break;
+ continue;
+ }
+ if (flags & ESCAPE_HEX) {
+ ret = escape_hex(c, &p, &osz);
+ if (ret < 0)
+ break;
+ continue;
+ }
+ }
+
+ ret = escape_passthrough(c, &p, &osz);
+ if (ret < 0)
+ break;
+ }
+
+ *dst = p;
+
+ if (ret < 0)
+ return ret;
+
+ return p - out;
+}
+EXPORT_SYMBOL(string_escape_mem);
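
For reference, a minimal usage sketch of the new helper, tying together the
kernel-doc above; 'src' and 'len' stand for caller-owned data and are not part
of the patch:

	char buf[128], *p = buf;
	int ret;

	/* Escape up to sizeof(buf) - 1 bytes, leaving room for a terminator. */
	ret = string_escape_mem(src, len, &p, sizeof(buf) - 1, ESCAPE_ANY_NP, NULL);
	if (ret < 0)
		return ret;	/* destination too small for an escaped character */
	buf[ret] = '\0';	/* the helper does not NULL-terminate */
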
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 6ac48de04c0e..ab0d30e1e18f 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -5,11 +5,32 @@
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
+static __init bool test_string_check_buf(const char *name, unsigned int flags,
+ char *in, size_t p,
+ char *out_real, size_t q_real,
+ char *out_test, size_t q_test)
+{
+ if (q_real == q_test && !memcmp(out_test, out_real, q_test))
+ return true;
+
+ pr_warn("Test '%s' failed: flags = %u\n", name, flags);
+
+ print_hex_dump(KERN_WARNING, "Input: ", DUMP_PREFIX_NONE, 16, 1,
+ in, p, true);
+ print_hex_dump(KERN_WARNING, "Expected: ", DUMP_PREFIX_NONE, 16, 1,
+ out_test, q_test, true);
+ print_hex_dump(KERN_WARNING, "Got: ", DUMP_PREFIX_NONE, 16, 1,
+ out_real, q_real, true);
+
+ return false;
+}
+
struct test_string {
const char *in;
const char *out;
@@ -39,12 +60,17 @@ static const struct test_string strings[] __initconst = {
},
};
-static void __init test_string_unescape(unsigned int flags, bool inplace)
+static void __init test_string_unescape(const char *name, unsigned int flags,
+ bool inplace)
{
- char in[256];
- char out_test[256];
- char out_real[256];
- int i, p = 0, q_test = 0, q_real = sizeof(out_real);
+ int q_real = 256;
+ char *in = kmalloc(q_real, GFP_KERNEL);
+ char *out_test = kmalloc(q_real, GFP_KERNEL);
+ char *out_real = kmalloc(q_real, GFP_KERNEL);
+ int i, p = 0, q_test = 0;
+
+ if (!in || !out_test || !out_real)
+ goto out;
for (i = 0; i < ARRAY_SIZE(strings); i++) {
const char *s = strings[i].in;
@@ -77,15 +103,225 @@ static void __init test_string_unescape(unsigned int flags, bool inplace)
q_real = string_unescape(in, out_real, q_real, flags);
}
- if (q_real != q_test || memcmp(out_test, out_real, q_test)) {
- pr_warn("Test failed: flags = %u\n", flags);
- print_hex_dump(KERN_WARNING, "Input: ",
- DUMP_PREFIX_NONE, 16, 1, in, p - 1, true);
- print_hex_dump(KERN_WARNING, "Expected: ",
- DUMP_PREFIX_NONE, 16, 1, out_test, q_test, true);
- print_hex_dump(KERN_WARNING, "Got: ",
- DUMP_PREFIX_NONE, 16, 1, out_real, q_real, true);
+ test_string_check_buf(name, flags, in, p - 1, out_real, q_real,
+ out_test, q_test);
+out:
+ kfree(out_real);
+ kfree(out_test);
+ kfree(in);
+}
+
+struct test_string_1 {
+ const char *out;
+ unsigned int flags;
+};
+
+#define TEST_STRING_2_MAX_S1 32
+struct test_string_2 {
+ const char *in;
+ struct test_string_1 s1[TEST_STRING_2_MAX_S1];
+};
+
+#define TEST_STRING_2_DICT_0 NULL
+static const struct test_string_2 escape0[] __initconst = {{
+ .in = "\f\\ \n\r\t\v",
+ .s1 = {{
+ .out = "\\f\\ \\n\\r\\t\\v",
+ .flags = ESCAPE_SPACE,
+ },{
+ .out = "\\f\\134\\040\\n\\r\\t\\v",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL,
+ },{
+ .out = "\\f\\x5c\\x20\\n\\r\\t\\v",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX,
+ },{
+ /* terminator */
+ }},
+},{
+ .in = "\\h\\\"\a\e\\",
+ .s1 = {{
+ .out = "\\\\h\\\\\"\\a\\e\\\\",
+ .flags = ESCAPE_SPECIAL,
+ },{
+ .out = "\\\\\\150\\\\\\042\\a\\e\\\\",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
+ },{
+ .out = "\\\\\\x68\\\\\\x22\\a\\e\\\\",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX,
+ },{
+ /* terminator */
+ }},
+},{
+ .in = "\eb \\C\007\"\x90\r]",
+ .s1 = {{
+ .out = "\eb \\C\007\"\x90\\r]",
+ .flags = ESCAPE_SPACE,
+ },{
+ .out = "\\eb \\\\C\\a\"\x90\r]",
+ .flags = ESCAPE_SPECIAL,
+ },{
+ .out = "\\eb \\\\C\\a\"\x90\\r]",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL,
+ },{
+ .out = "\\033\\142\\040\\134\\103\\007\\042\\220\\015\\135",
+ .flags = ESCAPE_OCTAL,
+ },{
+ .out = "\\033\\142\\040\\134\\103\\007\\042\\220\\r\\135",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL,
+ },{
+ .out = "\\e\\142\\040\\\\\\103\\a\\042\\220\\015\\135",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
+ },{
+ .out = "\\e\\142\\040\\\\\\103\\a\\042\\220\\r\\135",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_OCTAL,
+ },{
+ .out = "\eb \\C\007\"\x90\r]",
+ .flags = ESCAPE_NP,
+ },{
+ .out = "\eb \\C\007\"\x90\\r]",
+ .flags = ESCAPE_SPACE | ESCAPE_NP,
+ },{
+ .out = "\\eb \\C\\a\"\x90\r]",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NP,
+ },{
+ .out = "\\eb \\C\\a\"\x90\\r]",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NP,
+ },{
+ .out = "\\033b \\C\\007\"\\220\\015]",
+ .flags = ESCAPE_OCTAL | ESCAPE_NP,
+ },{
+ .out = "\\033b \\C\\007\"\\220\\r]",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NP,
+ },{
+ .out = "\\eb \\C\\a\"\\220\\r]",
+ .flags = ESCAPE_SPECIAL | ESCAPE_SPACE | ESCAPE_OCTAL |
+ ESCAPE_NP,
+ },{
+ .out = "\\x1bb \\C\\x07\"\\x90\\x0d]",
+ .flags = ESCAPE_NP | ESCAPE_HEX,
+ },{
+ /* terminator */
+ }},
+},{
+ /* terminator */
+}};
+
+#define TEST_STRING_2_DICT_1 "b\\ \t\r"
+static const struct test_string_2 escape1[] __initconst = {{
+ .in = "\f\\ \n\r\t\v",
+ .s1 = {{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_OCTAL,
+ },{
+ .out = "\f\\x5c\\x20\n\\x0d\\x09\v",
+ .flags = ESCAPE_HEX,
+ },{
+ /* terminator */
+ }},
+},{
+ .in = "\\h\\\"\a\e\\",
+ .s1 = {{
+ .out = "\\134h\\134\"\a\e\\134",
+ .flags = ESCAPE_OCTAL,
+ },{
+ /* terminator */
+ }},
+},{
+ .in = "\eb \\C\007\"\x90\r]",
+ .s1 = {{
+ .out = "\e\\142\\040\\134C\007\"\x90\\015]",
+ .flags = ESCAPE_OCTAL,
+ },{
+ /* terminator */
+ }},
+},{
+ /* terminator */
+}};
+
+static __init const char *test_string_find_match(const struct test_string_2 *s2,
+ unsigned int flags)
+{
+ const struct test_string_1 *s1 = s2->s1;
+ unsigned int i;
+
+ if (!flags)
+ return s2->in;
+
+ /* Test cases are NULL-aware */
+ flags &= ~ESCAPE_NULL;
+
+ /* ESCAPE_OCTAL has a higher priority */
+ if (flags & ESCAPE_OCTAL)
+ flags &= ~ESCAPE_HEX;
+
+ for (i = 0; i < TEST_STRING_2_MAX_S1 && s1->out; i++, s1++)
+ if (s1->flags == flags)
+ return s1->out;
+ return NULL;
+}
+
+static __init void test_string_escape(const char *name,
+ const struct test_string_2 *s2,
+ unsigned int flags, const char *esc)
+{
+ int q_real = 512;
+ char *out_test = kmalloc(q_real, GFP_KERNEL);
+ char *out_real = kmalloc(q_real, GFP_KERNEL);
+ char *in = kmalloc(256, GFP_KERNEL);
+ char *buf = out_real;
+ int p = 0, q_test = 0;
+
+ if (!out_test || !out_real || !in)
+ goto out;
+
+ for (; s2->in; s2++) {
+ const char *out;
+ int len;
+
+ /* NULL injection */
+ if (flags & ESCAPE_NULL) {
+ in[p++] = '\0';
+ out_test[q_test++] = '\\';
+ out_test[q_test++] = '0';
+ }
+
+ /* Don't try strings that have no output */
+ out = test_string_find_match(s2, flags);
+ if (!out)
+ continue;
+
+ /* Copy string to in buffer */
+ len = strlen(s2->in);
+ memcpy(&in[p], s2->in, len);
+ p += len;
+
+ /* Copy expected result for given flags */
+ len = strlen(out);
+ memcpy(&out_test[q_test], out, len);
+ q_test += len;
}
+
+ q_real = string_escape_mem(in, p, &buf, q_real, flags, esc);
+
+ test_string_check_buf(name, flags, in, p, out_real, q_real, out_test,
+ q_test);
+out:
+ kfree(in);
+ kfree(out_real);
+ kfree(out_test);
+}
+
+static __init void test_string_escape_nomem(void)
+{
+ char *in = "\eb \\C\007\"\x90\r]";
+ char out[64], *buf = out;
+ int rc = -ENOMEM, ret;
+
+ ret = string_escape_str_any_np(in, &buf, strlen(in), NULL);
+ if (ret == rc)
+ return;
+
+ pr_err("Test 'escape nomem' failed: got %d instead of %d\n", ret, rc);
}
static int __init test_string_helpers_init(void)
@@ -94,8 +330,19 @@ static int __init test_string_helpers_init(void)
pr_info("Running tests...\n");
for (i = 0; i < UNESCAPE_ANY + 1; i++)
- test_string_unescape(i, false);
- test_string_unescape(get_random_int() % (UNESCAPE_ANY + 1), true);
+ test_string_unescape("unescape", i, false);
+ test_string_unescape("unescape inplace",
+ get_random_int() % (UNESCAPE_ANY + 1), true);
+
+ /* Without dictionary */
+ for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++)
+ test_string_escape("escape 0", escape0, i, TEST_STRING_2_DICT_0);
+
+ /* With dictionary */
+ for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++)
+ test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1);
+
+ test_string_escape_nomem();
return -EINVAL;
}
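
The flag loops above cover every combination because the ESCAPE_* constants
are consecutive low bits; a sketch of the assumed layout (quoted from
include/linux/string_helpers.h as I recall it, not part of this patch):

	/*
	 * ESCAPE_SPACE   0x01   ESCAPE_SPECIAL 0x02
	 * ESCAPE_NULL    0x04   ESCAPE_OCTAL   0x08
	 * ESCAPE_NP      0x10   ESCAPE_HEX     0x20
	 * ESCAPE_ANY    == SPACE | SPECIAL | NULL | OCTAL
	 * ESCAPE_ANY_NP == ESCAPE_ANY | ESCAPE_NP
	 *
	 * Hence (ESCAPE_ANY_NP | ESCAPE_HEX) + 1 == 0x40, so each loop runs
	 * over all 64 possible flag combinations.
	 */
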
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 89e0345733bd..23e070bcf72d 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1342,6 +1342,44 @@ static struct bpf_test tests[] = {
{ { 0, -1 } }
},
{
+ "INT: shifts by register",
+ .u.insns_int = {
+ BPF_MOV64_IMM(R0, -1234),
+ BPF_MOV64_IMM(R1, 1),
+ BPF_ALU32_REG(BPF_RSH, R0, R1),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R2, 1),
+ BPF_ALU64_REG(BPF_LSH, R0, R2),
+ BPF_MOV32_IMM(R4, -1234),
+ BPF_JMP_REG(BPF_JEQ, R0, R4, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_AND, R4, 63),
+ BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <= 46 */
+ BPF_MOV64_IMM(R3, 47),
+ BPF_ALU64_REG(BPF_ARSH, R0, R3),
+ BPF_JMP_IMM(BPF_JEQ, R0, -617, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R2, 1),
+ BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 92, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R4, 4),
+ BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 64, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R4, 5),
+ BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 160, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
"INT: DIV + ABS",
.u.insns_int = {
BPF_ALU64_REG(BPF_MOV, R6, R1),
@@ -1697,6 +1735,27 @@ static struct bpf_test tests[] = {
{ },
{ { 1, 0 } },
},
+ {
+ "load 64-bit immediate",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x567800001234LL),
+ BPF_MOV64_REG(R2, R1),
+ BPF_MOV64_REG(R3, R2),
+ BPF_ALU64_IMM(BPF_RSH, R2, 32),
+ BPF_ALU64_IMM(BPF_LSH, R3, 32),
+ BPF_ALU64_IMM(BPF_RSH, R3, 32),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
};
static struct net_device dev;
@@ -1798,7 +1857,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
break;
case INTERNAL:
- fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
+ fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
if (fp == NULL) {
pr_cont("UNEXPECTED_FAIL no memory left\n");
*err = -ENOMEM;
@@ -1835,7 +1894,7 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
int runs, u64 *duration)
{
u64 start, finish;
- int ret, i;
+ int ret = 0, i;
start = ktime_to_us(ktime_get());
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 0c7e9ab2d88f..0b79908dfe89 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -249,9 +249,7 @@ EXPORT_SYMBOL(textsearch_find_continuous);
* @flags: search flags
*
* Looks up the search algorithm module and creates a new textsearch
- * configuration for the specified pattern. Upon completion all
- * necessary refcnts are held and the configuration must be put back
- * using textsearch_put() after usage.
+ * configuration for the specified pattern.
*
* Note: The format of the pattern may not be compatible between
* the various search algorithms.
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6fe2c84eb055..ec337f64f52d 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -33,6 +33,7 @@
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/sections.h> /* for dereference_function_descriptor() */
+#include <linux/string_helpers.h>
#include "kstrtox.h"
/**
@@ -1101,6 +1102,62 @@ char *ip4_addr_string_sa(char *buf, char *end, const struct sockaddr_in *sa,
}
static noinline_for_stack
+char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
+ const char *fmt)
+{
+ bool found = true;
+ int count = 1;
+ unsigned int flags = 0;
+ int len;
+
+ if (spec.field_width == 0)
+ return buf; /* nothing to print */
+
+ if (ZERO_OR_NULL_PTR(addr))
+ return string(buf, end, NULL, spec); /* NULL pointer */
+
+
+ do {
+ switch (fmt[count++]) {
+ case 'a':
+ flags |= ESCAPE_ANY;
+ break;
+ case 'c':
+ flags |= ESCAPE_SPECIAL;
+ break;
+ case 'h':
+ flags |= ESCAPE_HEX;
+ break;
+ case 'n':
+ flags |= ESCAPE_NULL;
+ break;
+ case 'o':
+ flags |= ESCAPE_OCTAL;
+ break;
+ case 'p':
+ flags |= ESCAPE_NP;
+ break;
+ case 's':
+ flags |= ESCAPE_SPACE;
+ break;
+ default:
+ found = false;
+ break;
+ }
+ } while (found);
+
+ if (!flags)
+ flags = ESCAPE_ANY_NP;
+
+ len = spec.field_width < 0 ? 1 : spec.field_width;
+
+ /* Ignore the error. We print as many characters as we can */
+ string_escape_mem(addr, len, &buf, end - buf, flags, NULL);
+
+ return buf;
+}
+
+static noinline_for_stack
char *uuid_string(char *buf, char *end, const u8 *addr,
struct printf_spec spec, const char *fmt)
{
@@ -1221,6 +1278,17 @@ int kptr_restrict __read_mostly;
* - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order
* - 'I[6S]c' for IPv6 addresses printed as specified by
* http://tools.ietf.org/html/rfc5952
+ * - 'E[achnops]' For an escaped buffer, where rules are defined by combination
+ * of the following flags (see string_escape_mem() for the
+ * details):
+ * a - ESCAPE_ANY
+ * c - ESCAPE_SPECIAL
+ * h - ESCAPE_HEX
+ * n - ESCAPE_NULL
+ * o - ESCAPE_OCTAL
+ * p - ESCAPE_NP
+ * s - ESCAPE_SPACE
+ * By default ESCAPE_ANY_NP is used.
* - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
* "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
* Options for %pU are:
@@ -1321,6 +1389,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
}}
}
break;
+ case 'E':
+ return escaped_string(buf, end, ptr, spec, fmt);
case 'U':
return uuid_string(buf, end, ptr, spec, fmt);
case 'V':
@@ -1633,6 +1703,7 @@ qualifier:
* %piS depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address
* %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper
* case.
+ * %*pE[achnops] print an escaped buffer
* %*ph[CDN] a variable-length hex string with a separator (supports up to 64
* bytes of the input)
* %n is ignored
@@ -1937,7 +2008,7 @@ EXPORT_SYMBOL(sprintf);
* @args: Arguments for the format string
*
* The format follows C99 vsnprintf, except %n is ignored, and its argument
- * is skiped.
+ * is skipped.
*
* The return value is the number of words(32bits) which would be generated for
* the given input.