Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug         9
-rw-r--r--  lib/Makefile              1
-rw-r--r--  lib/bitmap.c             50
-rw-r--r--  lib/kstrtox.c             6
-rw-r--r--  lib/percpu-refcount.c   169
-rw-r--r--  lib/strncpy_from_user.c   2
6 files changed, 145 insertions, 92 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 39d07e754822..33bc56cf60d7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1857,15 +1857,6 @@ config PROVIDE_OHCI1394_DMA_INIT
See Documentation/debugging-via-ohci1394.txt for more information.
-config BUILD_DOCSRC
- bool "Build targets in Documentation/ tree"
- depends on HEADERS_CHECK
- help
- This option attempts to build objects from the source files in the
- kernel Documentation/ tree.
-
- Say N if you are unsure.
-
config DMA_API_DEBUG
bool "Enable debugging of DMA-API usage"
depends on HAVE_DMA_API_DEBUG
diff --git a/lib/Makefile b/lib/Makefile
index f3ca8c0ab634..50144a3aeebd 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -180,6 +180,7 @@ obj-$(CONFIG_IRQ_POLL) += irq_poll.o
obj-$(CONFIG_STACKDEPOT) += stackdepot.o
KASAN_SANITIZE_stackdepot.o := n
+KCOV_INSTRUMENT_stackdepot.o := n
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
fdt_empty_tree.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index eca88087fa8a..0b66f0e5eb6b 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -496,6 +496,11 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf);
* ranges. Consecutively set bits are shown as two hyphen-separated
* decimal numbers, the smallest and largest bit numbers set in
* the range.
+ * Optionally, each range can be postfixed to denote that only parts of it
+ * should be set. The range will be divided into groups of a specific size.
+ * From each group, only a defined number of bits will be used.
+ * Syntax: range:used_size/group_size
+ * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
*
* Returns 0 on success, -errno on invalid input strings.
* Error values:
@@ -507,16 +512,20 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
int is_user, unsigned long *maskp,
int nmaskbits)
{
- unsigned a, b;
+ unsigned int a, b, old_a, old_b;
+ unsigned int group_size, used_size;
int c, old_c, totaldigits, ndigits;
const char __user __force *ubuf = (const char __user __force *)buf;
- int at_start, in_range;
+ int at_start, in_range, in_partial_range;
totaldigits = c = 0;
+ old_a = old_b = 0;
+ group_size = used_size = 0;
bitmap_zero(maskp, nmaskbits);
do {
at_start = 1;
in_range = 0;
+ in_partial_range = 0;
a = b = 0;
ndigits = totaldigits;
@@ -547,6 +556,24 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
if ((totaldigits != ndigits) && isspace(old_c))
return -EINVAL;
+ if (c == '/') {
+ used_size = a;
+ at_start = 1;
+ in_range = 0;
+ a = b = 0;
+ continue;
+ }
+
+ if (c == ':') {
+ old_a = a;
+ old_b = b;
+ at_start = 1;
+ in_range = 0;
+ in_partial_range = 1;
+ a = b = 0;
+ continue;
+ }
+
if (c == '-') {
if (at_start || in_range)
return -EINVAL;
@@ -567,15 +594,30 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
}
if (ndigits == totaldigits)
continue;
+ if (in_partial_range) {
+ group_size = a;
+ a = old_a;
+ b = old_b;
+ old_a = old_b = 0;
+ }
/* if no digit is after '-', it's wrong */
if (at_start && in_range)
return -EINVAL;
- if (!(a <= b))
+ if (!(a <= b) || !(used_size <= group_size))
return -EINVAL;
if (b >= nmaskbits)
return -ERANGE;
while (a <= b) {
- set_bit(a, maskp);
+ if (in_partial_range) {
+ static int pos_in_group = 1;
+
+ if (pos_in_group <= used_size)
+ set_bit(a, maskp);
+
+ if (a == b || ++pos_in_group > group_size)
+ pos_in_group = 1;
+ } else
+ set_bit(a, maskp);
a++;
}
} while (buflen && c == ',');
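
A quick way to sanity-check the new "a-b:used_size/group_size" syntax is to expand one term in userspace. The following stand-alone sketch (illustrative only, not kernel code) mirrors the expansion loop in the hunk above:

/*
 * Stand-alone sketch of the new "a-b:used_size/group_size" expansion;
 * mirrors the parsing loop above, but is not kernel code.
 */
#include <stdio.h>

static void expand_partial_range(unsigned int a, unsigned int b,
				 unsigned int used_size,
				 unsigned int group_size)
{
	unsigned int pos_in_group = 1;
	const char *sep = "";

	while (a <= b) {
		if (pos_in_group <= used_size) {
			/* set_bit(a, maskp) in the kernel */
			printf("%s%u", sep, a);
			sep = ",";
		}
		if (a == b || ++pos_in_group > group_size)
			pos_in_group = 1;
		a++;
	}
	printf("\n");
}

int main(void)
{
	/* the "0-1023:2/256" example from the comment above */
	expand_partial_range(0, 1023, 2, 256);
	return 0;
}

Running it prints 0,1,256,257,512,513,768,769, matching the example in the updated comment.
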
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index d8a5cf66c316..b8e2080c1a47 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -48,11 +48,9 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long
{
unsigned long long res;
unsigned int rv;
- int overflow;
res = 0;
rv = 0;
- overflow = 0;
while (*s) {
unsigned int val;
@@ -71,15 +69,13 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long
*/
if (unlikely(res & (~0ull << 60))) {
if (res > div_u64(ULLONG_MAX - val, base))
- overflow = 1;
+ rv |= KSTRTOX_OVERFLOW;
}
res = res * base + val;
rv++;
s++;
}
*p = res;
- if (overflow)
- rv |= KSTRTOX_OVERFLOW;
return rv;
}
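
The separate flag was redundant because the return value is already a bit field: the low bits count the digits consumed, and KSTRTOX_OVERFLOW (defined in lib/kstrtox.h) occupies the top bit, so the overflow condition can be OR'ed straight into rv. A caller consumes it roughly the way _kstrtoull() does; the sketch below is a simplified, non-verbatim rendition:

/*
 * Simplified, non-verbatim rendition of a _parse_integer() caller,
 * modeled on _kstrtoull() in this file.
 */
static int parse_ull_sketch(const char *s, unsigned int base,
			    unsigned long long *res)
{
	unsigned long long tmp;
	unsigned int rv;

	rv = _parse_integer(s, base, &tmp);
	if (rv & KSTRTOX_OVERFLOW)	/* overflow now rides along in rv */
		return -ERANGE;
	if (rv == 0)			/* not a single digit parsed */
		return -EINVAL;
	s += rv;			/* low bits of rv = digits consumed */
	if (*s == '\n')
		s++;
	if (*s)
		return -EINVAL;		/* trailing junk */
	*res = tmp;
	return 0;
}
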
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 27fe74948882..9ac959ef4cae 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -33,6 +33,7 @@
#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
+static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
@@ -82,6 +83,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
atomic_long_set(&ref->count, start_count);
ref->release = release;
+ ref->confirm_switch = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
@@ -101,6 +103,8 @@ void percpu_ref_exit(struct percpu_ref *ref)
unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
if (percpu_count) {
+ /* non-NULL confirm_switch indicates switching in progress */
+ WARN_ON_ONCE(ref->confirm_switch);
free_percpu(percpu_count);
ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
}
@@ -161,66 +165,23 @@ static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch)
{
- if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) {
- /* switching from percpu to atomic */
- ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
-
- /*
- * Non-NULL ->confirm_switch is used to indicate that
- * switching is in progress. Use noop one if unspecified.
- */
- WARN_ON_ONCE(ref->confirm_switch);
- ref->confirm_switch =
- confirm_switch ?: percpu_ref_noop_confirm_switch;
-
- percpu_ref_get(ref); /* put after confirmation */
- call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
- } else if (confirm_switch) {
- /*
- * Somebody already set ATOMIC. Switching may still be in
- * progress. @confirm_switch must be invoked after the
- * switching is complete and a full sched RCU grace period
- * has passed. Wait synchronously for the previous
- * switching and schedule @confirm_switch invocation.
- */
- wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
- ref->confirm_switch = confirm_switch;
-
- percpu_ref_get(ref); /* put after confirmation */
- call_rcu_sched(&ref->rcu, percpu_ref_call_confirm_rcu);
+ if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
+ if (confirm_switch)
+ confirm_switch(ref);
+ return;
}
-}
-/**
- * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
- * @ref: percpu_ref to switch to atomic mode
- * @confirm_switch: optional confirmation callback
- *
- * There's no reason to use this function for the usual reference counting.
- * Use percpu_ref_kill[_and_confirm]().
- *
- * Schedule switching of @ref to atomic mode. All its percpu counts will
- * be collected to the main atomic counter. On completion, when all CPUs
- * are guaraneed to be in atomic mode, @confirm_switch, which may not
- * block, is invoked. This function may be invoked concurrently with all
- * the get/put operations and can safely be mixed with kill and reinit
- * operations. Note that @ref will stay in atomic mode across kill/reinit
- * cycles until percpu_ref_switch_to_percpu() is called.
- *
- * This function normally doesn't block and can be called from any context
- * but it may block if @confirm_kill is specified and @ref is already in
- * the process of switching to atomic mode. In such cases, @confirm_switch
- * will be invoked after the switching is complete.
- *
- * Due to the way percpu_ref is implemented, @confirm_switch will be called
- * after at least one full sched RCU grace period has passed but this is an
- * implementation detail and must not be depended upon.
- */
-void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
- percpu_ref_func_t *confirm_switch)
-{
- ref->force_atomic = true;
- __percpu_ref_switch_to_atomic(ref, confirm_switch);
+ /* switching from percpu to atomic */
+ ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+
+ /*
+ * Non-NULL ->confirm_switch is used to indicate that switching is
+ * in progress. Use noop one if unspecified.
+ */
+ ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
+
+ percpu_ref_get(ref); /* put after confirmation */
+ call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
@@ -233,8 +194,6 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
return;
- wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-
atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
/*
@@ -250,6 +209,58 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}
+static void __percpu_ref_switch_mode(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ lockdep_assert_held(&percpu_ref_switch_lock);
+
+ /*
+ * If the previous ATOMIC switching hasn't finished yet, wait for
+ * its completion. If the caller ensures that ATOMIC switching
+ * isn't in progress, this function can be called from any context.
+ */
+ wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
+ percpu_ref_switch_lock);
+
+ if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+ __percpu_ref_switch_to_atomic(ref, confirm_switch);
+ else
+ __percpu_ref_switch_to_percpu(ref);
+}
+
+/**
+ * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
+ * @ref: percpu_ref to switch to atomic mode
+ * @confirm_switch: optional confirmation callback
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * Use percpu_ref_kill[_and_confirm]().
+ *
+ * Schedule switching of @ref to atomic mode. All its percpu counts will
+ * be collected to the main atomic counter. On completion, when all CPUs
+ * are guaranteed to be in atomic mode, @confirm_switch, which may not
+ * block, is invoked. This function may be invoked concurrently with all
+ * the get/put operations and can safely be mixed with kill and reinit
+ * operations. Note that @ref will stay in atomic mode across kill/reinit
+ * cycles until percpu_ref_switch_to_percpu() is called.
+ *
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
+ */
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
+ ref->force_atomic = true;
+ __percpu_ref_switch_mode(ref, confirm_switch);
+
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+}
+
/**
* percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
* @ref: percpu_ref to switch to percpu mode
@@ -264,17 +275,20 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
* dying or dead, the actual switching takes place on the following
* percpu_ref_reinit().
*
- * This function normally doesn't block and can be called from any context
- * but it may block if @ref is in the process of switching to atomic mode
- * by percpu_ref_switch_atomic().
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
*/
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
ref->force_atomic = false;
+ __percpu_ref_switch_mode(ref, NULL);
- /* a dying or dead ref can't be switched to percpu mode w/o reinit */
- if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
- __percpu_ref_switch_to_percpu(ref);
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
/**
@@ -290,21 +304,23 @@ void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
*
* This function normally doesn't block and can be called from any context
* but it may block if @confirm_kill is specified and @ref is in the
- * process of switching to atomic mode by percpu_ref_switch_atomic().
- *
- * Due to the way percpu_ref is implemented, @confirm_switch will be called
- * after at least one full sched RCU grace period has passed but this is an
- * implementation detail and must not be depended upon.
+ * process of switching to atomic mode by percpu_ref_switch_to_atomic().
*/
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
"%s called more than once on %pf!", __func__, ref->release);
ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
- __percpu_ref_switch_to_atomic(ref, confirm_kill);
+ __percpu_ref_switch_mode(ref, confirm_kill);
percpu_ref_put(ref);
+
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
@@ -321,11 +337,16 @@ EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
*/
void percpu_ref_reinit(struct percpu_ref *ref)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
WARN_ON_ONCE(!percpu_ref_is_zero(ref));
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
percpu_ref_get(ref);
- if (!ref->force_atomic)
- __percpu_ref_switch_to_percpu(ref);
+ __percpu_ref_switch_mode(ref, NULL);
+
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
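
With this rework, every entry point funnels through __percpu_ref_switch_mode() while holding percpu_ref_switch_lock: the helper first drains any in-flight switch, then picks the target mode from ref->force_atomic and __PERCPU_REF_DEAD. A minimal caller-side sketch of the lifecycle these paths now serialize (my_ref and my_release are illustrative names; the API calls are the standard percpu-refcount ones):

/*
 * Illustrative lifecycle exercising the reworked switching paths;
 * my_ref and my_release are placeholder names, not kernel symbols.
 */
#include <linux/percpu-refcount.h>

static struct percpu_ref my_ref;

static void my_release(struct percpu_ref *ref)
{
	/* last reference dropped; free the containing object here */
}

static int my_init(void)
{
	int ret;

	ret = percpu_ref_init(&my_ref, my_release, 0, GFP_KERNEL);
	if (ret)
		return ret;

	/* force atomic mode; serialized by percpu_ref_switch_lock */
	percpu_ref_switch_to_atomic(&my_ref, NULL);

	/* back to percpu mode; waits for any pending switch to finish */
	percpu_ref_switch_to_percpu(&my_ref);

	/* begin shutdown: DEAD is set and the ref drains in atomic mode */
	percpu_ref_kill(&my_ref);
	return 0;
}
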
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 9c5fe8110413..7e35fc450c5b 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -1,6 +1,7 @@
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/kasan-checks.h>
+#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -111,6 +112,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
long retval;
kasan_check_write(dst, count);
+ check_object_size(dst, count, false);
user_access_begin();
retval = do_strncpy_from_user(dst, src, count, max);
user_access_end();
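
check_object_size() is the CONFIG_HARDENED_USERCOPY hook from linux/thread_info.h (hence the new include): when the option is enabled it verifies, before any bytes move, that dst and count stay within a single slab object, stack frame, or other valid region, and it compiles away to nothing otherwise. A hedged sketch of the calling pattern this hardens (get_user_name and its buffer are illustrative):

/*
 * Illustrative caller of strncpy_from_user(); the hardened-usercopy
 * check above now validates @dst/@dst_size before the copy happens.
 */
#include <linux/uaccess.h>
#include <linux/errno.h>

static long get_user_name(char *dst, long dst_size,
			  const char __user *src)
{
	long len;

	len = strncpy_from_user(dst, src, dst_size);
	if (len < 0)
		return len;		/* -EFAULT on a bad user pointer */
	if (len == dst_size)
		return -ENAMETOOLONG;	/* truncated, not NUL-terminated */
	return 0;
}
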