author    Max Krummenacher <max.krummenacher@toradex.com>  2017-07-11 16:17:20 +0200
committer Max Krummenacher <max.krummenacher@toradex.com>  2017-07-11 16:21:22 +0200
commit    d16f2d4bfbe79d9bfa730134fb37c6447124abd3
tree      ac244230b6d04730f88f36045d4f87618f99b6e3
parent    afa46eed44a4576e69ee4921c69be57ebb3d867c
linux-toradex-rt-3.14.52: add patches to metadata (jethro-next)
The checksums of the files fetched from GitHub's linux-fslc repository
changed. Rather than fixing the checksums, provide the patches from the
metadata.

Signed-off-by: Max Krummenacher <max.krummenacher@toradex.com>
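In Yocto terms, providing the patches "from the metadata" means carrying the
.patch files inside the layer and applying them via the recipe's SRC_URI,
instead of fetching files from the remote repository whose checksums can
drift. A hypothetical BitBake sketch of the corresponding recipe change (the
recipe edit itself is not part of this diff, which only adds the two patch
files):

    # Hypothetical excerpt of linux-toradex-rt_3.14.52.bb -- for illustration
    # only; the recipe is not shown in this commit.
    # The directory linux-toradex-rt-3.14.52/ matches BitBake's default
    # ${BP} search path, so file:// URIs resolve to the patches added here.
    SRC_URI += " \
        file://cond_resched.patch \
        file://rename_define.patch \
    "

Local file:// sources need no md5sum/sha256sum entries, so the build no
longer depends on the stability of files served by GitHub.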
Diffstat (limited to 'recipes-kernel/linux/linux-toradex-rt-3.14.52')
-rw-r--r--  recipes-kernel/linux/linux-toradex-rt-3.14.52/cond_resched.patch  | 186
-rw-r--r--  recipes-kernel/linux/linux-toradex-rt-3.14.52/rename_define.patch |  59
2 files changed, 245 insertions(+), 0 deletions(-)
diff --git a/recipes-kernel/linux/linux-toradex-rt-3.14.52/cond_resched.patch b/recipes-kernel/linux/linux-toradex-rt-3.14.52/cond_resched.patch
new file mode 100644
index 0000000..1b9524a
--- /dev/null
+++ b/recipes-kernel/linux/linux-toradex-rt-3.14.52/cond_resched.patch
@@ -0,0 +1,186 @@
+From e211cb68dd3c951b104ff0b47dbaed2c8b8d2399 Mon Sep 17 00:00:00 2001
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Date: Wed, 15 Jul 2015 12:52:04 +0300
+Subject: [PATCH] sched/preempt: Fix cond_resched_lock() and
+ cond_resched_softirq()
+
+commit fe32d3cd5e8eb0f82e459763374aa80797023403 upstream.
+
+These functions check should_resched() before unlocking spinlock/bh-enable:
+preempt_count always non-zero => should_resched() always returns false.
+cond_resched_lock() worked iff spin_needbreak is set.
+
+This patch adds argument "preempt_offset" to should_resched().
+
+preempt_count offset constants for that:
+
+ PREEMPT_DISABLE_OFFSET - offset after preempt_disable()
+ PREEMPT_LOCK_OFFSET - offset after spin_lock()
+ SOFTIRQ_DISABLE_OFFSET - offset after local_bh_disable()
+ SOFTIRQ_LOCK_OFFSET - offset after spin_lock_bh()
+
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Graf <agraf@suse.de>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: David Vrabel <david.vrabel@citrix.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: bdb438065890 ("sched: Extract the basic add/sub preempt_count modifiers")
+Link: http://lkml.kernel.org/r/20150715095204.12246.98268.stgit@buzz
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/preempt.h | 4 ++--
+ include/asm-generic/preempt.h | 5 +++--
+ include/linux/preempt.h | 5 +++--
+ include/linux/preempt_mask.h | 14 +++++++++++---
+ include/linux/sched.h | 6 ------
+ kernel/sched/core.c | 6 +++---
+ 6 files changed, 22 insertions(+), 18 deletions(-)
+
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
+index b39e194f6c8d1..999b4a3e65f58 100644
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -105,9 +105,9 @@ static __always_inline bool __preempt_count_dec_and_test(void)
+ /*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+-static __always_inline bool should_resched(void)
++static __always_inline bool should_resched(int preempt_offset)
+ {
+- return unlikely(!__this_cpu_read_4(__preempt_count));
++ return unlikely(__this_cpu_read_4(__preempt_count) == preempt_offset);
+ }
+
+ #ifdef CONFIG_PREEMPT
+diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
+index 1cd3f5d767a81..54352f4dde1a4 100644
+--- a/include/asm-generic/preempt.h
++++ b/include/asm-generic/preempt.h
+@@ -74,9 +74,10 @@ static __always_inline bool __preempt_count_dec_and_test(void)
+ /*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+-static __always_inline bool should_resched(void)
++static __always_inline bool should_resched(int preempt_offset)
+ {
+- return unlikely(!preempt_count() && tif_need_resched());
++ return unlikely(preempt_count() == preempt_offset &&
++ tif_need_resched());
+ }
+
+ #ifdef CONFIG_PREEMPT
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 1841b58cf1734..411a5c6371da8 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -22,7 +22,8 @@
+ #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+ extern void preempt_count_add(int val);
+ extern void preempt_count_sub(int val);
+-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
++#define preempt_count_dec_and_test() \
++ ({ preempt_count_sub(1); should_resched(0); })
+ #else
+ #define preempt_count_add(val) __preempt_count_add(val)
+ #define preempt_count_sub(val) __preempt_count_sub(val)
+@@ -61,7 +62,7 @@ do { \
+
+ #define preempt_check_resched() \
+ do { \
+- if (should_resched()) \
++ if (should_resched(0)) \
+ __preempt_schedule(); \
+ } while (0)
+
+diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
+index 1f654ee836b7d..5cb25f17331a3 100644
+--- a/include/linux/preempt_mask.h
++++ b/include/linux/preempt_mask.h
+@@ -71,13 +71,21 @@
+ */
+ #define in_nmi() (preempt_count() & NMI_MASK)
+
++/*
++ * The preempt_count offset after preempt_disable();
++ */
+ #if defined(CONFIG_PREEMPT_COUNT)
+-# define PREEMPT_DISABLE_OFFSET 1
++# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
+ #else
+-# define PREEMPT_DISABLE_OFFSET 0
++# define PREEMPT_DISABLE_OFFSET 0
+ #endif
+
+ /*
++ * The preempt_count offset after spin_lock()
++ */
++#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
++
++/*
+ * The preempt_count offset needed for things like:
+ *
+ * spin_lock_bh()
+@@ -90,7 +98,7 @@
+ *
+ * Work as expected.
+ */
+-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
++#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
+
+ /*
+ * Are we running in atomic context? WARNING: this macro cannot
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 91fe6a38b3076..ec6000f66e75b 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2647,12 +2647,6 @@ extern int _cond_resched(void);
+
+ extern int __cond_resched_lock(spinlock_t *lock);
+
+-#ifdef CONFIG_PREEMPT_COUNT
+-#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
+-#else
+-#define PREEMPT_LOCK_OFFSET 0
+-#endif
+-
+ #define cond_resched_lock(lock) ({ \
+ __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+ __cond_resched_lock(lock); \
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index a19262a7d70b3..bbe957762ace6 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4113,7 +4113,7 @@ static void __cond_resched(void)
+
+ int __sched _cond_resched(void)
+ {
+- if (should_resched()) {
++ if (should_resched(0)) {
+ __cond_resched();
+ return 1;
+ }
+@@ -4131,7 +4131,7 @@ EXPORT_SYMBOL(_cond_resched);
+ */
+ int __cond_resched_lock(spinlock_t *lock)
+ {
+- int resched = should_resched();
++ int resched = should_resched(PREEMPT_LOCK_OFFSET);
+ int ret = 0;
+
+ lockdep_assert_held(lock);
+@@ -4153,7 +4153,7 @@ int __sched __cond_resched_softirq(void)
+ {
+ BUG_ON(!in_softirq());
+
+- if (should_resched()) {
++ if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
+ local_bh_enable();
+ __cond_resched();
+ local_bh_disable();
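The core of the fix in cond_resched.patch above is easiest to see outside the
kernel: with the old zero-argument signature, any held lock keeps
preempt_count non-zero, so should_resched() can never fire inside
cond_resched_lock(). A minimal user-space C model (simplified constants, not
kernel code; the real kernel derives these from its preempt_count bit layout):

    #include <stdbool.h>
    #include <stdio.h>

    #define PREEMPT_LOCK_OFFSET 1    /* preempt_count cost of one spin_lock() */

    static int preempt_count;        /* stands in for the per-CPU __preempt_count */
    static bool need_resched = true; /* pretend the scheduler wants the CPU back */

    /* Old check: resched only when the count is exactly zero. */
    static bool should_resched_old(void)
    {
        return preempt_count == 0 && need_resched;
    }

    /* Fixed check: the caller declares the offset its context legitimately holds. */
    static bool should_resched(int preempt_offset)
    {
        return preempt_count == preempt_offset && need_resched;
    }

    int main(void)
    {
        preempt_count = PREEMPT_LOCK_OFFSET;   /* as if inside spin_lock() */
        printf("old: %d\n", should_resched_old());                /* 0: never fires */
        printf("new: %d\n", should_resched(PREEMPT_LOCK_OFFSET)); /* 1: can resched */
        return 0;
    }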
diff --git a/recipes-kernel/linux/linux-toradex-rt-3.14.52/rename_define.patch b/recipes-kernel/linux/linux-toradex-rt-3.14.52/rename_define.patch
new file mode 100644
index 0000000..6f5b962
--- /dev/null
+++ b/recipes-kernel/linux/linux-toradex-rt-3.14.52/rename_define.patch
@@ -0,0 +1,59 @@
+From d379e64ca4fc535334a02dc0314cba6e50f4b720 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <fweisbec@gmail.com>
+Date: Tue, 12 May 2015 16:41:48 +0200
+Subject: [PATCH] sched/preempt: Rename PREEMPT_CHECK_OFFSET to
+ PREEMPT_DISABLE_OFFSET
+
+commit 90b62b5129d5cb50f62f40e684de7a1961e57197 upstream.
+
+"CHECK" suggests it's only used as a comparison mask. But now it's used
+further as a config-conditional preempt disabler offset. Let's
+disambiguate this name.
+
+Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1431441711-29753-4-git-send-email-fweisbec@gmail.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/preempt_mask.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
+index dbeec4d4a3bea..1f654ee836b7d 100644
+--- a/include/linux/preempt_mask.h
++++ b/include/linux/preempt_mask.h
+@@ -72,9 +72,9 @@
+ #define in_nmi() (preempt_count() & NMI_MASK)
+
+ #if defined(CONFIG_PREEMPT_COUNT)
+-# define PREEMPT_CHECK_OFFSET 1
++# define PREEMPT_DISABLE_OFFSET 1
+ #else
+-# define PREEMPT_CHECK_OFFSET 0
++# define PREEMPT_DISABLE_OFFSET 0
+ #endif
+
+ /*
+@@ -90,7 +90,7 @@
+ *
+ * Work as expected.
+ */
+-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
++#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+
+ /*
+ * Are we running in atomic context? WARNING: this macro cannot
+@@ -106,7 +106,7 @@
+ * (used by the scheduler, *after* releasing the kernel lock)
+ */
+ #define in_atomic_preempt_off() \
+- ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
++ ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
+
+ #ifdef CONFIG_PREEMPT_COUNT
+ # define preemptible() (preempt_count() == 0 && !irqs_disabled())
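The rename matters because the constant now does double duty: it is both the
comparison value in in_atomic_preempt_off() and, after the cond_resched fix
above, the preempt_count cost of one preempt_disable(). A small user-space
model of the check, assuming CONFIG_PREEMPT_COUNT=y so the offset is 1 (the
kernel version additionally masks out PREEMPT_ACTIVE, omitted here):

    #include <stdbool.h>
    #include <stdio.h>

    #define PREEMPT_DISABLE_OFFSET 1  /* assumes CONFIG_PREEMPT_COUNT=y */

    static int preempt_count;  /* stand-in for the kernel's preempt count */

    /* Atomic for any reason other than the one preempt_disable() the caller
     * is known to hold (e.g. the scheduler after releasing the kernel lock). */
    static bool in_atomic_preempt_off(void)
    {
        return preempt_count != PREEMPT_DISABLE_OFFSET;
    }

    int main(void)
    {
        preempt_count = 1;  /* exactly one preempt_disable(): the expected state */
        printf("%d\n", in_atomic_preempt_off());  /* 0 */
        preempt_count = 2;  /* nested disable, or a spinlock held on top */
        printf("%d\n", in_atomic_preempt_off());  /* 1: unexpectedly atomic */
        return 0;
    }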