Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/bug.h | 18
-rw-r--r--  include/asm-generic/cmpxchg-local.h | 8
-rw-r--r--  include/linux/buffer_head.h | 44
-rw-r--r--  include/linux/console.h | 1
-rw-r--r--  include/linux/cpu.h | 10
-rw-r--r--  include/linux/ftrace_event.h | 3
-rw-r--r--  include/linux/hardirq.h | 16
-rw-r--r--  include/linux/hrtimer.h | 13
-rw-r--r--  include/linux/idr.h | 2
-rw-r--r--  include/linux/init_task.h | 7
-rw-r--r--  include/linux/interrupt.h | 63
-rw-r--r--  include/linux/irqflags.h | 29
-rw-r--r--  include/linux/jbd_common.h | 24
-rw-r--r--  include/linux/jump_label.h | 2
-rw-r--r--  include/linux/kdb.h | 2
-rw-r--r--  include/linux/kernel.h | 2
-rw-r--r--  include/linux/lglock.h | 100
-rw-r--r--  include/linux/list.h | 11
-rw-r--r--  include/linux/locallock.h | 230
-rw-r--r--  include/linux/mm.h | 46
-rw-r--r--  include/linux/mm_types.h | 10
-rw-r--r--  include/linux/mutex.h | 21
-rw-r--r--  include/linux/mutex_rt.h | 84
-rw-r--r--  include/linux/netdevice.h | 1
-rw-r--r--  include/linux/of.h | 2
-rw-r--r--  include/linux/page_cgroup.h | 28
-rw-r--r--  include/linux/percpu.h | 25
-rw-r--r--  include/linux/pid.h | 1
-rw-r--r--  include/linux/preempt.h | 32
-rw-r--r--  include/linux/printk.h | 8
-rw-r--r--  include/linux/radix-tree.h | 8
-rw-r--r--  include/linux/rcupdate.h | 26
-rw-r--r--  include/linux/rcutree.h | 18
-rw-r--r--  include/linux/rtmutex.h | 38
-rw-r--r--  include/linux/rwlock_rt.h | 123
-rw-r--r--  include/linux/rwlock_types.h | 7
-rw-r--r--  include/linux/rwlock_types_rt.h | 33
-rw-r--r--  include/linux/rwsem.h | 6
-rw-r--r--  include/linux/rwsem_rt.h | 105
-rw-r--r--  include/linux/sched.h | 102
-rw-r--r--  include/linux/seqlock.h | 250
-rw-r--r--  include/linux/signal.h | 1
-rw-r--r--  include/linux/skbuff.h | 7
-rw-r--r--  include/linux/smp.h | 4
-rw-r--r--  include/linux/spinlock.h | 12
-rw-r--r--  include/linux/spinlock_api_smp.h | 4
-rw-r--r--  include/linux/spinlock_rt.h | 166
-rw-r--r--  include/linux/spinlock_types.h | 79
-rw-r--r--  include/linux/spinlock_types_nort.h | 33
-rw-r--r--  include/linux/spinlock_types_raw.h | 56
-rw-r--r--  include/linux/spinlock_types_rt.h | 49
-rw-r--r--  include/linux/sysctl.h | 1
-rw-r--r--  include/linux/sysrq.h | 5
-rw-r--r--  include/linux/timer.h | 2
-rw-r--r--  include/linux/uaccess.h | 41
-rw-r--r--  include/linux/vmstat.h | 4
-rw-r--r--  include/linux/wait-simple.h | 152
-rw-r--r--  include/linux/wait.h | 5
-rw-r--r--  include/linux/workqueue.h | 5
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/trace/events/hist.h | 69
-rw-r--r--  include/trace/events/latency_hist.h | 30
62 files changed, 2008 insertions(+), 277 deletions(-)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 84458b0c38d1..97c1eafe24c3 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -3,6 +3,10 @@
#include <linux/compiler.h>
+#ifndef __ASSEMBLY__
+extern void __WARN_ON(const char *func, const char *file, const int line);
+#endif /* __ASSEMBLY__ */
+
#ifdef CONFIG_BUG
#ifdef CONFIG_GENERIC_BUG
@@ -202,4 +206,18 @@ extern void warn_slowpath_null(const char *file, const int line);
# define WARN_ON_SMP(x) ({0;})
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define BUG_ON_RT(c) BUG_ON(c)
+# define BUG_ON_NONRT(c) do { } while (0)
+# define WARN_ON_RT(condition) WARN_ON(condition)
+# define WARN_ON_NONRT(condition) do { } while (0)
+# define WARN_ON_ONCE_NONRT(condition) do { } while (0)
+#else
+# define BUG_ON_RT(c) do { } while (0)
+# define BUG_ON_NONRT(c) BUG_ON(c)
+# define WARN_ON_RT(condition) do { } while (0)
+# define WARN_ON_NONRT(condition) WARN_ON(condition)
+# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
+#endif
+
#endif
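
The RT/non-RT assertion macros added above let code keep checks that are only meaningful on one configuration. A minimal usage sketch (illustrative only; the function and conditions are not part of the patch):

    static void example_assert_context(void)
    {
        /* Only meaningful on !RT, where softirqs actually run in softirq context */
        WARN_ON_NONRT(in_serving_softirq());
        /* Only checked on RT kernels, e.g. before taking a sleeping lock */
        BUG_ON_RT(in_atomic());
    }
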
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index 2533fddd34a6..d8d4c898c1bb 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
if (size == 8 && sizeof(unsigned long) != 8)
wrong_size_cmpxchg(ptr);
- local_irq_save(flags);
+ raw_local_irq_save(flags);
switch (size) {
case 1: prev = *(u8 *)ptr;
if (prev == old)
@@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
default:
wrong_size_cmpxchg(ptr);
}
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
return prev;
}
@@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
u64 prev;
unsigned long flags;
- local_irq_save(flags);
+ raw_local_irq_save(flags);
prev = *(u64 *)ptr;
if (prev == old)
*(u64 *)ptr = new;
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
return prev;
}
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 458f497738a4..3f8e27b16ed5 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -72,8 +72,52 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spinlock_t b_uptodate_lock;
+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
+ spinlock_t b_state_lock;
+ spinlock_t b_journal_head_lock;
+#endif
+#endif
};
+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
+{
+ unsigned long flags;
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
+#else
+ spin_lock_irqsave(&bh->b_uptodate_lock, flags);
+#endif
+ return flags;
+}
+
+static inline void
+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
+{
+#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
+ local_irq_restore(flags);
+#else
+ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
+#endif
+}
+
+static inline void buffer_head_init_locks(struct buffer_head *bh)
+{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spin_lock_init(&bh->b_uptodate_lock);
+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
+ spin_lock_init(&bh->b_state_lock);
+ spin_lock_init(&bh->b_journal_head_lock);
+#endif
+#endif
+}
+
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
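
A sketch of how the new b_uptodate lock helpers are meant to be paired in an I/O completion path (hypothetical caller, not taken from the patch):

    static void example_end_buffer_io(struct buffer_head *bh, int uptodate)
    {
        unsigned long flags;

        flags = bh_uptodate_lock_irqsave(bh);
        if (uptodate)
            set_buffer_uptodate(bh);
        else
            clear_buffer_uptodate(bh);
        bh_uptodate_unlock_irqrestore(bh, flags);
    }
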
diff --git a/include/linux/console.h b/include/linux/console.h
index 7453cfd593c8..e5b5dc0c737b 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -133,6 +133,7 @@ struct console {
for (con = console_drivers; con != NULL; con = con->next)
extern int console_set_on_cmdline;
+extern struct console *early_console;
extern int add_preferred_console(char *name, int idx, char *options);
extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 6cb60fd2ea84..72e90bbf625a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -66,8 +66,10 @@ enum {
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
- /* prepare workqueues for other notifiers */
- CPU_PRI_WORKQUEUE = 5,
+
+ CPU_PRI_WORKQUEUE_ACTIVE = 5, /* prepare workqueues for others */
+ CPU_PRI_NORMAL = 0,
+ CPU_PRI_WORKQUEUE_INACTIVE = -5, /* flush workqueues after others */
};
#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
@@ -167,6 +169,8 @@ extern struct sysdev_class cpu_sysdev_class;
extern void get_online_cpus(void);
extern void put_online_cpus(void);
+extern void pin_current_cpu(void);
+extern void unpin_current_cpu(void);
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
@@ -189,6 +193,8 @@ static inline void cpu_hotplug_driver_unlock(void)
#define get_online_cpus() do { } while (0)
#define put_online_cpus() do { } while (0)
+static inline void pin_current_cpu(void) { }
+static inline void unpin_current_cpu(void) { }
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c3da42dd22ba..7c5e1765a0fd 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -49,7 +49,8 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
- int padding;
+ unsigned short migrate_disable;
+ unsigned short padding;
};
#define FTRACE_MAX_EVENT \
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f743883f769e..7059ce231a36 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -60,7 +60,11 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#else
+# define SOFTIRQ_DISABLE_OFFSET (0)
+#endif
#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS 1
@@ -73,10 +77,17 @@
#endif
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| NMI_MASK))
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+#else
+# define softirq_count() (0UL)
+extern int in_serving_softirq(void);
+#endif
+
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
@@ -86,7 +97,6 @@
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
/*
* Are we in NMI context?
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index fd0dc30c9f15..0e3708636ce0 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -111,6 +111,8 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
+ struct list_head cb_entry;
+ int irqsafe;
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
@@ -147,6 +149,7 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
+ struct list_head expired;
ktime_t resolution;
ktime_t (*get_time)(void);
ktime_t softirq_time;
@@ -187,6 +190,9 @@ struct hrtimer_cpu_base {
unsigned long nr_hangs;
ktime_t max_hang_time;
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+ wait_queue_head_t wait;
+#endif
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
};
@@ -374,6 +380,13 @@ static inline int hrtimer_restart(struct hrtimer *timer)
return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
+/* Softirq preemption could deadlock timer removal */
+#ifdef CONFIG_PREEMPT_RT_BASE
+ extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
+#else
+# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
+#endif
+
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
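
With softirq-preemptible timer callbacks, a task cancelling a timer may need to wait for a running callback instead of spinning; a sketch of the resulting cancel-and-wait loop (assumes process context; not copied verbatim from the patch):

    static void example_hrtimer_cancel_sync(struct hrtimer *timer)
    {
        for (;;) {
            /* Returns -1 while the callback is still running */
            if (hrtimer_try_to_cancel(timer) >= 0)
                break;
            /* Blocks on RT; cpu_relax()-spins on !RT */
            hrtimer_wait_for_timer(timer);
        }
    }
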
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 255491cf522e..4eaacf03e4a6 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -136,7 +136,7 @@ struct ida {
struct ida_bitmap *free_bitmap;
};
-#define IDA_INIT(name) { .idr = IDR_INIT(name), .free_bitmap = NULL, }
+#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 32574eef9394..cfd9f8de4dc2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -126,6 +126,12 @@ extern struct cred init_cred;
# define INIT_PERF_EVENTS(tsk)
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define INIT_TIMER_LIST .posix_timer_list = NULL,
+#else
+# define INIT_TIMER_LIST
+#endif
+
#define INIT_TASK_COMM "swapper"
/*
@@ -180,6 +186,7 @@ extern struct cred init_cred;
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
+ INIT_TIMER_LIST \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a64b00e286f5..bb4b441b1860 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -219,7 +219,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
-# define local_irq_enable_in_hardirq() local_irq_enable()
+# define local_irq_enable_in_hardirq() local_irq_enable_nort()
#endif
extern void disable_irq_nosync(unsigned int irq);
@@ -396,9 +396,13 @@ static inline int disable_irq_wake(unsigned int irq)
#ifdef CONFIG_IRQ_FORCED_THREADING
-extern bool force_irqthreads;
+# ifndef CONFIG_PREEMPT_RT_BASE
+ extern bool force_irqthreads;
+# else
+# define force_irqthreads (true)
+# endif
#else
-#define force_irqthreads (0)
+#define force_irqthreads (false)
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
@@ -452,8 +456,14 @@ struct softirq_action
void (*action)(struct softirq_action *);
};
+#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
+static inline void thread_do_softirq(void) { do_softirq(); }
+#else
+extern void thread_do_softirq(void);
+#endif
+
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
static inline void __raise_softirq_irqoff(unsigned int nr)
@@ -465,6 +475,8 @@ static inline void __raise_softirq_irqoff(unsigned int nr)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
+extern void softirq_check_pending_idle(void);
+
/* This is the worklist that queues up per-cpu softirq work.
*
* send_remote_sendirq() adds work to these lists, and
@@ -505,8 +517,9 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
- * If this tasklet is already running on another CPU (or schedule is called
- from tasklet itself), it is rescheduled for later.
+ * If this tasklet is already running on another CPU, it is rescheduled
+ for later.
+ * Schedule must not be called from the tasklet itself (a lockup occurs)
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
@@ -531,27 +544,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
+ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
+ TASKLET_STATE_PENDING /* Tasklet is pending */
};
-#ifdef CONFIG_SMP
+#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
+#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
+#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}
+static inline int tasklet_tryunlock(struct tasklet_struct *t)
+{
+ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
+}
+
static inline void tasklet_unlock(struct tasklet_struct *t)
{
smp_mb__before_clear_bit();
clear_bit(TASKLET_STATE_RUN, &(t)->state);
}
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
+extern void tasklet_unlock_wait(struct tasklet_struct *t);
+
#else
#define tasklet_trylock(t) 1
+#define tasklet_tryunlock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
@@ -600,17 +622,8 @@ static inline void tasklet_disable(struct tasklet_struct *t)
smp_mb();
}
-static inline void tasklet_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic_dec();
- atomic_dec(&t->count);
-}
-
-static inline void tasklet_hi_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic_dec();
- atomic_dec(&t->count);
-}
+extern void tasklet_enable(struct tasklet_struct *t);
+extern void tasklet_hi_enable(struct tasklet_struct *t);
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
@@ -642,6 +655,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
tasklet_kill(&ttimer->tasklet);
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void softirq_early_init(void);
+#else
+static inline void softirq_early_init(void) { }
+#endif
+
/*
* Autoprobing for irqs:
*
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index d176d658fe25..a52b35d4229f 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -25,8 +25,6 @@
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
@@ -39,9 +37,15 @@
# define trace_softirqs_enabled(p) 0
# define trace_hardirq_enter() do { } while (0)
# define trace_hardirq_exit() do { } while (0)
+# define INIT_TRACE_IRQFLAGS
+#endif
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
+# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
+#else
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \
@@ -147,4 +151,23 @@
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+/*
+ * local_irq* variants depending on RT/!RT
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define local_irq_disable_nort() do { } while (0)
+# define local_irq_enable_nort() do { } while (0)
+# define local_irq_save_nort(flags) do { local_save_flags(flags); } while (0)
+# define local_irq_restore_nort(flags) do { (void)(flags); } while (0)
+# define local_irq_disable_rt() local_irq_disable()
+# define local_irq_enable_rt() local_irq_enable()
+#else
+# define local_irq_disable_nort() local_irq_disable()
+# define local_irq_enable_nort() local_irq_enable()
+# define local_irq_save_nort(flags) local_irq_save(flags)
+# define local_irq_restore_nort(flags) local_irq_restore(flags)
+# define local_irq_disable_rt() do { } while (0)
+# define local_irq_enable_rt() do { } while (0)
+#endif
+
#endif
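
A sketch of the substitution pattern the *_nort() helpers enable: sections that only need hard interrupts off on !RT stay preemptible on RT (the device structure and field are illustrative):

    struct example_dev {
        unsigned long stat_count;
    };

    static void example_update_stats(struct example_dev *dev)
    {
        unsigned long flags;

        /* IRQs really disabled on !RT; flags merely saved on RT */
        local_irq_save_nort(flags);
        dev->stat_count++;
        local_irq_restore_nort(flags);
    }
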
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
index 6230f8556a4e..11c313e8561d 100644
--- a/include/linux/jbd_common.h
+++ b/include/linux/jbd_common.h
@@ -37,32 +37,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_State, &bh->b_state);
+#else
+ spin_lock(&bh->b_state_lock);
+#endif
}
static inline int jbd_trylock_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_trylock(BH_State, &bh->b_state);
+#else
+ return spin_trylock(&bh->b_state_lock);
+#endif
}
static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_is_locked(BH_State, &bh->b_state);
+#else
+ return spin_is_locked(&bh->b_state_lock);
+#endif
}
static inline void jbd_unlock_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_State, &bh->b_state);
+#else
+ spin_unlock(&bh->b_state_lock);
+#endif
}
static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_JournalHead, &bh->b_state);
+#else
+ spin_lock(&bh->b_journal_head_lock);
+#endif
}
static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_JournalHead, &bh->b_state);
+#else
+ spin_unlock(&bh->b_journal_head_lock);
+#endif
}
#endif
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 388b0d425b50..9cc8ed96888f 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && !defined(CONFIG_PREEMPT_BASE)
struct jump_label_key {
atomic_t enabled;
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 064725854db8..0d1ebfc9ff43 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -150,12 +150,14 @@ extern int kdb_register(char *, kdb_func_t, char *, char *, short);
extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
short, kdb_repeat_t);
extern int kdb_unregister(char *);
+#define in_kdb_printk() (kdb_trap_printk)
#else /* ! CONFIG_KGDB_KDB */
#define kdb_printf(...)
#define kdb_init(x)
#define kdb_register(...)
#define kdb_register_repeat(...)
#define kdb_uregister(x)
+#define in_kdb_printk() (0)
#endif /* CONFIG_KGDB_KDB */
enum {
KDB_NOT_INITIALIZED,
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e8b1597b5cf2..c485aaa618d6 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -356,7 +356,7 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
- SYSTEM_SUSPEND_DISK,
+ SYSTEM_SUSPEND,
} system_state;
#define TAINT_PROPRIETARY_MODULE 0
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 87f402ccec55..d8acbcc8a2bf 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -71,6 +71,9 @@
extern void name##_global_lock_online(void); \
extern void name##_global_unlock_online(void); \
+
+#ifndef CONFIG_PREEMPT_RT_FULL
+
#define DEFINE_LGLOCK(name) \
\
DEFINE_SPINLOCK(name##_cpu_lock); \
@@ -197,4 +200,101 @@
preempt_enable(); \
} \
EXPORT_SYMBOL(name##_global_unlock);
+
+#else /* !PREEMPT_RT_FULL */
+#define DEFINE_LGLOCK(name) \
+ \
+ DEFINE_PER_CPU(struct rt_mutex, name##_lock); \
+ DEFINE_LGLOCK_LOCKDEP(name); \
+ \
+ void name##_lock_init(void) { \
+ int i; \
+ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+ for_each_possible_cpu(i) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ rt_mutex_init(lock); \
+ } \
+ } \
+ EXPORT_SYMBOL(name##_lock_init); \
+ \
+ void name##_local_lock(void) { \
+ struct rt_mutex *lock; \
+ migrate_disable(); \
+ rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
+ lock = &__get_cpu_var(name##_lock); \
+ __rt_spin_lock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_lock); \
+ \
+ void name##_local_unlock(void) { \
+ struct rt_mutex *lock; \
+ rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
+ lock = &__get_cpu_var(name##_lock); \
+ __rt_spin_unlock(lock); \
+ migrate_enable(); \
+ } \
+ EXPORT_SYMBOL(name##_local_unlock); \
+ \
+ void name##_local_lock_cpu(int cpu) { \
+ struct rt_mutex *lock; \
+ rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
+ lock = &per_cpu(name##_lock, cpu); \
+ __rt_spin_lock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_lock_cpu); \
+ \
+ void name##_local_unlock_cpu(int cpu) { \
+ struct rt_mutex *lock; \
+ rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
+ lock = &per_cpu(name##_lock, cpu); \
+ __rt_spin_unlock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_unlock_cpu); \
+ \
+ void name##_global_lock_online(void) { \
+ int i; \
+ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+ for_each_online_cpu(i) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_lock(lock); \
+ } \
+ } \
+ EXPORT_SYMBOL(name##_global_lock_online); \
+ \
+ void name##_global_unlock_online(void) { \
+ int i; \
+ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+ for_each_online_cpu(i) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_unlock(lock); \
+ } \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock_online); \
+ \
+ void name##_global_lock(void) { \
+ int i; \
+ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+ for_each_possible_cpu(i) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_lock(lock); \
+ } \
+ } \
+ EXPORT_SYMBOL(name##_global_lock); \
+ \
+ void name##_global_unlock(void) { \
+ int i; \
+ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+ for_each_possible_cpu(i) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_unlock(lock); \
+ } \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock);
+#endif /* PREEMPT_RT_FULL */
+
#endif
diff --git a/include/linux/list.h b/include/linux/list.h
index cc6d2aa6b415..7a9851bbef04 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -362,6 +362,17 @@ static inline void list_splice_tail_init(struct list_head *list,
list_entry((ptr)->next, type, member)
/**
+ * list_last_entry - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Note, that list is expected to be not empty.
+ */
+#define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
diff --git a/include/linux/locallock.h b/include/linux/locallock.h
new file mode 100644
index 000000000000..8fbc3938638f
--- /dev/null
+++ b/include/linux/locallock.h
@@ -0,0 +1,230 @@
+#ifndef _LINUX_LOCALLOCK_H
+#define _LINUX_LOCALLOCK_H
+
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define LL_WARN(cond) WARN_ON(cond)
+#else
+# define LL_WARN(cond) do { } while (0)
+#endif
+
+/*
+ * per cpu lock based substitute for local_irq_*()
+ */
+struct local_irq_lock {
+ spinlock_t lock;
+ struct task_struct *owner;
+ int nestcnt;
+ unsigned long flags;
+};
+
+#define DEFINE_LOCAL_IRQ_LOCK(lvar) \
+ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \
+ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
+
+#define local_irq_lock_init(lvar) \
+ do { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) \
+ spin_lock_init(&per_cpu(lvar, __cpu).lock); \
+ } while (0)
+
+static inline void __local_lock(struct local_irq_lock *lv)
+{
+ if (lv->owner != current) {
+ spin_lock(&lv->lock);
+ LL_WARN(lv->owner);
+ LL_WARN(lv->nestcnt);
+ lv->owner = current;
+ }
+ lv->nestcnt++;
+}
+
+#define local_lock(lvar) \
+ do { __local_lock(&get_local_var(lvar)); } while (0)
+
+static inline int __local_trylock(struct local_irq_lock *lv)
+{
+ if (lv->owner != current && spin_trylock(&lv->lock)) {
+ LL_WARN(lv->owner);
+ LL_WARN(lv->nestcnt);
+ lv->owner = current;
+ lv->nestcnt = 1;
+ return 1;
+ }
+ return 0;
+}
+
+#define local_trylock(lvar) \
+ ({ \
+ int __locked; \
+ __locked = __local_trylock(&get_local_var(lvar)); \
+ if (!__locked) \
+ put_local_var(lvar); \
+ __locked; \
+ })
+
+static inline void __local_unlock(struct local_irq_lock *lv)
+{
+ LL_WARN(lv->nestcnt == 0);
+ LL_WARN(lv->owner != current);
+ if (--lv->nestcnt)
+ return;
+
+ lv->owner = NULL;
+ spin_unlock(&lv->lock);
+}
+
+#define local_unlock(lvar) \
+ do { \
+ __local_unlock(&__get_cpu_var(lvar)); \
+ put_local_var(lvar); \
+ } while (0)
+
+static inline void __local_lock_irq(struct local_irq_lock *lv)
+{
+ spin_lock_irqsave(&lv->lock, lv->flags);
+ LL_WARN(lv->owner);
+ LL_WARN(lv->nestcnt);
+ lv->owner = current;
+ lv->nestcnt = 1;
+}
+
+#define local_lock_irq(lvar) \
+ do { __local_lock_irq(&get_local_var(lvar)); } while (0)
+
+static inline void __local_unlock_irq(struct local_irq_lock *lv)
+{
+ LL_WARN(!lv->nestcnt);
+ LL_WARN(lv->owner != current);
+ lv->owner = NULL;
+ lv->nestcnt = 0;
+ spin_unlock_irq(&lv->lock);
+}
+
+#define local_unlock_irq(lvar) \
+ do { \
+ __local_unlock_irq(&__get_cpu_var(lvar)); \
+ put_local_var(lvar); \
+ } while (0)
+
+static inline int __local_lock_irqsave(struct local_irq_lock *lv)
+{
+ if (lv->owner != current) {
+ __local_lock_irq(lv);
+ return 0;
+ } else {
+ lv->nestcnt++;
+ return 1;
+ }
+}
+
+#define local_lock_irqsave(lvar, _flags) \
+ do { \
+ if (__local_lock_irqsave(&get_local_var(lvar))) \
+ put_local_var(lvar); \
+ _flags = __get_cpu_var(lvar).flags; \
+ } while (0)
+
+static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
+ unsigned long flags)
+{
+ LL_WARN(!lv->nestcnt);
+ LL_WARN(lv->owner != current);
+ if (--lv->nestcnt)
+ return 0;
+
+ lv->owner = NULL;
+ spin_unlock_irqrestore(&lv->lock, lv->flags);
+ return 1;
+}
+
+#define local_unlock_irqrestore(lvar, flags) \
+ do { \
+ if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \
+ put_local_var(lvar); \
+ } while (0)
+
+#define local_spin_trylock_irq(lvar, lock) \
+ ({ \
+ int __locked; \
+ local_lock_irq(lvar); \
+ __locked = spin_trylock(lock); \
+ if (!__locked) \
+ local_unlock_irq(lvar); \
+ __locked; \
+ })
+
+#define local_spin_lock_irq(lvar, lock) \
+ do { \
+ local_lock_irq(lvar); \
+ spin_lock(lock); \
+ } while (0)
+
+#define local_spin_unlock_irq(lvar, lock) \
+ do { \
+ spin_unlock(lock); \
+ local_unlock_irq(lvar); \
+ } while (0)
+
+#define local_spin_lock_irqsave(lvar, lock, flags) \
+ do { \
+ local_lock_irqsave(lvar, flags); \
+ spin_lock(lock); \
+ } while (0)
+
+#define local_spin_unlock_irqrestore(lvar, lock, flags) \
+ do { \
+ spin_unlock(lock); \
+ local_unlock_irqrestore(lvar, flags); \
+ } while (0)
+
+#define get_locked_var(lvar, var) \
+ (*({ \
+ local_lock(lvar); \
+ &__get_cpu_var(var); \
+ }))
+
+#define put_locked_var(lvar, var) local_unlock(lvar)
+
+#define local_lock_cpu(lvar) \
+ ({ \
+ local_lock(lvar); \
+ smp_processor_id(); \
+ })
+
+#define local_unlock_cpu(lvar) local_unlock(lvar)
+
+#else /* PREEMPT_RT_BASE */
+
+#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar
+
+static inline void local_irq_lock_init(int lvar) { }
+
+#define local_lock(lvar) preempt_disable()
+#define local_unlock(lvar) preempt_enable()
+#define local_lock_irq(lvar) local_irq_disable()
+#define local_unlock_irq(lvar) local_irq_enable()
+#define local_lock_irqsave(lvar, flags) local_irq_save(flags)
+#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags)
+
+#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock)
+#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock)
+#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock)
+#define local_spin_lock_irqsave(lvar, lock, flags) \
+ spin_lock_irqsave(lock, flags)
+#define local_spin_unlock_irqrestore(lvar, lock, flags) \
+ spin_unlock_irqrestore(lock, flags)
+
+#define get_locked_var(lvar, var) get_cpu_var(var)
+#define put_locked_var(lvar, var) put_cpu_var(var)
+
+#define local_lock_cpu(lvar) get_cpu()
+#define local_unlock_cpu(lvar) put_cpu()
+
+#endif
+
+#endif
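
A usage sketch for the new local locks, replacing an open-coded local_irq_save() around per-CPU data (the lock and per-CPU variable names are made up; on RT the lock is also expected to be registered with local_irq_lock_init() during boot):

    static DEFINE_LOCAL_IRQ_LOCK(example_lock);
    static DEFINE_PER_CPU(struct list_head, example_list);    /* initialized elsewhere */

    static void example_add(struct list_head *entry)
    {
        unsigned long flags;

        local_lock_irqsave(example_lock, flags);
        list_add_tail(entry, &__get_cpu_var(example_list));
        local_unlock_irqrestore(example_lock, flags);
    }
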
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4baadd18f4ad..c9e64e58f0d2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1195,27 +1195,59 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
* overflow into the next struct page (as it might with DEBUG_SPINLOCK).
* When freeing, reset page->mapping so free_pages_check won't complain.
*/
+#ifndef CONFIG_PREEMPT_RT_FULL
+
#define __pte_lockptr(page) &((page)->ptl)
-#define pte_lock_init(_page) do { \
- spin_lock_init(__pte_lockptr(_page)); \
-} while (0)
+
+static inline struct page *pte_lock_init(struct page *page)
+{
+ spin_lock_init(__pte_lockptr(page));
+ return page;
+}
+
#define pte_lock_deinit(page) ((page)->mapping = NULL)
+
+#else /* !PREEMPT_RT_FULL */
+
+/*
+ * On PREEMPT_RT_FULL the spinlock_t's are too large to embed in the
+ * page frame, hence it only has a pointer and we need to dynamically
+ * allocate the lock when we allocate PTE-pages.
+ *
+ * This is an overall win, since only a small fraction of the pages
+ * will be PTE pages under normal circumstances.
+ */
+
+#define __pte_lockptr(page) ((page)->ptl)
+
+extern struct page *pte_lock_init(struct page *page);
+extern void pte_lock_deinit(struct page *page);
+
+#endif /* PREEMPT_RT_FULL */
+
#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else /* !USE_SPLIT_PTLOCKS */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
-#define pte_lock_init(page) do {} while (0)
+static inline struct page *pte_lock_init(struct page *page) { return page; }
#define pte_lock_deinit(page) do {} while (0)
#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
#endif /* USE_SPLIT_PTLOCKS */
-static inline void pgtable_page_ctor(struct page *page)
+static inline struct page *__pgtable_page_ctor(struct page *page)
{
- pte_lock_init(page);
- inc_zone_page_state(page, NR_PAGETABLE);
+ page = pte_lock_init(page);
+ if (page)
+ inc_zone_page_state(page, NR_PAGETABLE);
+ return page;
}
+#define pgtable_page_ctor(page) \
+do { \
+ page = __pgtable_page_ctor(page); \
+} while (0)
+
static inline void pgtable_page_dtor(struct page *page)
{
pte_lock_deinit(page);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5b42f1b34eb7..c303a27981fe 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -12,6 +12,7 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
+#include <linux/rcupdate.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -118,7 +119,11 @@ struct page {
* system if PG_buddy is set.
*/
#if USE_SPLIT_PTLOCKS
- spinlock_t ptl;
+# ifndef CONFIG_PREEMPT_RT_FULL
+ spinlock_t ptl;
+# else
+ spinlock_t *ptl;
+# endif
#endif
struct kmem_cache *slab; /* SLUB: Pointer to slab */
struct page *first_page; /* Compound tail pages */
@@ -389,6 +394,9 @@ struct mm_struct {
#ifdef CONFIG_CPUMASK_OFFSTACK
struct cpumask cpumask_allocation;
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+ struct rcu_head delayed_drop;
+#endif
};
static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 9121595a8ebf..bdf1da2cf4b6 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -17,6 +17,17 @@
#include <linux/atomic.h>
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/mutex_rt.h>
+#else
+
/*
* Simple, straightforward mutexes with strict semantics:
*
@@ -95,13 +106,6 @@ do { \
static inline void mutex_destroy(struct mutex *lock) {}
#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- , .dep_map = { .name = #lockname }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
#define __MUTEX_INITIALIZER(lockname) \
{ .count = ATOMIC_INIT(1) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -167,6 +171,9 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
*/
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
+
+#endif /* !PREEMPT_RT_FULL */
+
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
new file mode 100644
index 000000000000..c38a44b14da5
--- /dev/null
+++ b/include/linux/mutex_rt.h
@@ -0,0 +1,84 @@
+#ifndef __LINUX_MUTEX_RT_H
+#define __LINUX_MUTEX_RT_H
+
+#ifndef __LINUX_MUTEX_H
+#error "Please include mutex.h"
+#endif
+
+#include <linux/rtmutex.h>
+
+/* FIXME: Just for __lockfunc */
+#include <linux/spinlock.h>
+
+struct mutex {
+ struct rt_mutex lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#define __MUTEX_INITIALIZER(mutexname) \
+ { \
+ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
+ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
+ }
+
+#define DEFINE_MUTEX(mutexname) \
+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
+extern void __lockfunc _mutex_lock(struct mutex *lock);
+extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
+extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
+extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
+extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
+extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
+extern int __lockfunc _mutex_trylock(struct mutex *lock);
+extern void __lockfunc _mutex_unlock(struct mutex *lock);
+
+#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
+#define mutex_lock(l) _mutex_lock(l)
+#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
+#define mutex_lock_killable(l) _mutex_lock_killable(l)
+#define mutex_trylock(l) _mutex_trylock(l)
+#define mutex_unlock(l) _mutex_unlock(l)
+#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
+# define mutex_lock_interruptible_nested(l, s) \
+ _mutex_lock_interruptible_nested(l, s)
+# define mutex_lock_killable_nested(l, s) \
+ _mutex_lock_killable_nested(l, s)
+
+# define mutex_lock_nest_lock(lock, nest_lock) \
+do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+} while (0)
+
+#else
+# define mutex_lock_nested(l, s) _mutex_lock(l)
+# define mutex_lock_interruptible_nested(l, s) \
+ _mutex_lock_interruptible(l)
+# define mutex_lock_killable_nested(l, s) \
+ _mutex_lock_killable(l)
+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+#endif
+
+# define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_init(&(mutex)->lock); \
+ __mutex_do_init((mutex), #mutex, &__key); \
+} while (0)
+
+# define __mutex_init(mutex, name, key) \
+do { \
+ rt_mutex_init(&(mutex)->lock); \
+ __mutex_do_init((mutex), name, key); \
+} while (0)
+
+#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a82ad4dd306a..a5bde7715be0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1760,6 +1760,7 @@ struct softnet_data {
unsigned dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
+ struct sk_buff_head tofree_queue;
};
static inline void input_queue_head_incr(struct softnet_data *sd)
diff --git a/include/linux/of.h b/include/linux/of.h
index 4948552d60f5..e87303dfbbfc 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -71,7 +71,7 @@ struct device_node {
extern struct device_node *allnodes;
extern struct device_node *of_chosen;
extern struct device_node *of_aliases;
-extern rwlock_t devtree_lock;
+extern raw_spinlock_t devtree_lock;
static inline bool of_have_populated_dt(void)
{
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 961ecc7d30bc..2927c084ff50 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -30,6 +30,10 @@ enum {
*/
struct page_cgroup {
unsigned long flags;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spinlock_t pcg_lock;
+ spinlock_t pcm_lock;
+#endif
struct mem_cgroup *mem_cgroup;
struct list_head lru; /* per cgroup LRU list */
};
@@ -96,30 +100,54 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
* Don't take this lock in IRQ context.
* This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
*/
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(PCG_LOCK, &pc->flags);
+#else
+ spin_lock(&pc->pcg_lock);
+#endif
}
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(PCG_LOCK, &pc->flags);
+#else
+ spin_unlock(&pc->pcg_lock);
+#endif
}
static inline void move_lock_page_cgroup(struct page_cgroup *pc,
unsigned long *flags)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
/*
* We know updates to pc->flags of page cache's stats are from both of
* usual context or IRQ context. Disable IRQ to avoid deadlock.
*/
local_irq_save(*flags);
bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
+#else
+ spin_lock_irqsave(&pc->pcm_lock, *flags);
+#endif
}
static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
unsigned long *flags)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
local_irq_restore(*flags);
+#else
+ spin_unlock_irqrestore(&pc->pcm_lock, *flags);
+#endif
+}
+
+static inline void page_cgroup_lock_init(struct page_cgroup *pc)
+{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spin_lock_init(&pc->pcg_lock);
+ spin_lock_init(&pc->pcm_lock);
+#endif
}
#ifdef CONFIG_SPARSEMEM
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9ca008f0c542..58163cd6cca1 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,6 +48,31 @@
preempt_enable(); \
} while (0)
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define get_local_var(var) get_cpu_var(var)
+# define put_local_var(var) put_cpu_var(var)
+# define get_local_ptr(var) get_cpu_ptr(var)
+# define put_local_ptr(var) put_cpu_ptr(var)
+#else
+# define get_local_var(var) (*({ \
+ migrate_disable(); \
+ &__get_cpu_var(var); }))
+
+# define put_local_var(var) do { \
+ (void)&(var); \
+ migrate_enable(); \
+} while (0)
+
+# define get_local_ptr(var) ({ \
+ migrate_disable(); \
+ this_cpu_ptr(var); })
+
+# define put_local_ptr(var) do { \
+ (void)(var); \
+ migrate_enable(); \
+} while (0)
+#endif
+
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
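
get_local_var()/put_local_var() exist because get_cpu_var() disables preemption, which forbids the sleeping locks used on RT. A sketch of the intended pattern (the pool structure and names are illustrative):

    struct example_pool {
        spinlock_t lock;        /* sleeping lock on RT */
        struct list_head items;
    };

    static DEFINE_PER_CPU(struct example_pool, example_pools);

    static void example_release(struct list_head *item)
    {
        /* !RT: preemption disabled; RT: only migration disabled, sleeping allowed */
        struct example_pool *pool = &get_local_var(example_pools);

        spin_lock(&pool->lock);
        list_add(item, &pool->items);
        spin_unlock(&pool->lock);
        put_local_var(example_pools);
    }
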
diff --git a/include/linux/pid.h b/include/linux/pid.h
index b152d44fb181..7f336833f2c5 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -2,6 +2,7 @@
#define _LINUX_PID_H
#include <linux/rcupdate.h>
+#include <linux/atomic.h>
enum pid_type
{
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 58969b2a8a82..6450c0113b5b 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -48,15 +48,21 @@ do { \
barrier(); \
} while (0)
-#define preempt_enable_no_resched() \
+#define __preempt_enable_no_resched() \
do { \
barrier(); \
dec_preempt_count(); \
} while (0)
+#ifndef CONFIG_PREEMPT_RT_BASE
+# define preempt_enable_no_resched() __preempt_enable_no_resched()
+#else
+# define preempt_enable_no_resched() preempt_enable()
+#endif
+
#define preempt_enable() \
do { \
- preempt_enable_no_resched(); \
+ __preempt_enable_no_resched(); \
barrier(); \
preempt_check_resched(); \
} while (0)
@@ -92,6 +98,7 @@ do { \
#else /* !CONFIG_PREEMPT_COUNT */
#define preempt_disable() do { } while (0)
+#define __preempt_enable_no_resched() do { } while (0)
#define preempt_enable_no_resched() do { } while (0)
#define preempt_enable() do { } while (0)
@@ -101,6 +108,27 @@ do { \
#endif /* CONFIG_PREEMPT_COUNT */
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define preempt_disable_rt() preempt_disable()
+# define preempt_enable_rt() preempt_enable()
+# define preempt_disable_nort() do { } while (0)
+# define preempt_enable_nort() do { } while (0)
+# ifdef CONFIG_SMP
+ extern void migrate_disable(void);
+ extern void migrate_enable(void);
+# else /* CONFIG_SMP */
+# define migrate_disable() do { } while (0)
+# define migrate_enable() do { } while (0)
+# endif /* CONFIG_SMP */
+#else
+# define preempt_disable_rt() do { } while (0)
+# define preempt_enable_rt() do { } while (0)
+# define preempt_disable_nort() preempt_disable()
+# define preempt_enable_nort() preempt_enable()
+# define migrate_disable() preempt_disable()
+# define migrate_enable() preempt_enable()
+#endif
+
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
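
migrate_disable()/migrate_enable() pin the current task to its CPU without disabling preemption on RT; a sketch of a caller that only needs a stable CPU, not atomicity (the per-CPU timer is illustrative and assumed to be set up at init):

    static DEFINE_PER_CPU(struct timer_list, example_timer);

    static void example_arm_local_timer(void)
    {
        /* CPU stays fixed across the call; still preemptible on RT */
        migrate_disable();
        mod_timer(&__get_cpu_var(example_timer), jiffies + HZ);
        migrate_enable();
    }
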
diff --git a/include/linux/printk.h b/include/linux/printk.h
index f0e22f75143f..a53adf62ce7d 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -88,8 +88,15 @@ int no_printk(const char *fmt, ...)
return 0;
}
+#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
+extern void printk_kill(void);
+#else
+static inline __printf(1, 2) __cold
+void early_printk(const char *s, ...) { }
+static inline void printk_kill(void) { }
+#endif
extern int printk_needs_cpu(int cpu);
extern void printk_tick(void);
@@ -109,7 +116,6 @@ extern int __printk_ratelimit(const char *func);
#define printk_ratelimit() __printk_ratelimit(__func__)
extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msec);
-
extern int printk_delay_msec;
extern int dmesg_restrict;
extern int kptr_restrict;
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 9d4539c52e53..6b4bdf2097dd 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -231,7 +231,13 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
+
+#ifndef CONFIG_PREEMPT_RT_FULL
int radix_tree_preload(gfp_t gfp_mask);
+#else
+static inline int radix_tree_preload(gfp_t gm) { return 0; }
+#endif
+
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
@@ -256,7 +262,7 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
static inline void radix_tree_preload_end(void)
{
- preempt_enable();
+ preempt_enable_nort();
}
#endif /* _LINUX_RADIX_TREE_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2cf4226ade7e..0e6fb5c9dce4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -92,6 +92,9 @@ extern void call_rcu(struct rcu_head *head,
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define call_rcu_bh call_rcu
+#else
/**
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
@@ -112,6 +115,7 @@ extern void call_rcu(struct rcu_head *head,
*/
extern void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
+#endif
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
@@ -147,6 +151,11 @@ void synchronize_rcu(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#ifndef CONFIG_PREEMPT_RT_FULL
+#define sched_rcu_preempt_depth() rcu_preempt_depth()
+#else
+static inline int sched_rcu_preempt_depth(void) { return 0; }
+#endif
#else /* #ifdef CONFIG_PREEMPT_RCU */
@@ -170,6 +179,8 @@ static inline int rcu_preempt_depth(void)
return 0;
}
+#define sched_rcu_preempt_depth() rcu_preempt_depth()
+
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/* Internal to kernel */
@@ -274,7 +285,14 @@ static inline int rcu_read_lock_held(void)
* rcu_read_lock_bh_held() is defined out of line to avoid #include-file
* hell.
*/
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int rcu_read_lock_bh_held(void)
+{
+ return rcu_read_lock_held();
+}
+#else
extern int rcu_read_lock_bh_held(void);
+#endif
/**
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
@@ -677,8 +695,12 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
+#ifdef CONFIG_PREEMPT_RT_FULL
+ rcu_read_lock();
+#else
__acquire(RCU_BH);
rcu_read_acquire_bh();
+#endif
}
/*
@@ -688,8 +710,12 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ rcu_read_unlock();
+#else
rcu_read_release_bh();
__release(RCU_BH);
+#endif
local_bh_enable();
}
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 67458468f1a8..6e503a364be6 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -57,7 +57,11 @@ static inline void exit_rcu(void)
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+#ifndef CONFIG_PREEMPT_RT_FULL
extern void synchronize_rcu_bh(void);
+#else
+# define synchronize_rcu_bh synchronize_rcu
+#endif
extern void synchronize_sched_expedited(void);
extern void synchronize_rcu_expedited(void);
@@ -67,19 +71,29 @@ static inline void synchronize_rcu_bh_expedited(void)
}
extern void rcu_barrier(void);
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define rcu_barrier_bh rcu_barrier
+#else
extern void rcu_barrier_bh(void);
+#endif
extern void rcu_barrier_sched(void);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
extern long rcu_batches_completed_sched(void);
extern void rcu_force_quiescent_state(void);
-extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);
+#ifndef CONFIG_PREEMPT_RT_FULL
+extern void rcu_bh_force_quiescent_state(void);
+extern long rcu_batches_completed_bh(void);
+#else
+# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
+# define rcu_batches_completed_bh rcu_batches_completed
+#endif
+
/* A context switch is a grace period for RCU-sched and RCU-bh. */
static inline int rcu_blocking_is_gp(void)
{
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index de17134244f3..5ebd0bbb6eaa 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -14,7 +14,7 @@
#include <linux/linkage.h>
#include <linux/plist.h>
-#include <linux/spinlock_types.h>
+#include <linux/spinlock_types_raw.h>
extern int max_lock_depth; /* for sysctl */
@@ -29,9 +29,10 @@ struct rt_mutex {
raw_spinlock_t wait_lock;
struct plist_head wait_list;
struct task_struct *owner;
-#ifdef CONFIG_DEBUG_RT_MUTEXES
int save_state;
- const char *name, *file;
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+ const char *file;
+ const char *name;
int line;
void *magic;
#endif
@@ -56,19 +57,39 @@ struct hrtimer_sleeper;
#ifdef CONFIG_DEBUG_RT_MUTEXES
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
, .name = #mutexname, .file = __FILE__, .line = __LINE__
-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
+
+# define rt_mutex_init(mutex) \
+ do { \
+ raw_spin_lock_init(&(mutex)->wait_lock); \
+ __rt_mutex_init(mutex, #mutex); \
+ } while (0)
+
extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
+
+# define rt_mutex_init(mutex) \
+ do { \
+ raw_spin_lock_init(&(mutex)->wait_lock); \
+ __rt_mutex_init(mutex, #mutex); \
+ } while (0)
+
# define rt_mutex_debug_task_free(t) do { } while (0)
#endif
-#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
, .owner = NULL \
- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
+ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
+
+
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
+
+#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
+ , .save_state = 1 }
#define DEFINE_RT_MUTEX(mutexname) \
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
@@ -90,6 +111,7 @@ extern void rt_mutex_destroy(struct rt_mutex *lock);
extern void rt_mutex_lock(struct rt_mutex *lock);
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
int detect_deadlock);
+extern int rt_mutex_lock_killable(struct rt_mutex *lock, int detect_deadlock);
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout,
int detect_deadlock);
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
new file mode 100644
index 000000000000..853ee367fef4
--- /dev/null
+++ b/include/linux/rwlock_rt.h
@@ -0,0 +1,123 @@
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#define rwlock_init(rwl) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_init(&(rwl)->lock); \
+ __rt_rwlock_init(rwl, #rwl, &__key); \
+} while (0)
+
+extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
+extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
+extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
+extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
+extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
+extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
+extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
+extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
+extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
+extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
+
+#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
+
+#define write_trylock_irqsave(lock, flags) \
+ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ migrate_disable(); \
+ flags = rt_read_lock_irqsave(lock); \
+ } while (0)
+
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ migrate_disable(); \
+ flags = rt_write_lock_irqsave(lock); \
+ } while (0)
+
+#define read_lock(lock) \
+ do { \
+ migrate_disable(); \
+ rt_read_lock(lock); \
+ } while (0)
+
+#define read_lock_bh(lock) \
+ do { \
+ local_bh_disable(); \
+ migrate_disable(); \
+ rt_read_lock(lock); \
+ } while (0)
+
+#define read_lock_irq(lock) read_lock(lock)
+
+#define write_lock(lock) \
+ do { \
+ migrate_disable(); \
+ rt_write_lock(lock); \
+ } while (0)
+
+#define write_lock_bh(lock) \
+ do { \
+ local_bh_disable(); \
+ migrate_disable(); \
+ rt_write_lock(lock); \
+ } while (0)
+
+#define write_lock_irq(lock) write_lock(lock)
+
+#define read_unlock(lock) \
+ do { \
+ rt_read_unlock(lock); \
+ migrate_enable(); \
+ } while (0)
+
+#define read_unlock_bh(lock) \
+ do { \
+ rt_read_unlock(lock); \
+ migrate_enable(); \
+ local_bh_enable(); \
+ } while (0)
+
+#define read_unlock_irq(lock) read_unlock(lock)
+
+#define write_unlock(lock) \
+ do { \
+ rt_write_unlock(lock); \
+ migrate_enable(); \
+ } while (0)
+
+#define write_unlock_bh(lock) \
+ do { \
+ rt_write_unlock(lock); \
+ migrate_enable(); \
+ local_bh_enable(); \
+ } while (0)
+
+#define write_unlock_irq(lock) write_unlock(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ (void) flags; \
+ rt_read_unlock(lock); \
+ migrate_enable(); \
+ } while (0)
+
+#define write_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ (void) flags; \
+ rt_write_unlock(lock); \
+ migrate_enable(); \
+ } while (0)
+
+#endif
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index cc0072e93e36..d0da966ad7a0 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -1,6 +1,10 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
+#if !defined(__LINUX_SPINLOCK_TYPES_H)
+# error "Do not include directly, include spinlock_types.h"
+#endif
+
/*
* include/linux/rwlock_types.h - generic rwlock type definitions
* and initializers
@@ -43,6 +47,7 @@ typedef struct {
RW_DEP_MAP_INIT(lockname) }
#endif
-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+#define DEFINE_RWLOCK(name) \
+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
#endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
new file mode 100644
index 000000000000..b13832119591
--- /dev/null
+++ b/include/linux/rwlock_types_rt.h
@@ -0,0 +1,33 @@
+#ifndef __LINUX_RWLOCK_TYPES_RT_H
+#define __LINUX_RWLOCK_TYPES_RT_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#error "Do not include directly. Include spinlock_types.h instead"
+#endif
+
+/*
+ * rwlocks - rtmutex which allows single reader recursion
+ */
+typedef struct {
+ struct rt_mutex lock;
+ int read_depth;
+ unsigned int break_lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RW_LOCK_UNLOCKED(name) \
+ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
+ RW_DEP_MAP_INIT(name) }
+
+#define DEFINE_RWLOCK(name) \
+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+
+#endif
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 63d406554391..209be4b401a6 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -17,6 +17,10 @@
#include <asm/system.h>
#include <linux/atomic.h>
+#ifdef CONFIG_PREEMPT_RT_FULL
+#include <linux/rwsem_rt.h>
+#else /* PREEMPT_RT_FULL */
+
struct rw_semaphore;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -131,4 +135,6 @@ extern void down_write_nested(struct rw_semaphore *sem, int subclass);
# define down_write_nested(sem, subclass) down_write(sem)
#endif
+#endif /* !PREEMPT_RT_FULL */
+
#endif /* _LINUX_RWSEM_H */
diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
new file mode 100644
index 000000000000..802c6909900a
--- /dev/null
+++ b/include/linux/rwsem_rt.h
@@ -0,0 +1,105 @@
+#ifndef _LINUX_RWSEM_RT_H
+#define _LINUX_RWSEM_RT_H
+
+#ifndef _LINUX_RWSEM_H
+#error "Include rwsem.h"
+#endif
+
+/*
+ * RW-semaphores are a spinlock plus a reader-depth count.
+ *
+ * Note that the semantics are different from the usual
+ * Linux rw-sems: in PREEMPT_RT mode we do not allow
+ * multiple readers to hold the lock at once; we only allow
+ * a read-lock owner to read-lock recursively. This is
+ * better for latency, makes the implementation inherently
+ * fair and makes it simpler as well.
+ */
+
+#include <linux/rtmutex.h>
+
+struct rw_semaphore {
+ struct rt_mutex lock;
+ int read_depth;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#define __RWSEM_INITIALIZER(name) \
+ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
+ RW_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(lockname) \
+ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
+
+extern void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
+ struct lock_class_key *key);
+
+# define rt_init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_init(&(sem)->lock); \
+ __rt_rwsem_init((sem), #sem, &__key); \
+} while (0)
+
+extern void rt_down_write(struct rw_semaphore *rwsem);
+extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
+extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
+extern void rt_down_read(struct rw_semaphore *rwsem);
+extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
+extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
+extern void rt_up_read(struct rw_semaphore *rwsem);
+extern void rt_up_write(struct rw_semaphore *rwsem);
+extern void rt_downgrade_write(struct rw_semaphore *rwsem);
+
+#define init_rwsem(sem) rt_init_rwsem(sem)
+#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
+
+static inline void down_read(struct rw_semaphore *sem)
+{
+ rt_down_read(sem);
+}
+
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+ return rt_down_read_trylock(sem);
+}
+
+static inline void down_write(struct rw_semaphore *sem)
+{
+ rt_down_write(sem);
+}
+
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+ return rt_down_write_trylock(sem);
+}
+
+static inline void up_read(struct rw_semaphore *sem)
+{
+ rt_up_read(sem);
+}
+
+static inline void up_write(struct rw_semaphore *sem)
+{
+ rt_up_write(sem);
+}
+
+static inline void downgrade_write(struct rw_semaphore *sem)
+{
+ rt_downgrade_write(sem);
+}
+
+static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
+{
+ return rt_down_read_nested(sem, subclass);
+}
+
+static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+ rt_down_write_nested(sem, subclass);
+}
+
+#endif
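
As a usage sketch of the semantics described in the comment above (illustration only, not part of the patch; the structure and function names are made up): on RT only the current read-lock owner may re-acquire the read side, and the write side stays exclusive on the underlying rtmutex.

/* Hypothetical example of the RT rw_semaphore API above. */
#include <linux/rwsem.h>	/* pulls in rwsem_rt.h when PREEMPT_RT_FULL=y */

struct example_dev {
	struct rw_semaphore sem;
	int state;
};

static void example_dev_init(struct example_dev *dev)
{
	init_rwsem(&dev->sem);		/* maps to rt_init_rwsem() */
	dev->state = 0;
}

static int example_read_state(struct example_dev *dev)
{
	int state;

	down_read(&dev->sem);		/* rt_down_read(); readers are serialized,
					 * only owner recursion is permitted */
	state = dev->state;
	up_read(&dev->sem);
	return state;
}

static void example_set_state(struct example_dev *dev, int state)
{
	down_write(&dev->sem);		/* exclusive writer */
	dev->state = state;
	/* A writer may drop to the read side without releasing the lock: */
	downgrade_write(&dev->sem);
	state = dev->state;		/* still protected from other writers */
	up_read(&dev->sem);
}
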
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1c4f3e9b9bc5..a10f525ca473 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -63,6 +63,7 @@ struct sched_param {
#include <linux/nodemask.h>
#include <linux/mm_types.h>
+#include <asm/kmap_types.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/ptrace.h>
@@ -91,6 +92,7 @@ struct sched_param {
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
+#include <linux/hardirq.h>
#include <asm/processor.h>
@@ -359,6 +361,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
+extern void schedule_preempt_disabled(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
struct nsproxy;
@@ -1070,6 +1073,7 @@ struct sched_domain;
 #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x04 /* internal use, task got migrated */
+#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
#define ENQUEUE_WAKEUP 1
#define ENQUEUE_HEAD 2
@@ -1219,6 +1223,7 @@ enum perf_event_task_context {
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+ volatile long saved_state; /* saved state for "spinlock sleepers" */
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
@@ -1255,6 +1260,12 @@ struct task_struct {
#endif
unsigned int policy;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int migrate_disable;
+#ifdef CONFIG_SCHED_DEBUG
+ int migrate_disable_atomic;
+#endif
+#endif
cpumask_t cpus_allowed;
#ifdef CONFIG_PREEMPT_RCU
@@ -1353,6 +1364,9 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
+#ifdef CONFIG_PREEMPT_RT_BASE
+ struct task_struct *posix_timer_list;
+#endif
/* process credentials */
const struct cred __rcu *real_cred; /* objective and real subjective task
@@ -1386,6 +1400,7 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
+ struct sigqueue *sigqueue_cache;
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
@@ -1429,6 +1444,9 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int pagefault_disabled;
+#endif
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
@@ -1560,6 +1578,12 @@ struct task_struct {
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ u64 preempt_timestamp_hist;
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ unsigned long timer_offset;
+#endif
+#endif
#endif /* CONFIG_TRACING */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
struct memcg_batch_info {
@@ -1572,10 +1596,26 @@ struct task_struct {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
atomic_t ptrace_bp_refcnt;
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+ struct rcu_head put_rcu;
+ int softirq_nestcnt;
+#endif
+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
+ int kmap_idx;
+ pte_t kmap_pte[KM_TYPE_NR];
+#endif
};
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
+#else
+static inline bool cur_pf_disabled(void) { return false; }
+#endif
+
+static inline bool pagefault_disabled(void)
+{
+ return in_atomic() || cur_pf_disabled();
+}
/*
* Priority of a process goes from 0..MAX_PRIO-1, valid RT
@@ -1745,6 +1785,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __put_task_struct_cb(struct rcu_head *rhp);
+
+static inline void put_task_struct(struct task_struct *t)
+{
+ if (atomic_dec_and_test(&t->usage))
+ call_rcu(&t->put_rcu, __put_task_struct_cb);
+}
+#else
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
@@ -1752,6 +1801,7 @@ static inline void put_task_struct(struct task_struct *t)
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
+#endif
extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
@@ -1777,6 +1827,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
#define PF_KSWAPD 0x00040000 /* I am kswapd */
+#define PF_STOMPER 0x00080000 /* I am a stomp machine thread */
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
@@ -2050,12 +2101,20 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+ return tsk->pi_blocked_on != NULL;
+}
#else
static inline int rt_mutex_getprio(struct task_struct *p)
{
return p->normal_prio;
}
# define rt_mutex_adjust_pi(p) do { } while (0)
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+ return false;
+}
#endif
extern bool yield_to(struct task_struct *p, bool preempt);
@@ -2135,6 +2194,7 @@ extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_lock_sleeper(struct task_struct * tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
@@ -2225,12 +2285,24 @@ extern struct mm_struct * mm_alloc(void);
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
+
static inline void mmdrop(struct mm_struct * mm)
{
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __mmdrop_delayed(struct rcu_head *rhp);
+static inline void mmdrop_delayed(struct mm_struct *mm)
+{
+ if (atomic_dec_and_test(&mm->mm_count))
+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+# define mmdrop_delayed(mm) mmdrop(mm)
+#endif
+
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
@@ -2533,7 +2605,7 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
-#ifdef CONFIG_PREEMPT_COUNT
+#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET 0
@@ -2544,12 +2616,16 @@ extern int __cond_resched_lock(spinlock_t *lock);
__cond_resched_lock(lock); \
})
+#ifndef CONFIG_PREEMPT_RT_FULL
extern int __cond_resched_softirq(void);
#define cond_resched_softirq() ({ \
__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
__cond_resched_softirq(); \
})
+#else
+# define cond_resched_softirq() cond_resched()
+#endif
/*
* Does a critical section need to be broken due to another
@@ -2612,6 +2688,26 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+static inline int __migrate_disabled(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ return p->migrate_disable;
+#else
+ return 0;
+#endif
+}
+
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (p->migrate_disable)
+ return cpumask_of(task_cpu(p));
+#endif
+
+ return &p->cpus_allowed;
+}
+
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
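
A hedged illustration of the accessor change above (not from the patch; the function is hypothetical): code that used to take &p->cpus_allowed directly now calls tsk_cpus_allowed(), which collapses to cpumask_of(task_cpu(p)) while the task sits in a migrate_disable() section.

/* Hypothetical caller of tsk_cpus_allowed(). */
#include <linux/sched.h>
#include <linux/cpumask.h>

static int example_pick_target_cpu(struct task_struct *p, int preferred)
{
	const struct cpumask *mask = tsk_cpus_allowed(p);

	/* With p->migrate_disable set on RT, mask is cpumask_of(task_cpu(p)),
	 * so only the CPU p currently runs on is eligible. */
	if (cpumask_test_cpu(preferred, mask))
		return preferred;

	return cpumask_any(mask);
}
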
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index c6db9fb33c44..e26235357202 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -30,92 +30,12 @@
#include <linux/preempt.h>
#include <asm/processor.h>
-typedef struct {
- unsigned sequence;
- spinlock_t lock;
-} seqlock_t;
-
-/*
- * These macros triggered gcc-3.x compile-time problems. We think these are
- * OK now. Be cautious.
- */
-#define __SEQLOCK_UNLOCKED(lockname) \
- { 0, __SPIN_LOCK_UNLOCKED(lockname) }
-
-#define seqlock_init(x) \
- do { \
- (x)->sequence = 0; \
- spin_lock_init(&(x)->lock); \
- } while (0)
-
-#define DEFINE_SEQLOCK(x) \
- seqlock_t x = __SEQLOCK_UNLOCKED(x)
-
-/* Lock out other writers and update the count.
- * Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
- */
-static inline void write_seqlock(seqlock_t *sl)
-{
- spin_lock(&sl->lock);
- ++sl->sequence;
- smp_wmb();
-}
-
-static inline void write_sequnlock(seqlock_t *sl)
-{
- smp_wmb();
- sl->sequence++;
- spin_unlock(&sl->lock);
-}
-
-static inline int write_tryseqlock(seqlock_t *sl)
-{
- int ret = spin_trylock(&sl->lock);
-
- if (ret) {
- ++sl->sequence;
- smp_wmb();
- }
- return ret;
-}
-
-/* Start of read calculation -- fetch last complete writer token */
-static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
-{
- unsigned ret;
-
-repeat:
- ret = ACCESS_ONCE(sl->sequence);
- if (unlikely(ret & 1)) {
- cpu_relax();
- goto repeat;
- }
- smp_rmb();
-
- return ret;
-}
-
-/*
- * Test if reader processed invalid data.
- *
- * If sequence value changed then writer changed data while in section.
- */
-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
-{
- smp_rmb();
-
- return unlikely(sl->sequence != start);
-}
-
-
/*
* Version using sequence counter only.
* This can be used when code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
* after the write_seqcount_end().
*/
-
typedef struct seqcount {
unsigned sequence;
} seqcount_t;
@@ -197,7 +117,6 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
smp_rmb();
-
return __read_seqcount_retry(s, start);
}
@@ -231,31 +150,154 @@ static inline void write_seqcount_barrier(seqcount_t *s)
s->sequence+=2;
}
+typedef struct {
+ struct seqcount seqcount;
+ raw_spinlock_t lock;
+} raw_seqlock_t;
+
+typedef struct {
+ struct seqcount seqcount;
+ spinlock_t lock;
+} seqlock_t;
+
+/*
+ * These macros triggered gcc-3.x compile-time problems. We think these are
+ * OK now. Be cautious.
+ */
+#define __RAW_SEQLOCK_UNLOCKED(lockname) \
+ { \
+ .seqcount = SEQCNT_ZERO, \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ }
+
+#define raw_seqlock_init(x) \
+ do { \
+ seqcount_init(&(x)->seqcount); \
+ raw_spin_lock_init(&(x)->lock); \
+ } while (0)
+
+#define DEFINE_RAW_SEQLOCK(x) \
+ raw_seqlock_t x = __RAW_SEQLOCK_UNLOCKED(x)
+
+#define __SEQLOCK_UNLOCKED(lockname) \
+ { \
+ .seqcount = SEQCNT_ZERO, \
+ .lock = __SPIN_LOCK_UNLOCKED(lockname) \
+ }
+
+#define seqlock_init(x) \
+ do { \
+ seqcount_init(&(x)->seqcount); \
+ spin_lock_init(&(x)->lock); \
+ } while (0)
+
+#define DEFINE_SEQLOCK(x) \
+ seqlock_t x = __SEQLOCK_UNLOCKED(x)
+
+#define read_seqbegin(sl) read_seqcount_begin(&(sl)->seqcount)
+#define read_seqretry(sl, start) read_seqcount_retry(&(sl)->seqcount, start)
+
/*
- * Possible sw/hw IRQ protected versions of the interfaces.
+ * Lock out other writers and update the count.
+ * Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
*/
+static inline void raw_write_seqlock(raw_seqlock_t *sl)
+{
+ raw_spin_lock(&sl->lock);
+ write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void raw_write_sequnlock(raw_seqlock_t *sl)
+{
+ write_seqcount_end(&sl->seqcount);
+ raw_spin_unlock(&sl->lock);
+}
+
+static inline void raw_write_seqlock_irq(raw_seqlock_t *sl)
+{
+ raw_spin_lock_irq(&sl->lock);
+ write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void raw_write_sequnlock_irq(raw_seqlock_t *sl)
+{
+ write_seqcount_end(&sl->seqcount);
+ raw_spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __raw_write_seqlock_irqsave(raw_seqlock_t *sl)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&sl->lock, flags);
+ write_seqcount_begin(&sl->seqcount);
+ return flags;
+}
+
+#define raw_write_seqlock_irqsave(lock, flags) \
+ do { flags = __raw_write_seqlock_irqsave(lock); } while (0)
+
+static inline void
+raw_write_sequnlock_irqrestore(raw_seqlock_t *sl, unsigned long flags)
+{
+ write_seqcount_end(&sl->seqcount);
+ raw_spin_unlock_irqrestore(&sl->lock, flags);
+}
+
+static inline void write_seqlock(seqlock_t *sl)
+{
+ spin_lock(&sl->lock);
+ write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock(seqlock_t *sl)
+{
+ write_seqcount_end(&sl->seqcount);
+ spin_unlock(&sl->lock);
+}
+
+static inline void write_seqlock_bh(seqlock_t *sl)
+{
+ spin_lock_bh(&sl->lock);
+ write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_bh(seqlock_t *sl)
+{
+ write_seqcount_end(&sl->seqcount);
+ spin_unlock_bh(&sl->lock);
+}
+
+static inline void write_seqlock_irq(seqlock_t *sl)
+{
+ spin_lock_irq(&sl->lock);
+ write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_irq(seqlock_t *sl)
+{
+ write_seqcount_end(&sl->seqcount);
+ spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sl->lock, flags);
+ write_seqcount_begin(&sl->seqcount);
+ return flags;
+}
+
#define write_seqlock_irqsave(lock, flags) \
- do { local_irq_save(flags); write_seqlock(lock); } while (0)
-#define write_seqlock_irq(lock) \
- do { local_irq_disable(); write_seqlock(lock); } while (0)
-#define write_seqlock_bh(lock) \
- do { local_bh_disable(); write_seqlock(lock); } while (0)
-
-#define write_sequnlock_irqrestore(lock, flags) \
- do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
-#define write_sequnlock_irq(lock) \
- do { write_sequnlock(lock); local_irq_enable(); } while(0)
-#define write_sequnlock_bh(lock) \
- do { write_sequnlock(lock); local_bh_enable(); } while(0)
-
-#define read_seqbegin_irqsave(lock, flags) \
- ({ local_irq_save(flags); read_seqbegin(lock); })
-
-#define read_seqretry_irqrestore(lock, iv, flags) \
- ({ \
- int ret = read_seqretry(lock, iv); \
- local_irq_restore(flags); \
- ret; \
- })
+ do { flags = __write_seqlock_irqsave(lock); } while (0)
+
+static inline void
+write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+{
+ write_seqcount_end(&sl->seqcount);
+ spin_unlock_irqrestore(&sl->lock, flags);
+}
#endif /* __LINUX_SEQLOCK_H */
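
For reference, a minimal sketch of the unchanged external usage pattern on top of the reworked seqlock (illustration only; the names are invented): a retry-loop reader paired with a writer that serializes on the embedded spinlock.

/* Hypothetical seqlock usage against the API rebuilt above. */
#include <linux/types.h>
#include <linux/seqlock.h>

static DEFINE_SEQLOCK(example_seqlock);
static u64 example_a, example_b;

static void example_update(u64 a, u64 b)
{
	write_seqlock(&example_seqlock);	/* spin_lock() + write_seqcount_begin() */
	example_a = a;
	example_b = b;
	write_sequnlock(&example_seqlock);	/* write_seqcount_end() + spin_unlock() */
}

static u64 example_sum(void)
{
	unsigned int seq;
	u64 sum;

	do {
		seq = read_seqbegin(&example_seqlock);	/* read_seqcount_begin() */
		sum = example_a + example_b;
	} while (read_seqretry(&example_seqlock, seq));	/* retry if a writer intervened */

	return sum;
}
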
diff --git a/include/linux/signal.h b/include/linux/signal.h
index a822300a253b..a4489006a5bb 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -229,6 +229,7 @@ static inline void init_sigpending(struct sigpending *sig)
}
extern void flush_sigqueue(struct sigpending *queue);
+extern void flush_task_sigqueue(struct task_struct *tsk);
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fe864885c1ed..cc32cceb612a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -124,6 +124,7 @@ struct sk_buff_head {
__u32 qlen;
spinlock_t lock;
+ raw_spinlock_t raw_lock;
};
struct sk_buff;
@@ -925,6 +926,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
__skb_queue_head_init(list);
}
+static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
+{
+ raw_spin_lock_init(&list->raw_lock);
+ __skb_queue_head_init(list);
+}
+
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 8cc38d3bab0c..78fd0a26fa05 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -80,7 +80,6 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data,
int smp_call_function_any(const struct cpumask *mask,
smp_call_func_t func, void *info, int wait);
-
/*
* Generic and arch helpers
*/
@@ -173,6 +172,9 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
+#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
+#define put_cpu_light() migrate_enable()
+
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
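
The get_cpu_light()/put_cpu_light() pair added above trades preempt_disable() for migrate_disable(): the task keeps running on one CPU but stays preemptible. A hypothetical caller (not in the patch), with the caveat that any per-CPU data touched here still needs its own serialization:

/* Hypothetical use of get_cpu_light() for a stable CPU number. */
#include <linux/kernel.h>
#include <linux/smp.h>

static void example_log_cpu(const char *what)
{
	int cpu = get_cpu_light();	/* migrate_disable() + smp_processor_id() */

	printk(KERN_DEBUG "%s on CPU %d\n", what, cpu);
	put_cpu_light();		/* migrate_enable() */
}
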
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7df6c17b0281..5fe7e40a269a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -254,7 +254,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
/* Include rwlock functions */
-#include <linux/rwlock.h>
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/rwlock_rt.h>
+#else
+# include <linux/rwlock.h>
+#endif
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -265,6 +269,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# include <linux/spinlock_api_up.h>
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/spinlock_rt.h>
+#else /* PREEMPT_RT_FULL */
+
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
@@ -397,4 +405,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+#endif /* !PREEMPT_RT_FULL */
+
#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index e253ccd7a604..2a5ba05ebace 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -191,6 +191,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
return 0;
}
-#include <linux/rwlock_api_smp.h>
+#ifndef CONFIG_PREEMPT_RT_FULL
+# include <linux/rwlock_api_smp.h>
+#endif
#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
new file mode 100644
index 000000000000..3b555b4b52cf
--- /dev/null
+++ b/include/linux/spinlock_rt.h
@@ -0,0 +1,166 @@
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+extern void
+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
+
+#define spin_lock_init(slock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_init(&(slock)->lock); \
+ __rt_spin_lock_init(slock, #slock, &__key); \
+} while (0)
+
+extern void __lockfunc rt_spin_lock(spinlock_t *lock);
+extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
+extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
+extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
+extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
+extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
+extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
+extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
+extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
+
+/*
+ * lockdep-less calls, for derived types like rwlock:
+ * (for trylock they can use rt_mutex_trylock() directly).
+ */
+extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
+extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
+
+#define spin_lock_local(lock) rt_spin_lock(lock)
+#define spin_unlock_local(lock) rt_spin_unlock(lock)
+
+#define spin_lock(lock) \
+ do { \
+ migrate_disable(); \
+ rt_spin_lock(lock); \
+ } while (0)
+
+#define spin_lock_bh(lock) \
+ do { \
+ local_bh_disable(); \
+ migrate_disable(); \
+ rt_spin_lock(lock); \
+ } while (0)
+
+#define spin_lock_irq(lock) spin_lock(lock)
+
+#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
+
+#define spin_trylock(lock) \
+({ \
+ int __locked; \
+ migrate_disable(); \
+ __locked = spin_do_trylock(lock); \
+ if (!__locked) \
+ migrate_enable(); \
+ __locked; \
+})
+
+#ifdef CONFIG_LOCKDEP
+# define spin_lock_nested(lock, subclass) \
+ do { \
+ migrate_disable(); \
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ migrate_disable(); \
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+#else
+# define spin_lock_nested(lock, subclass) spin_lock(lock)
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ spin_lock(lock); \
+ } while (0)
+#endif
+
+#define spin_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ spin_lock(lock); \
+ } while (0)
+
+static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
+{
+ unsigned long flags = 0;
+#ifdef CONFIG_TRACE_IRQFLAGS
+ flags = rt_spin_lock_trace_flags(lock);
+#else
+ spin_lock(lock); /* lock_local */
+#endif
+ return flags;
+}
+
+/* FIXME: we need rt_spin_lock_nest_lock */
+#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
+
+#define spin_unlock(lock) \
+ do { \
+ rt_spin_unlock(lock); \
+ migrate_enable(); \
+ } while (0)
+
+#define spin_unlock_bh(lock) \
+ do { \
+ rt_spin_unlock(lock); \
+ migrate_enable(); \
+ local_bh_enable(); \
+ } while (0)
+
+#define spin_unlock_irq(lock) spin_unlock(lock)
+
+#define spin_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ (void) flags; \
+ spin_unlock(lock); \
+ } while (0)
+
+#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
+#define spin_trylock_irq(lock) spin_trylock(lock)
+
+#define spin_trylock_irqsave(lock, flags) \
+ rt_spin_trylock_irqsave(lock, &(flags))
+
+#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
+
+#ifdef CONFIG_GENERIC_LOCKBREAK
+# define spin_is_contended(lock) ((lock)->break_lock)
+#else
+# define spin_is_contended(lock) (((void)(lock), 0))
+#endif
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+ return !rt_mutex_is_locked(&lock->lock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return rt_mutex_is_locked(&lock->lock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+ BUG_ON(!spin_is_locked(lock));
+}
+
+#define atomic_dec_and_lock(atomic, lock) \
+ atomic_dec_and_spin_lock(atomic, lock)
+
+#endif
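
To make the mapping above concrete, a short hypothetical caller (not part of the patch): the usual spin_lock()/spin_lock_irqsave() spelling is kept, but on RT the lock is a sleeping rtmutex, interrupts are not hard-disabled, and flags is a dummy that spin_lock_irqsave() sets to 0.

/* Hypothetical driver path built on the RT spinlock mapping above. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned int example_events;

static void example_event(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* migrate_disable() + rt_spin_lock() */
	example_events++;
	spin_unlock_irqrestore(&example_lock, flags);
}

static int example_try_event(void)
{
	/* On failure the migrate_disable() taken for the attempt is dropped again. */
	if (!spin_trylock(&example_lock))
		return 0;
	example_events++;
	spin_unlock(&example_lock);
	return 1;
}
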
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 73548eb13a5d..10bac715ea96 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,80 +9,15 @@
* Released under the General Public License (GPL).
*/
-#if defined(CONFIG_SMP)
-# include <asm/spinlock_types.h>
-#else
-# include <linux/spinlock_types_up.h>
-#endif
-
-#include <linux/lockdep.h>
-
-typedef struct raw_spinlock {
- arch_spinlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
- unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} raw_spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#define SPINLOCK_OWNER_INIT ((void *)-1L)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
+#include <linux/spinlock_types_raw.h>
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname) \
- .magic = SPINLOCK_MAGIC, \
- .owner_cpu = -1, \
- .owner = SPINLOCK_OWNER_INIT,
+#ifndef CONFIG_PREEMPT_RT_FULL
+# include <linux/spinlock_types_nort.h>
+# include <linux/rwlock_types.h>
#else
-# define SPIN_DEBUG_INIT(lockname)
+# include <linux/rtmutex.h>
+# include <linux/spinlock_types_rt.h>
+# include <linux/rwlock_types_rt.h>
#endif
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
- { \
- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- SPIN_DEBUG_INIT(lockname) \
- SPIN_DEP_MAP_INIT(lockname) }
-
-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
-
-typedef struct spinlock {
- union {
- struct raw_spinlock rlock;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
- struct {
- u8 __padding[LOCK_PADSIZE];
- struct lockdep_map dep_map;
- };
-#endif
- };
-} spinlock_t;
-
-#define __SPIN_LOCK_INITIALIZER(lockname) \
- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
-
-#define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-
-#include <linux/rwlock_types.h>
-
#endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
new file mode 100644
index 000000000000..f1dac1fb1d6a
--- /dev/null
+++ b/include/linux/spinlock_types_nort.h
@@ -0,0 +1,33 @@
+#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
+#define __LINUX_SPINLOCK_TYPES_NORT_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#error "Do not include directly. Include spinlock_types.h instead"
+#endif
+
+/*
+ * The non-RT version maps spinlocks to raw_spinlocks
+ */
+typedef struct spinlock {
+ union {
+ struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+ struct {
+ u8 __padding[LOCK_PADSIZE];
+ struct lockdep_map dep_map;
+ };
+#endif
+ };
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
+#endif
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
new file mode 100644
index 000000000000..edffc4d53fc9
--- /dev/null
+++ b/include/linux/spinlock_types_raw.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep.h>
+
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} raw_spinlock_t;
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+#define SPINLOCK_OWNER_INIT ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
+#else
+# define SPIN_DEBUG_INIT(lockname)
+#endif
+
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif
diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
new file mode 100644
index 000000000000..1fe8fc069d9b
--- /dev/null
+++ b/include/linux/spinlock_types_rt.h
@@ -0,0 +1,49 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RT_H
+#define __LINUX_SPINLOCK_TYPES_RT_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#error "Do not include directly. Include spinlock_types.h instead"
+#endif
+
+/*
+ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
+ */
+typedef struct spinlock {
+ struct rt_mutex lock;
+ unsigned int break_lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} spinlock_t;
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# define __RT_SPIN_INITIALIZER(name) \
+ { \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
+ .save_state = 1, \
+ .file = __FILE__, \
+ .line = __LINE__ , \
+ }
+#else
+# define __RT_SPIN_INITIALIZER(name) \
+ { \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
+ .save_state = 1, \
+ }
+#endif
+
+/*
+.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
+*/
+
+#define __SPIN_LOCK_UNLOCKED(name) \
+ { .lock = __RT_SPIN_INITIALIZER(name.lock), \
+ SPIN_DEP_MAP_INIT(name) }
+
+#define __DEFINE_SPINLOCK(name) \
+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
+#define DEFINE_SPINLOCK(name) \
+ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
+
+#endif
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 703cfa33a3ca..b954c4136da3 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -932,6 +932,7 @@ enum
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
+#include <linux/atomic.h>
/* For the /proc/sys support */
struct ctl_table;
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 7faf933cced7..d224c0bae8df 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -38,6 +38,11 @@ struct sysrq_key_op {
int enable_mask;
};
+#ifdef CONFIG_MAGIC_SYSRQ_FORCE_PRINTK
+extern int sysrq_in_progress;
+#else
+#define sysrq_in_progress 0
+#endif
#ifdef CONFIG_MAGIC_SYSRQ
/* Generic SysRq interface -- you may call it from any device driver, supplying
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 6abd9138beda..b703477bd2d6 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -276,7 +276,7 @@ extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
extern int del_timer_sync(struct timer_list *timer);
#else
# define del_timer_sync(t) del_timer(t)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5ca0951e1855..44b37510c14f 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,38 +6,37 @@
/*
* These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
- *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * it will not take any MM locks and go straight to the fixup table.
*/
-static inline void pagefault_disable(void)
+static inline void raw_pagefault_disable(void)
{
inc_preempt_count();
- /*
- * make sure to have issued the store before a pagefault
- * can hit.
- */
barrier();
}
-static inline void pagefault_enable(void)
+static inline void raw_pagefault_enable(void)
{
- /*
- * make sure to issue those last loads/stores before enabling
- * the pagefault handler again.
- */
barrier();
dec_preempt_count();
- /*
- * make sure we do..
- */
barrier();
preempt_check_resched();
}
+#ifndef CONFIG_PREEMPT_RT_FULL
+static inline void pagefault_disable(void)
+{
+ raw_pagefault_disable();
+}
+
+static inline void pagefault_enable(void)
+{
+ raw_pagefault_enable();
+}
+#else
+extern void pagefault_disable(void);
+extern void pagefault_enable(void);
+#endif
+
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -77,9 +76,9 @@ static inline unsigned long __copy_from_user_nocache(void *to,
mm_segment_t old_fs = get_fs(); \
\
set_fs(KERNEL_DS); \
- pagefault_disable(); \
+ raw_pagefault_disable(); \
ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
- pagefault_enable(); \
+ raw_pagefault_enable(); \
set_fs(old_fs); \
ret; \
})
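
As an illustrative (non-patch) sketch of the split above: normal kernel code keeps using pagefault_disable()/pagefault_enable(), which on RT become out-of-line functions, while raw_pagefault_disable() stays a bare preempt-count operation for the internal helper below it. The function name here is invented.

/* Hypothetical atomic peek at user memory with faults disabled. */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

static int example_peek_user(u32 __user *uaddr, u32 *val)
{
	int ret;

	pagefault_disable();	/* faults go straight to the fixup table */
	ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}
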
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 65efb92da996..1b3f2efd3ad0 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -29,7 +29,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
static inline void __count_vm_event(enum vm_event_item item)
{
+ preempt_disable_rt();
__this_cpu_inc(vm_event_states.event[item]);
+ preempt_enable_rt();
}
static inline void count_vm_event(enum vm_event_item item)
@@ -39,7 +41,9 @@ static inline void count_vm_event(enum vm_event_item item)
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
+ preempt_disable_rt();
__this_cpu_add(vm_event_states.event[item], delta);
+ preempt_enable_rt();
}
static inline void count_vm_events(enum vm_event_item item, long delta)
diff --git a/include/linux/wait-simple.h b/include/linux/wait-simple.h
new file mode 100644
index 000000000000..de69d8a2a94e
--- /dev/null
+++ b/include/linux/wait-simple.h
@@ -0,0 +1,152 @@
+#ifndef _LINUX_WAIT_SIMPLE_H
+#define _LINUX_WAIT_SIMPLE_H
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+#include <asm/current.h>
+
+struct swaiter {
+ struct task_struct *task;
+ struct list_head node;
+};
+
+#define DEFINE_SWAITER(name) \
+ struct swaiter name = { \
+ .task = current, \
+ .node = LIST_HEAD_INIT((name).node), \
+ }
+
+struct swait_head {
+ raw_spinlock_t lock;
+ struct list_head list;
+};
+
+#define DEFINE_SWAIT_HEAD(name) \
+ struct swait_head name = { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .list = LIST_HEAD_INIT((name).list), \
+ }
+
+extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
+
+#define init_swait_head(swh) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+ __init_swait_head((swh), &__key); \
+ } while (0)
+
+/*
+ * Waiter functions
+ */
+static inline bool swaiter_enqueued(struct swaiter *w)
+{
+ return w->task != NULL;
+}
+
+extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
+extern void swait_finish(struct swait_head *head, struct swaiter *w);
+
+/*
+ * Adds w to head->list. Must be called with head->lock locked.
+ */
+static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
+{
+ list_add(&w->node, &head->list);
+}
+
+/*
+ * Removes w from head->list. Must be called with head->lock locked.
+ */
+static inline void __swait_dequeue(struct swaiter *w)
+{
+ list_del_init(&w->node);
+}
+
+/*
+ * Wakeup functions
+ */
+extern void __swait_wake(struct swait_head *head, unsigned int state);
+
+static inline void swait_wake(struct swait_head *head)
+{
+ __swait_wake(head, TASK_NORMAL);
+}
+
+/*
+ * Event API
+ */
+
+#define __swait_event(wq, condition) \
+do { \
+ DEFINE_SWAITER(__wait); \
+ \
+ for (;;) { \
+ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ schedule(); \
+ } \
+ swait_finish(&wq, &__wait); \
+} while (0)
+
+/**
+ * swait_event - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define swait_event(wq, condition) \
+do { \
+ if (condition) \
+ break; \
+ __swait_event(wq, condition); \
+} while (0)
+
+#define __swait_event_timeout(wq, condition, ret) \
+do { \
+ DEFINE_SWAITER(__wait); \
+ \
+ for (;;) { \
+ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ ret = schedule_timeout(ret); \
+ if (!ret) \
+ break; \
+ } \
+ swait_finish(&wq, &__wait); \
+} while (0)
+
+/**
+ * swait_event_timeout - sleep until a condition gets true or a timeout elapses
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function returns 0 if the @timeout elapsed, and the remaining
+ * jiffies if the condition evaluated to true before the timeout elapsed.
+ */
+#define swait_event_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!(condition)) \
+ __swait_event_timeout(wq, condition, __ret); \
+ __ret; \
+})
+
+#endif
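
A hedged end-to-end sketch of the simple-wait API documented above (not part of the patch; the flag and functions are invented): a consumer sleeping in swait_event() and a producer that sets the condition before calling swait_wake().

/* Hypothetical producer/consumer pair on the simple waitqueue API. */
#include <linux/wait-simple.h>

static DEFINE_SWAIT_HEAD(example_wait);
static int example_ready;

static void example_consumer(void)
{
	/* Sleeps in TASK_UNINTERRUPTIBLE until example_ready is set;
	 * the condition is re-checked after every wakeup. */
	swait_event(example_wait, example_ready);
}

static void example_producer(void)
{
	example_ready = 1;		/* change the condition first ... */
	swait_wake(&example_wait);	/* ... then wake up the waiters  */
}
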
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 3efc9f3f43a0..1e904b88c1ea 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -157,7 +157,7 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
void *key);
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
@@ -170,7 +170,8 @@ wait_queue_head_t *bit_waitqueue(void *, int);
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
-#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL)
+#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
+#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0d556deb497b..d68ead81b52b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -254,9 +254,10 @@ enum {
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
WQ_HIGHPRI = 1 << 4, /* high priority */
 WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
+ WQ_NON_AFFINE = 1 << 6, /* free to move works around cpus */
- WQ_DRAINING = 1 << 6, /* internal: workqueue is draining */
- WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */
+ WQ_DRAINING = 1 << 7, /* internal: workqueue is draining */
+ WQ_RESCUER = 1 << 8, /* internal: workqueue has rescuer */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
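
A hypothetical user of the new WQ_NON_AFFINE flag (not part of the patch): work queued here may be executed on any CPU rather than being bound to the submitting CPU.

/* Hypothetical workqueue that opts out of CPU affinity. */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_wq_init(void)
{
	example_wq = alloc_workqueue("example_wq", WQ_NON_AFFINE, 0);
	return example_wq ? 0 : -ENOMEM;
}
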
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d786b4fc02a4..8cef1d197dd2 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -47,6 +47,7 @@ struct netns_ipv4 {
int sysctl_icmp_echo_ignore_all;
int sysctl_icmp_echo_ignore_broadcasts;
+ int sysctl_icmp_echo_sysrq;
int sysctl_icmp_ignore_bogus_error_responses;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
diff --git a/include/trace/events/hist.h b/include/trace/events/hist.h
new file mode 100644
index 000000000000..28646db2c775
--- /dev/null
+++ b/include/trace/events/hist.h
@@ -0,0 +1,69 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hist
+
+#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HIST_H
+
+#include "latency_hist.h"
+#include <linux/tracepoint.h>
+
+#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
+#define trace_preemptirqsoff_hist(a,b)
+#else
+TRACE_EVENT(preemptirqsoff_hist,
+
+ TP_PROTO(int reason, int starthist),
+
+ TP_ARGS(reason, starthist),
+
+ TP_STRUCT__entry(
+ __field(int, reason )
+ __field(int, starthist )
+ ),
+
+ TP_fast_assign(
+ __entry->reason = reason;
+ __entry->starthist = starthist;
+ ),
+
+ TP_printk("reason=%s starthist=%s", getaction(__entry->reason),
+ __entry->starthist ? "start" : "stop")
+);
+#endif
+
+#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
+#define trace_hrtimer_interrupt(a,b,c,d)
+#else
+TRACE_EVENT(hrtimer_interrupt,
+
+ TP_PROTO(int cpu, long long offset, struct task_struct *curr, struct task_struct *task),
+
+ TP_ARGS(cpu, offset, curr, task),
+
+ TP_STRUCT__entry(
+ __field(int, cpu )
+ __field(long long, offset )
+ __array(char, ccomm, TASK_COMM_LEN)
+ __field(int, cprio )
+ __array(char, tcomm, TASK_COMM_LEN)
+ __field(int, tprio )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->offset = offset;
+ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
+ __entry->cprio = curr->prio;
+ memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>", task != NULL ? TASK_COMM_LEN : 7);
+ __entry->tprio = task != NULL ? task->prio : -1;
+ ),
+
+ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
+ __entry->cpu, __entry->offset, __entry->ccomm, __entry->cprio, __entry->tcomm, __entry->tprio)
+);
+#endif
+
+#endif /* _TRACE_HIST_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
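
A hypothetical call-site sketch for the tracepoints defined above (not part of the patch): the latency tracers fire preemptirqsoff_hist with a reason from latency_hist.h and a start/stop flag; with the histogram configs disabled the call compiles away to nothing.

/* Hypothetical instrumentation of an irqs-off section. */
#include <trace/events/hist.h>

static void example_irqs_off_section(void)
{
	trace_preemptirqsoff_hist(IRQS_OFF, 1);	/* start the irqs-off sample */

	/* ... critical section with interrupts disabled ... */

	trace_preemptirqsoff_hist(IRQS_ON, 0);	/* stop and account the sample */
}
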
diff --git a/include/trace/events/latency_hist.h b/include/trace/events/latency_hist.h
new file mode 100644
index 000000000000..d6b5d77c57aa
--- /dev/null
+++ b/include/trace/events/latency_hist.h
@@ -0,0 +1,30 @@
+#ifndef _LATENCY_HIST_H
+#define _LATENCY_HIST_H
+
+enum hist_action {
+ IRQS_ON,
+ PREEMPT_ON,
+ TRACE_STOP,
+ IRQS_OFF,
+ PREEMPT_OFF,
+ TRACE_START,
+};
+
+static char *actions[] = {
+ "IRQS_ON",
+ "PREEMPT_ON",
+ "TRACE_STOP",
+ "IRQS_OFF",
+ "PREEMPT_OFF",
+ "TRACE_START",
+};
+
+static inline char *getaction(int action)
+{
+ if (action >= 0 && action < sizeof(actions)/sizeof(actions[0]))
+ return(actions[action]);
+ return("unknown");
+}
+
+#endif /* _LATENCY_HIST_H */
+