Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c                       20
-rw-r--r--  kernel/printk/printk.c             11
-rw-r--r--  kernel/sched/cpufreq_schedutil.c    8
-rw-r--r--  kernel/time/clockevents.c          21
-rw-r--r--  kernel/time/tick-sched.c           21
-rw-r--r--  kernel/trace/blktrace.c            14
6 files changed, 52 insertions, 43 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b5a0165b7300..bf875e94ac9e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2206,3 +2206,23 @@ void __init boot_cpu_hotplug_init(void)
#endif
this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}
+
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+ atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+ atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+
+void idle_notifier_call_chain(unsigned long val)
+{
+ atomic_notifier_call_chain(&idle_notifier, val, NULL);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
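
The three helpers above are a standard atomic notifier chain wrapper. A minimal sketch of a client module, assuming the declarations are exposed through a header such as linux/cpu.h (an assumption; this hunk adds only the definitions) and that callers of idle_notifier_call_chain() pass an event code — the DEMO_IDLE_* values below are illustrative, not defined by this diff:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/cpu.h>		/* assumed home of the declarations */

#define DEMO_IDLE_START	1	/* hypothetical event codes; this diff */
#define DEMO_IDLE_END	2	/* does not define the values passed in */

/* Runs from the atomic chain, possibly with preemption disabled,
 * so it must not sleep. */
static int demo_idle_notify(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	pr_info("idle transition: %lu\n", val);
	return NOTIFY_OK;
}

static struct notifier_block demo_idle_nb = {
	.notifier_call = demo_idle_notify,
};

static int __init demo_init(void)
{
	idle_notifier_register(&demo_idle_nb);
	return 0;
}

static void __exit demo_exit(void)
{
	idle_notifier_unregister(&demo_idle_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");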
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index ab6855a4218b..9ae22e845029 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2171,9 +2171,9 @@ void resume_console(void)
* @hcpu: unused
*
* If printk() is called from a CPU that is not online yet, the messages
- * will be spooled but will not show up on the console. This function is
- * called when a new CPU comes online (or fails to come up), and ensures
- * that any such output gets printed.
+ * will be printed on the console only if there are CON_ANYTIME consoles.
+ * This function is called when a new CPU comes online (or fails to come
+ * up) or goes offline.
*/
static int console_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
@@ -2183,8 +2183,9 @@ static int console_cpu_notify(struct notifier_block *self,
case CPU_DEAD:
case CPU_DOWN_FAILED:
case CPU_UP_CANCELED:
- console_lock();
- console_unlock();
+ /* If trylock fails, someone else is doing the printing */
+ if (console_trylock())
+ console_unlock();
}
return NOTIFY_OK;
}
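
The change swaps an unconditional console_lock()/console_unlock() pair for a non-blocking attempt. The idiom in isolation: console_trylock() never sleeps, and a failed trylock is harmless because the current lock holder will flush any newly spooled messages when it calls console_unlock(). A standalone sketch (demo_flush_consoles is a hypothetical helper name):

#include <linux/console.h>

/* Non-blocking console flush: take the lock only so console_unlock()
 * replays buffered messages; if the lock is contended, whoever holds
 * it will print our messages for us, so failing is fine. Never sleeps,
 * which is what makes it safe from the hotplug notifier path above. */
static void demo_flush_consoles(void)
{
	if (console_trylock())
		console_unlock();
}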
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index cb771c76682e..4704f654e7c3 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -321,15 +321,15 @@ static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
return container_of(attr_set, struct sugov_tunables, attr_set);
}

-static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
+static ssize_t show_rate_limit_us(struct gov_attr_set *attr_set, char *buf)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

-static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
- size_t count)
+static ssize_t store_rate_limit_us(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
struct sugov_policy *sg_policy;
@@ -346,7 +346,7 @@ static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
return count;
}

-static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
+gov_attr_rw(rate_limit_us);

static struct attribute *sugov_attributes[] = {
&rate_limit_us.attr,
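
The handler renames exist to satisfy gov_attr_rw(), which token-pastes show_/store_ prefixes onto the attribute name. Going by the macro's definition in drivers/cpufreq/cpufreq_governor.h, the expansion is roughly:

/* gov_attr_rw(rate_limit_us) becomes, approximately: */
static struct governor_attr rate_limit_us =
	__ATTR(rate_limit_us, 0644, show_rate_limit_us, store_rate_limit_us);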
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 2c5bc77c0bb0..56db6bcb5568 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -280,17 +280,22 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
unsigned long long clc;
- int64_t delta;
+ int64_t delta = 0;
+ int i;

- delta = dev->min_delta_ns;
- dev->next_event = ktime_add_ns(ktime_get(), delta);
+ for (i = 0; i < 10; i++) {
+ delta += dev->min_delta_ns;
+ dev->next_event = ktime_add_ns(ktime_get(), delta);

- if (clockevent_state_shutdown(dev))
- return 0;
+ if (clockevent_state_shutdown(dev))
+ return 0;

- dev->retries++;
- clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
- return dev->set_next_event((unsigned long) clc, dev);
+ dev->retries++;
+ clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
+ if (dev->set_next_event((unsigned long) clc, dev) == 0)
+ return 0;
+ }
+ return -ETIME;
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
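
The new loop bounds the retry dance: each failed programming attempt widens the requested delta by another min_delta_ns, and after ten failures the function reports -ETIME instead of retrying forever. The same logic in a self-contained sketch, where program_hw() stands in for dev->set_next_event() and is hypothetical:

#include <errno.h>
#include <stdint.h>

/* Returns 0 once the stubbed hardware accepts an event, -ETIME after
 * ten attempts. Mirrors the kernel loop above: the requested delta
 * grows by min_delta_ns on every retry, backing progressively off. */
static int program_min_delta(uint64_t min_delta_ns,
			     int (*program_hw)(uint64_t delta_ns))
{
	uint64_t delta = 0;
	int i;

	for (i = 0; i < 10; i++) {
		delta += min_delta_ns;
		if (program_hw(delta) == 0)
			return 0;
	}
	return -ETIME;
}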
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b6bebe28a3e0..2fb84028cfb0 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -717,8 +717,6 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
*/
delta = next_tick - basemono;
if (delta <= (u64)TICK_NSEC) {
- tick.tv64 = 0;
-
/*
* Tell the timer code that the base is not idle, i.e. undo
* the effect of get_next_timer_interrupt():
@@ -728,23 +726,8 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
* We've not stopped the tick yet, and there's a timer in the
* next period, so no point in stopping it either, bail.
*/
- if (!ts->tick_stopped)
- goto out;
-
- /*
- * If, OTOH, we did stop it, but there's a pending (expired)
- * timer reprogram the timer hardware to fire now.
- *
- * We will not restart the tick proper, just prod the timer
- * hardware into firing an interrupt to process the pending
- * timers. Just like tick_irq_exit() will not restart the tick
- * for 'normal' interrupts.
- *
- * Only once we exit the idle loop will we re-enable the tick,
- * see tick_nohz_idle_exit().
- */
- if (delta == 0) {
- tick_nohz_restart(ts, now);
+ if (!ts->tick_stopped) {
+ tick.tv64 = 0;
goto out;
}
}
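
After this hunk, a zero expiry is returned only while the tick has not yet been stopped; if it was already stopped, the code now falls through and reprograms the timer hardware for the close event rather than calling the removed tick_nohz_restart(). The resulting decision, distilled into a standalone sketch (the names and the 1 ms tick period are illustrative, not kernel values):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_TICK_NSEC 1000000ULL	/* illustrative 1 ms tick period */

/* Returns the absolute expiry to program, or 0 for "leave the periodic
 * tick running". Zero can only be returned while the tick still runs;
 * once stopped, a close timer is programmed like any other. */
static uint64_t demo_stop_tick_decision(uint64_t basemono, uint64_t next_tick,
					bool tick_stopped)
{
	uint64_t delta = next_tick - basemono;

	if (delta <= DEMO_TICK_NSEC && !tick_stopped)
		return 0;

	return next_tick;
}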
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index bfa8bb3a6e19..55257a3763f9 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1793,14 +1793,14 @@ void blk_dump_cmd(char *buf, struct request *rq)
}
}

-void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes)
+void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
{
int i = 0;

- if (rw & REQ_PREFLUSH)
+ if (op & REQ_PREFLUSH)
rwbs[i++] = 'F';

- switch (op) {
+ switch (op & REQ_OP_MASK) {
case REQ_OP_WRITE:
case REQ_OP_WRITE_SAME:
rwbs[i++] = 'W';
@@ -1822,13 +1822,13 @@ void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes)
rwbs[i++] = 'N';
}

- if (rw & REQ_FUA)
+ if (op & REQ_FUA)
rwbs[i++] = 'F';
- if (rw & REQ_RAHEAD)
+ if (op & REQ_RAHEAD)
rwbs[i++] = 'A';
- if (rw & REQ_SYNC)
+ if (op & REQ_SYNC)
rwbs[i++] = 'S';
- if (rw & REQ_META)
+ if (op & REQ_META)
rwbs[i++] = 'M';

rwbs[i] = '\0';
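
With the two flag words merged, a caller now passes a single value carrying both the opcode (selected via REQ_OP_MASK) and the modifier bits. A small usage sketch under the new signature; the buffer size follows blktrace's RWBS_LEN of 8, and the function itself is only built when block tracing is configured in:

#include <linux/blk_types.h>
#include <linux/blktrace_api.h>

static void demo_rwbs(void)
{
	char rwbs[8];	/* RWBS_LEN: opcode letter + modifiers + NUL */

	/* a synchronous FUA write: opcode and flags travel together now */
	blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC | REQ_FUA, 4096);
	/* rwbs now holds "WFS": Write, Fua, Sync */
}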