author     Philippe Schenker <philippe.schenker@toradex.com>  2019-05-08 16:08:30 +0200
committer  Philippe Schenker <philippe.schenker@toradex.com>  2019-05-08 16:08:30 +0200
commit     774f42075a4800fe4106dffca804e3207bc3c2e7 (patch)
tree       4bb3e8ca84f7f23ac41966017e81ffe09e98412e /kernel/sched
parent     95a0514c2e68a035e2adbdb5a297da310dccb09c (diff)
parent     33e2f28596a7059dbe31d837caae924a74aa36d8 (diff)
Merge branch 'linux-4.14.y_for_4.14-2.0.x-imx' into 4.14-2.0.x-imx
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/cpufreq_schedutil.c   3
-rw-r--r--  kernel/sched/deadline.c            3
-rw-r--r--  kernel/sched/debug.c               4
-rw-r--r--  kernel/sched/fair.c               36
-rw-r--r--  kernel/sched/topology.c            2
5 files changed, 40 insertions, 8 deletions
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 6d689c039189..d0d39f714f47 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -605,10 +605,9 @@ fail:
stop_kthread:
sugov_kthread_stop(sg_policy);
-
-free_sg_policy:
mutex_unlock(&global_tunables_lock);
+free_sg_policy:
sugov_policy_free(sg_policy);
disable_fast_switch:
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b2589c7e9439..22770168bff8 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -217,7 +217,6 @@ static void task_non_contending(struct task_struct *p)
if (dl_se->dl_runtime == 0)
return;
- WARN_ON(hrtimer_active(&dl_se->inactive_timer));
WARN_ON(dl_se->dl_non_contending);
zerolag_time = dl_se->deadline -
@@ -234,7 +233,7 @@ static void task_non_contending(struct task_struct *p)
* If the "0-lag time" already passed, decrease the active
* utilization now, instead of starting a timer
*/
- if (zerolag_time < 0) {
+ if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
if (dl_task(p))
sub_running_bw(dl_se->dl_bw, dl_rq);
if (!dl_task(p) || p->state == TASK_DEAD) {
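
The "0-lag time" referenced above is the instant at which the task's remaining runtime, consumed at its reserved rate dl_runtime/dl_period, would bring its lag to zero; if that instant is already in the past (or, with this change, the inactive timer is already queued), the running bandwidth is dropped immediately instead of arming the timer. A standalone C sketch of that idea, with illustrative names and values only (not the kernel's exact expression, which is truncated in the hunk above):

#include <stdint.h>
#include <stdbool.h>

/* All times in nanoseconds; the task reserves dl_runtime every dl_period. */
static bool zero_lag_already_passed(int64_t deadline, int64_t remaining_runtime,
				    int64_t dl_runtime, int64_t dl_period,
				    int64_t now)
{
	/* 0-lag instant: the deadline minus the wall time the remaining
	 * runtime "buys" at the reserved bandwidth dl_runtime/dl_period. */
	int64_t zero_lag = deadline - (remaining_runtime * dl_period) / dl_runtime;

	return zero_lag - now < 0;	/* already passed: release bandwidth now */
}
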
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2f93e4a2d9f6..187c04a34ba1 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -339,6 +339,7 @@ void register_sched_domain_sysctl(void)
{
static struct ctl_table *cpu_entries;
static struct ctl_table **cpu_idx;
+ static bool init_done = false;
char buf[32];
int i;
@@ -368,7 +369,10 @@ void register_sched_domain_sysctl(void)
if (!cpumask_available(sd_sysctl_cpus)) {
if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
return;
+ }
+ if (!init_done) {
+ init_done = true;
/* init to possible to not have holes in @cpu_entries */
cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f33b24080b1c..af7de1f9906c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2026,6 +2026,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
if (p->last_task_numa_placement) {
delta = runtime - p->last_sum_exec_runtime;
*period = now - p->last_task_numa_placement;
+
+ /* Avoid time going backwards, prevent potential divide error: */
+ if (unlikely((s64)*period < 0))
+ *period = 0;
} else {
delta = p->se.avg.load_sum / p->se.load.weight;
*period = LOAD_AVG_MAX;
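
The guard added above matters because *period is unsigned: per-CPU clocks are not perfectly synchronized, so after a migration "now" can land slightly before the stored timestamp, and the subtraction then wraps to a value near U64_MAX, which can make a later divisor computed from it wrap back to zero. A standalone C sketch of the failure mode and the fix (values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t last = 1000, now = 999;	/* clock appears to go backwards */
	uint64_t period = now - last;		/* wraps to 0xffffffffffffffff   */

	if ((int64_t)period < 0)		/* the check added in the hunk   */
		period = 0;

	/* a divisor of the form "period + 1" now stays at 1 instead of
	 * overflowing back to 0 and faulting */
	printf("divisor = %llu\n", (unsigned long long)(period + 1));
	return 0;
}
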
@@ -4672,12 +4676,15 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
+extern const u64 max_cfs_quota_period;
+
static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
{
struct cfs_bandwidth *cfs_b =
container_of(timer, struct cfs_bandwidth, period_timer);
int overrun;
int idle = 0;
+ int count = 0;
raw_spin_lock(&cfs_b->lock);
for (;;) {
@@ -4685,6 +4692,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
if (!overrun)
break;
+ if (++count > 3) {
+ u64 new, old = ktime_to_ns(cfs_b->period);
+
+ new = (old * 147) / 128; /* ~115% */
+ new = min(new, max_cfs_quota_period);
+
+ cfs_b->period = ns_to_ktime(new);
+
+ /* since max is 1s, this is limited to 1e9^2, which fits in u64 */
+ cfs_b->quota *= new;
+ cfs_b->quota = div64_u64(cfs_b->quota, old);
+
+ pr_warn_ratelimited(
+ "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
+ smp_processor_id(),
+ div_u64(new, NSEC_PER_USEC),
+ div_u64(cfs_b->quota, NSEC_PER_USEC));
+
+ /* reset count so we don't come right back in here */
+ count = 0;
+ }
+
idle = do_sched_cfs_period_timer(cfs_b, overrun);
}
if (idle)
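
The branch added above rescales the bandwidth window when the period timer keeps catching up on overruns: period and quota are both multiplied by the same ~115% factor (147/128), so the permitted bandwidth quota/period is unchanged while the timer fires less often. A standalone C sketch of that arithmetic, with hard-coded illustrative values (the 1 s cap mirrors max_cfs_quota_period from the hunk):

#include <stdint.h>
#include <stdio.h>

#define MAX_PERIOD_NS 1000000000ULL		/* 1 s, as noted in the hunk */

static void scale_up(uint64_t *period_ns, uint64_t *quota_ns)
{
	uint64_t old = *period_ns;
	uint64_t new = old * 147 / 128;		/* ~115% */

	if (new > MAX_PERIOD_NS)
		new = MAX_PERIOD_NS;

	/* multiply before dividing, as the hunk does; with the 1 s cap the
	 * intermediate product still fits in u64 */
	*quota_ns = *quota_ns * new / old;	/* quota/period ratio preserved */
	*period_ns = new;
}

int main(void)
{
	uint64_t period = 100000, quota = 50000;	/* 100 us period, 50% bandwidth */

	scale_up(&period, &quota);
	printf("period=%llu ns, quota=%llu ns\n",
	       (unsigned long long)period, (unsigned long long)quota);
	return 0;
}
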
@@ -5651,6 +5680,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
#ifdef CONFIG_SCHED_SMT
DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+EXPORT_SYMBOL_GPL(sched_smt_present);
static inline void set_idle_cores(int cpu, int val)
{
@@ -7017,10 +7047,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
if (cfs_rq->last_h_load_update == now)
return;
- cfs_rq->h_load_next = NULL;
+ WRITE_ONCE(cfs_rq->h_load_next, NULL);
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
- cfs_rq->h_load_next = se;
+ WRITE_ONCE(cfs_rq->h_load_next, se);
if (cfs_rq->last_h_load_update == now)
break;
}
@@ -7030,7 +7060,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
cfs_rq->last_h_load_update = now;
}
- while ((se = cfs_rq->h_load_next) != NULL) {
+ while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
load = cfs_rq->h_load;
load = div64_ul(load * se->avg.load_avg,
cfs_rq_load_avg(cfs_rq) + 1);
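
The WRITE_ONCE()/READ_ONCE() pairing above is there because h_load_next can be updated concurrently from another CPU while this walk runs, so plain pointer accesses could be torn, re-read or cached by the compiler. A loose userspace analogue using C11 relaxed atomics (types and names are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stddef.h>

struct node {
	struct node *_Atomic next;	/* stand-in for cfs_rq->h_load_next */
	int load;
};

static int walk(struct node *n)
{
	struct node *p;
	int sum = 0;

	/* each iteration performs one fresh, non-torn load of the pointer */
	while ((p = atomic_load_explicit(&n->next, memory_order_relaxed)) != NULL) {
		sum += p->load;
		n = p;
	}
	return sum;
}
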
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 659e075ef70b..9dcd80ed9d4c 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -499,7 +499,7 @@ static int __init isolated_cpu_setup(char *str)
__setup("isolcpus=", isolated_cpu_setup);
struct s_data {
- struct sched_domain ** __percpu sd;
+ struct sched_domain * __percpu *sd;
struct root_domain *rd;
};