author     Gary Bisson <gary.bisson@boundarydevices.com>  2018-10-02 15:10:19 +0200
committer  Gary Bisson <gary.bisson@boundarydevices.com>  2018-10-02 15:10:19 +0200
commit     c98b0c5db65001d10d7f0af026801b9c193c369e (patch)
tree       f10a85be5659f9b2f8f3ca4aff41d8fd3aa01b37 /kernel
parent     cc9333d7aace5c1de4e81932870b53bb6614dedd (diff)
parent     46f9f7c3c326389d5765c28f120fead6cc068e67 (diff)
Merge tag 'v4.9.130' into 4.9-2.3.x-imx
This is the 4.9.130 stable release
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit_watch.c        12
-rw-r--r--  kernel/events/core.c         4
-rw-r--r--  kernel/sched/fair.c          3
-rw-r--r--  kernel/trace/ring_buffer.c   2
4 files changed, 19 insertions, 2 deletions
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 690e1e3c59f7..f036b6ada6ef 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -419,6 +419,13 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
struct path parent_path;
int h, ret = 0;
+ /*
+ * When we will be calling audit_add_to_parent, krule->watch might have
+ * been updated and watch might have been freed.
+ * So we need to keep a reference of watch.
+ */
+ audit_get_watch(watch);
+
mutex_unlock(&audit_filter_mutex);
/* Avoid calling path_lookup under audit_filter_mutex. */
@@ -427,8 +434,10 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
/* caller expects mutex locked */
mutex_lock(&audit_filter_mutex);
- if (ret)
+ if (ret) {
+ audit_put_watch(watch);
return ret;
+ }
/* either find an old parent or attach a new one */
parent = audit_find_parent(d_backing_inode(parent_path.dentry));
@@ -446,6 +455,7 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
*list = &audit_inode_hash[h];
error:
path_put(&parent_path);
+ audit_put_watch(watch);
return ret;
}
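
Note: the audit_watch.c change is a use-after-free fix built on the kernel's usual get/put reference-counting discipline: pin the watch with audit_get_watch() while it is still known to be valid, i.e. before audit_filter_mutex is dropped (after which another task may detach and free it), then drop the pin on the early-error return and on the common exit path. Below is a minimal, self-contained userspace sketch of that pattern; the names (struct obj, obj_get, obj_put, use_obj) are hypothetical stand-ins, not the audit API.

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
    atomic_int refcount;
};

static struct obj *obj_get(struct obj *o)   /* take a reference */
{
    atomic_fetch_add(&o->refcount, 1);
    return o;
}

static void obj_put(struct obj *o)          /* drop a reference, free on the last one */
{
    if (atomic_fetch_sub(&o->refcount, 1) == 1)
        free(o);
}

/* Same shape as the fixed audit_add_watch(): pin the object before the
 * window in which it can be freed by someone else, and unpin on every
 * return path. */
static int use_obj(struct obj *o)
{
    int ret = 0;

    obj_get(o);     /* keep 'o' alive across the unlocked window */
    /* ... lock dropped here, other threads may drop their references ... */
    if (ret) {
        obj_put(o);
        return ret;
    }
    /* ... remaining work ... */
    obj_put(o);
    return ret;
}

int main(void)
{
    struct obj *o = calloc(1, sizeof(*o));

    atomic_init(&o->refcount, 1);
    use_obj(o);
    obj_put(o);     /* drop the initial reference */
    return 0;
}
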
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6e6ec229c780..95bd00d9f2c3 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5563,6 +5563,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
unsigned long sp;
unsigned int rem;
u64 dyn_size;
+ mm_segment_t fs;
/*
* We dump:
@@ -5580,7 +5581,10 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
/* Data. */
sp = perf_user_stack_pointer(regs);
+ fs = get_fs();
+ set_fs(USER_DS);
rem = __output_copy_user(handle, (void *) sp, dump_size);
+ set_fs(fs);
dyn_size = dump_size - rem;
perf_output_skip(handle, rem);
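
Note: the events/core.c hunk is the save/override/restore pattern for the address limit: remember the current segment with get_fs(), force USER_DS so the user-stack copy is validated against user-space addresses even if the sampled context was running under KERNEL_DS, then restore the original limit. A kernel-style sketch of the same pattern follows (4.9-era API, not buildable outside a kernel tree); copy_user_stack() and dst are hypothetical, standing in for __output_copy_user() and the perf output handle.

static unsigned long copy_user_stack(void *dst, const void __user *sp,
                                     unsigned long size)
{
    mm_segment_t old_fs = get_fs();   /* remember the caller's address limit */
    unsigned long rem;

    set_fs(USER_DS);                  /* make access checks treat 'sp' as a user pointer */
    rem = copy_from_user(dst, sp, size);   /* returns bytes NOT copied */
    set_fs(old_fs);                   /* always restore before returning */

    return rem;
}
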
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f6e8727f7fa3..f81adb476c03 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8639,7 +8639,8 @@ static inline bool vruntime_normalized(struct task_struct *p)
* - A task which has been woken up by try_to_wake_up() and
* waiting for actually being woken up by sched_ttwu_pending().
*/
- if (!se->sum_exec_runtime || p->state == TASK_WAKING)
+ if (!se->sum_exec_runtime ||
+ (p->state == TASK_WAKING && p->sched_remote_wakeup))
return true;
return false;
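
Note: the sched/fair.c hunk tightens a predicate: a TASK_WAKING task is treated as having a normalized vruntime only when the wakeup was a remote one (p->sched_remote_wakeup set, i.e. the task was queued on another CPU's wake list); a locally woken task in TASK_WAKING still carries a non-normalized vruntime. An illustrative restatement with hypothetical stand-in types (not the kernel's task_struct/sched_entity):

#include <stdbool.h>
#include <stdint.h>

#define TASK_WAKING 0x0100   /* illustrative; the kernel defines its own value */

struct fake_task {
    uint64_t sum_exec_runtime;   /* 0 => the task has never run */
    long state;
    bool sched_remote_wakeup;    /* woken via another CPU's wake list */
};

static bool vruntime_is_normalized(const struct fake_task *p)
{
    /* Never-ran tasks and remote (wake-list) wakeups carry a normalized
     * vruntime; a locally woken TASK_WAKING task does not. */
    return !p->sum_exec_runtime ||
           (p->state == TASK_WAKING && p->sched_remote_wakeup);
}
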
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index dc29b600d2cb..f316e90ad538 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1504,6 +1504,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
tmp_iter_page = first_page;
do {
+ cond_resched();
+
to_remove_page = tmp_iter_page;
rb_inc_page(cpu_buffer, &tmp_iter_page);
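
Note: the ring_buffer.c change adds a cond_resched() inside the page-removal loop so the scheduler gets a preemption point on every iteration; without it, shrinking a very large ring buffer can hog the CPU for a long time. This is the standard pattern for any long loop that runs in process context without holding spinlocks. A kernel-style sketch under those assumptions, with hypothetical entry type and free helper:

/* Long loop in process context: offer a reschedule point each iteration. */
static void release_many_entries(struct list_head *entries)
{
    struct my_entry *e, *tmp;   /* hypothetical element type */

    list_for_each_entry_safe(e, tmp, entries, list) {
        cond_resched();         /* may sleep: no spinlocks held here */
        list_del(&e->list);
        free_entry(e);          /* hypothetical helper */
    }
}
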